git.blender.org/blender.git
author     Benoit Bolsee <benoit.bolsee@online.be>  2009-09-25 01:22:24 +0400
committer  Benoit Bolsee <benoit.bolsee@online.be>  2009-09-25 01:22:24 +0400
commit     1483fafd1372a3d3e025d08634e798adb7da512f (patch)
tree       9191765749e29866339f4c31d892603f5f8b334d /extern/Eigen2/Eigen/src/Sparse
parent     c995c605f640d8d688e6e58e0fe247ca83f91696 (diff)
parent     222fe6b1a5d49f67177cbb762f55a0e482145f5d (diff)
Merge of itasc branch. Project files, scons and cmake should be working. Makefile updated but not tested. Comes with Eigen2 2.0.6 C++ matrix library.
Diffstat (limited to 'extern/Eigen2/Eigen/src/Sparse')
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/AmbiVector.h  371
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h  236
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h  230
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/CoreIterators.h  68
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h  297
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h  175
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/RandomSetter.h  330
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseAssign.h  0
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseBlock.h  449
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseCwise.h  175
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h  442
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h  183
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h  157
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseDot.h  97
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h  97
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h  41
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h  346
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseLLT.h  205
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseLU.h  148
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h  447
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h  626
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseProduct.h  415
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseRedux.h  40
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h  85
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseUtil.h  148
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SparseVector.h  365
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h  565
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h  210
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h  178
-rw-r--r--  extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h  289
30 files changed, 7415 insertions, 0 deletions
diff --git a/extern/Eigen2/Eigen/src/Sparse/AmbiVector.h b/extern/Eigen2/Eigen/src/Sparse/AmbiVector.h
new file mode 100644
index 00000000000..75001a2fa25
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/AmbiVector.h
@@ -0,0 +1,371 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_AMBIVECTOR_H
+#define EIGEN_AMBIVECTOR_H
+
+/** \internal
+ * Hybrid sparse/dense vector class designed for intensive read-write operations.
+ *
+ * See BasicSparseLLT and SparseProduct for usage examples.
+ */
+template<typename _Scalar> class AmbiVector
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ AmbiVector(int size)
+ : m_buffer(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+ {
+ resize(size);
+ }
+
+ void init(RealScalar estimatedDensity);
+ void init(int mode);
+
+    int nonZeros() const;
+
+ /** Specifies a sub-vector to work on */
+ void setBounds(int start, int end) { m_start = start; m_end = end; }
+
+ void setZero();
+
+ void restart();
+ Scalar& coeffRef(int i);
+ Scalar coeff(int i);
+
+ class Iterator;
+
+ ~AmbiVector() { delete[] m_buffer; }
+
+ void resize(int size)
+ {
+ if (m_allocatedSize < size)
+ reallocate(size);
+ m_size = size;
+ }
+
+ int size() const { return m_size; }
+
+ protected:
+
+ void reallocate(int size)
+ {
+ // if the size of the matrix is not too large, let's allocate a bit more than needed such
+      // that we can handle a dense vector even in sparse mode.
+ delete[] m_buffer;
+ if (size<1000)
+ {
+ int allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+ m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[allocSize];
+ }
+ else
+ {
+ m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[size];
+ }
+ m_size = size;
+ m_start = 0;
+ m_end = m_size;
+ }
+
+ void reallocateSparse()
+ {
+ int copyElements = m_allocatedElements;
+ m_allocatedElements = std::min(int(m_allocatedElements*1.5),m_size);
+ int allocSize = m_allocatedElements * sizeof(ListEl);
+ allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
+ Scalar* newBuffer = new Scalar[allocSize];
+      memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
+      // release the old buffer and adopt the enlarged one
+      delete[] m_buffer;
+      m_buffer = newBuffer;
+    }
+
+ protected:
+ // element type of the linked list
+ struct ListEl
+ {
+ int next;
+ int index;
+ Scalar value;
+ };
+
+    // used to store data in both modes
+ Scalar* m_buffer;
+ int m_size;
+ int m_start;
+ int m_end;
+ int m_allocatedSize;
+ int m_allocatedElements;
+ int m_mode;
+
+ // linked list mode
+ int m_llStart;
+ int m_llCurrent;
+ int m_llSize;
+
+ private:
+ AmbiVector(const AmbiVector&);
+
+};
+
+/** \returns the number of nonzeros in the current sub-vector */
+template<typename Scalar>
+int AmbiVector<Scalar>::nonZeros() const
+{
+ if (m_mode==IsSparse)
+ return m_llSize;
+ else
+ return m_end - m_start;
+}
+
+template<typename Scalar>
+void AmbiVector<Scalar>::init(RealScalar estimatedDensity)
+{
+ if (estimatedDensity>0.1)
+ init(IsDense);
+ else
+ init(IsSparse);
+}
+
+template<typename Scalar>
+void AmbiVector<Scalar>::init(int mode)
+{
+ m_mode = mode;
+ if (m_mode==IsSparse)
+ {
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+/** Must be called whenever we might perform a write access
+ * with an index smaller than the previous one.
+ *
+ * Don't worry, this function is extremely cheap.
+ */
+template<typename Scalar>
+void AmbiVector<Scalar>::restart()
+{
+ m_llCurrent = m_llStart;
+}
+
+/** Set all coefficients of current subvector to zero */
+template<typename Scalar>
+void AmbiVector<Scalar>::setZero()
+{
+ if (m_mode==IsDense)
+ {
+ for (int i=m_start; i<m_end; ++i)
+ m_buffer[i] = Scalar(0);
+ }
+ else
+ {
+ ei_assert(m_mode==IsSparse);
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+template<typename Scalar>
+Scalar& AmbiVector<Scalar>::coeffRef(int i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ // TODO factorize the following code to reduce code generation
+ ei_assert(m_mode==IsSparse);
+ if (m_llSize==0)
+ {
+ // this is the first element
+ m_llStart = 0;
+ m_llCurrent = 0;
+ ++m_llSize;
+ llElements[0].value = Scalar(0);
+ llElements[0].index = i;
+ llElements[0].next = -1;
+ return llElements[0].value;
+ }
+ else if (i<llElements[m_llStart].index)
+ {
+ // this is going to be the new first element of the list
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = m_llStart;
+ m_llStart = m_llSize;
+ ++m_llSize;
+ m_llCurrent = m_llStart;
+ return el.value;
+ }
+ else
+ {
+ int nextel = llElements[m_llCurrent].next;
+ ei_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
+ while (nextel >= 0 && llElements[nextel].index<=i)
+ {
+ m_llCurrent = nextel;
+ nextel = llElements[nextel].next;
+ }
+
+ if (llElements[m_llCurrent].index==i)
+ {
+ // the coefficient already exists and we found it !
+ return llElements[m_llCurrent].value;
+ }
+ else
+ {
+ if (m_llSize>=m_allocatedElements)
+ reallocateSparse();
+ ei_internal_assert(m_llSize<m_size && "internal error: overflow in sparse mode");
+ // let's insert a new coefficient
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = llElements[m_llCurrent].next;
+ llElements[m_llCurrent].next = m_llSize;
+ ++m_llSize;
+ return el.value;
+ }
+ }
+ }
+}
+
+template<typename Scalar>
+Scalar AmbiVector<Scalar>::coeff(int i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ ei_assert(m_mode==IsSparse);
+ if ((m_llSize==0) || (i<llElements[m_llStart].index))
+ {
+ return Scalar(0);
+ }
+ else
+ {
+ int elid = m_llStart;
+ while (elid >= 0 && llElements[elid].index<i)
+ elid = llElements[elid].next;
+
+      if (elid >= 0 && llElements[elid].index==i)
+        return llElements[elid].value;
+ else
+ return Scalar(0);
+ }
+ }
+}
+
+/** Iterator over the nonzero coefficients */
+template<typename _Scalar>
+class AmbiVector<_Scalar>::Iterator
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** Default constructor
+ * \param vec the vector on which we iterate
+ * \param epsilon the minimal value used to prune zero coefficients.
+ * In practice, all coefficients having a magnitude smaller than \a epsilon
+ * are skipped.
+ */
+ Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*precision<RealScalar>())
+ : m_vector(vec)
+ {
+ m_epsilon = epsilon;
+ m_isDense = m_vector.m_mode==IsDense;
+ if (m_isDense)
+ {
+ m_cachedIndex = m_vector.m_start-1;
+ ++(*this);
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ m_currentEl = m_vector.m_llStart;
+ while (m_currentEl>=0 && ei_abs(llElements[m_currentEl].value)<m_epsilon)
+ m_currentEl = llElements[m_currentEl].next;
+ if (m_currentEl<0)
+ {
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ }
+
+ int index() const { return m_cachedIndex; }
+ Scalar value() const { return m_cachedValue; }
+
+ operator bool() const { return m_cachedIndex>=0; }
+
+ Iterator& operator++()
+ {
+ if (m_isDense)
+ {
+ do {
+ ++m_cachedIndex;
+ } while (m_cachedIndex<m_vector.m_end && ei_abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+ if (m_cachedIndex<m_vector.m_end)
+ m_cachedValue = m_vector.m_buffer[m_cachedIndex];
+ else
+ m_cachedIndex=-1;
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ do {
+ m_currentEl = llElements[m_currentEl].next;
+ } while (m_currentEl>=0 && ei_abs(llElements[m_currentEl].value)<m_epsilon);
+ if (m_currentEl<0)
+ {
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ return *this;
+ }
+
+ protected:
+ const AmbiVector& m_vector; // the target vector
+ int m_currentEl; // the current element in sparse/linked-list mode
+ RealScalar m_epsilon; // epsilon used to prune zero coefficients
+ int m_cachedIndex; // current coordinate
+ Scalar m_cachedValue; // current value
+ bool m_isDense; // mode of the vector
+};
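+
+// A minimal usage sketch (illustrative only; this helper is hypothetical and
+// not part of the file): coefficients must be written with non-decreasing
+// indices within a pass, and restart() must be called before writing a
+// smaller index again.
+template<typename Scalar>
+void ei_ambivector_usage_sketch(int size)
+{
+  typedef typename AmbiVector<Scalar>::RealScalar Real;
+  AmbiVector<Scalar> vec(size);
+  vec.init(Real(0.01));          // low estimated density -> sparse (linked-list) mode
+  vec.setZero();
+  vec.coeffRef(3) += Scalar(1);  // non-decreasing indices within this pass
+  vec.coeffRef(7) += Scalar(2);
+  vec.restart();                 // mandatory before accessing a smaller index
+  vec.coeffRef(1) += Scalar(3);
+  Scalar sum(0);
+  for (typename AmbiVector<Scalar>::Iterator it(vec); it; ++it)
+    sum += it.value();           // visits the 3 nonzeros in increasing index order
+}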
+
+
+#endif // EIGEN_AMBIVECTOR_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h b/extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h
new file mode 100644
index 00000000000..dfd9c787ae9
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CHOLMODSUPPORT_H
+#define EIGEN_CHOLMODSUPPORT_H
+
+template<typename Scalar, typename CholmodType>
+void ei_cholmod_configure_matrix(CholmodType& mat)
+{
+ if (ei_is_same_type<Scalar,float>::ret)
+ {
+ mat.xtype = CHOLMOD_REAL;
+ mat.dtype = 1;
+ }
+ else if (ei_is_same_type<Scalar,double>::ret)
+ {
+ mat.xtype = CHOLMOD_REAL;
+ mat.dtype = 0;
+ }
+ else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
+ {
+ mat.xtype = CHOLMOD_COMPLEX;
+ mat.dtype = 1;
+ }
+ else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
+ {
+ mat.xtype = CHOLMOD_COMPLEX;
+ mat.dtype = 0;
+ }
+ else
+ {
+ ei_assert(false && "Scalar type not supported by CHOLMOD");
+ }
+}
+
+template<typename Derived>
+cholmod_sparse SparseMatrixBase<Derived>::asCholmodMatrix()
+{
+ typedef typename Derived::Scalar Scalar;
+ cholmod_sparse res;
+ res.nzmax = nonZeros();
+  res.nrow = rows();
+ res.ncol = cols();
+ res.p = derived()._outerIndexPtr();
+ res.i = derived()._innerIndexPtr();
+ res.x = derived()._valuePtr();
+ res.xtype = CHOLMOD_REAL;
+ res.itype = CHOLMOD_INT;
+ res.sorted = 1;
+ res.packed = 1;
+ res.dtype = 0;
+ res.stype = -1;
+
+ ei_cholmod_configure_matrix<Scalar>(res);
+
+ if (Derived::Flags & SelfAdjoint)
+ {
+ if (Derived::Flags & UpperTriangular)
+ res.stype = 1;
+ else if (Derived::Flags & LowerTriangular)
+ res.stype = -1;
+ else
+ res.stype = 0;
+ }
+ else
+ res.stype = 0;
+
+ return res;
+}
+
+template<typename Derived>
+cholmod_dense ei_cholmod_map_eigen_to_dense(MatrixBase<Derived>& mat)
+{
+ EIGEN_STATIC_ASSERT((ei_traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ typedef typename Derived::Scalar Scalar;
+
+ cholmod_dense res;
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ res.nzmax = res.nrow * res.ncol;
+ res.d = mat.derived().stride();
+ res.x = mat.derived().data();
+ res.z = 0;
+
+ ei_cholmod_configure_matrix<Scalar>(res);
+
+ return res;
+}
+
+template<typename Scalar, int Flags>
+MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(cholmod_sparse& cm)
+{
+ m_innerSize = cm.nrow;
+ m_outerSize = cm.ncol;
+ m_outerIndex = reinterpret_cast<int*>(cm.p);
+ m_innerIndices = reinterpret_cast<int*>(cm.i);
+ m_values = reinterpret_cast<Scalar*>(cm.x);
+ m_nnz = m_outerIndex[cm.ncol];
+}
+
+template<typename MatrixType>
+class SparseLLT<MatrixType,Cholmod> : public SparseLLT<MatrixType>
+{
+ protected:
+ typedef SparseLLT<MatrixType> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ using Base::MatrixLIsDirty;
+ using Base::SupernodalFactorIsDirty;
+ using Base::m_flags;
+ using Base::m_matrix;
+ using Base::m_status;
+
+ public:
+
+ SparseLLT(int flags = 0)
+ : Base(flags), m_cholmodFactor(0)
+ {
+ cholmod_start(&m_cholmod);
+ }
+
+ SparseLLT(const MatrixType& matrix, int flags = 0)
+ : Base(flags), m_cholmodFactor(0)
+ {
+ cholmod_start(&m_cholmod);
+ compute(matrix);
+ }
+
+ ~SparseLLT()
+ {
+ if (m_cholmodFactor)
+ cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+ cholmod_finish(&m_cholmod);
+ }
+
+ inline const typename Base::CholMatrixType& matrixL(void) const;
+
+ template<typename Derived>
+ void solveInPlace(MatrixBase<Derived> &b) const;
+
+ void compute(const MatrixType& matrix);
+
+ protected:
+ mutable cholmod_common m_cholmod;
+ cholmod_factor* m_cholmodFactor;
+};
+
+template<typename MatrixType>
+void SparseLLT<MatrixType,Cholmod>::compute(const MatrixType& a)
+{
+ if (m_cholmodFactor)
+ {
+ cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+ m_cholmodFactor = 0;
+ }
+
+ cholmod_sparse A = const_cast<MatrixType&>(a).asCholmodMatrix();
+ m_cholmod.supernodal = CHOLMOD_AUTO;
+ // TODO
+ if (m_flags&IncompleteFactorization)
+ {
+ m_cholmod.nmethods = 1;
+ m_cholmod.method[0].ordering = CHOLMOD_NATURAL;
+ m_cholmod.postorder = 0;
+ }
+ else
+ {
+ m_cholmod.nmethods = 1;
+ m_cholmod.method[0].ordering = CHOLMOD_NATURAL;
+ m_cholmod.postorder = 0;
+ }
+ m_cholmod.final_ll = 1;
+ m_cholmodFactor = cholmod_analyze(&A, &m_cholmod);
+ cholmod_factorize(&A, m_cholmodFactor, &m_cholmod);
+
+ m_status = (m_status & ~SupernodalFactorIsDirty) | MatrixLIsDirty;
+}
+
+template<typename MatrixType>
+inline const typename SparseLLT<MatrixType>::CholMatrixType&
+SparseLLT<MatrixType,Cholmod>::matrixL() const
+{
+ if (m_status & MatrixLIsDirty)
+ {
+ ei_assert(!(m_status & SupernodalFactorIsDirty));
+
+ cholmod_sparse* cmRes = cholmod_factor_to_sparse(m_cholmodFactor, &m_cholmod);
+ const_cast<typename Base::CholMatrixType&>(m_matrix) = MappedSparseMatrix<Scalar>(*cmRes);
+    cholmod_free_sparse(&cmRes, &m_cholmod); // release through CHOLMOD's own allocator
+
+ m_status = (m_status & ~MatrixLIsDirty);
+ }
+ return m_matrix;
+}
+
+template<typename MatrixType>
+template<typename Derived>
+void SparseLLT<MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
+{
+ const int size = m_cholmodFactor->n;
+ ei_assert(size==b.rows());
+
+ // this uses Eigen's triangular sparse solver
+// if (m_status & MatrixLIsDirty)
+// matrixL();
+// Base::solveInPlace(b);
+ // as long as our own triangular sparse solver is not fully optimal,
+ // let's use CHOLMOD's one:
+ cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(b);
+ cholmod_dense* x = cholmod_solve(CHOLMOD_LDLt, m_cholmodFactor, &cdb, &m_cholmod);
+ b = Matrix<typename Base::Scalar,Dynamic,1>::Map(reinterpret_cast<typename Base::Scalar*>(x->x),b.rows());
+ cholmod_free_dense(&x, &m_cholmod);
+}
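+
+// Typical use of this backend (an illustrative sketch; the flag combination
+// and names below are assumptions, not taken from this file):
+//
+//   SparseMatrix<double,LowerTriangular|SelfAdjoint> A(n,n);
+//   // ... fill the lower triangular part of A ...
+//   VectorXd b(n);
+//   // ... fill b ...
+//   SparseLLT<SparseMatrix<double,LowerTriangular|SelfAdjoint>, Cholmod> llt(A);
+//   llt.solveInPlace(b);  // b now holds the solution of A*x = b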
+
+#endif // EIGEN_CHOLMODSUPPORT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h b/extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h
new file mode 100644
index 00000000000..4dbd3230985
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h
@@ -0,0 +1,230 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPRESSED_STORAGE_H
+#define EIGEN_COMPRESSED_STORAGE_H
+
+/** Stores a sparse set of values as a list of values and a list of indices.
+ *
+ */
+template<typename Scalar>
+class CompressedStorage
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ public:
+ CompressedStorage()
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {}
+
+ CompressedStorage(size_t size)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ resize(size);
+ }
+
+ CompressedStorage(const CompressedStorage& other)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ *this = other;
+ }
+
+ CompressedStorage& operator=(const CompressedStorage& other)
+ {
+ resize(other.size());
+ memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
+ memcpy(m_indices, other.m_indices, m_size * sizeof(int));
+ return *this;
+ }
+
+ void swap(CompressedStorage& other)
+ {
+ std::swap(m_values, other.m_values);
+ std::swap(m_indices, other.m_indices);
+ std::swap(m_size, other.m_size);
+ std::swap(m_allocatedSize, other.m_allocatedSize);
+ }
+
+ ~CompressedStorage()
+ {
+ delete[] m_values;
+ delete[] m_indices;
+ }
+
+ void reserve(size_t size)
+ {
+ size_t newAllocatedSize = m_size + size;
+ if (newAllocatedSize > m_allocatedSize)
+ reallocate(newAllocatedSize);
+ }
+
+ void squeeze()
+ {
+ if (m_allocatedSize>m_size)
+ reallocate(m_size);
+ }
+
+ void resize(size_t size, float reserveSizeFactor = 0)
+ {
+ if (m_allocatedSize<size)
+ reallocate(size + size_t(reserveSizeFactor*size));
+ m_size = size;
+ }
+
+ void append(const Scalar& v, int i)
+ {
+ int id = m_size;
+ resize(m_size+1, 1);
+ m_values[id] = v;
+ m_indices[id] = i;
+ }
+
+ inline size_t size() const { return m_size; }
+ inline size_t allocatedSize() const { return m_allocatedSize; }
+ inline void clear() { m_size = 0; }
+
+ inline Scalar& value(size_t i) { return m_values[i]; }
+ inline const Scalar& value(size_t i) const { return m_values[i]; }
+
+ inline int& index(size_t i) { return m_indices[i]; }
+ inline const int& index(size_t i) const { return m_indices[i]; }
+
+ static CompressedStorage Map(int* indices, Scalar* values, size_t size)
+ {
+ CompressedStorage res;
+ res.m_indices = indices;
+ res.m_values = values;
+ res.m_allocatedSize = res.m_size = size;
+ return res;
+ }
+
+ /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+ inline int searchLowerIndex(int key) const
+ {
+ return searchLowerIndex(0, m_size, key);
+ }
+
+ /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+ inline int searchLowerIndex(size_t start, size_t end, int key) const
+ {
+ while(end>start)
+ {
+ size_t mid = (end+start)>>1;
+ if (m_indices[mid]<key)
+ start = mid+1;
+ else
+ end = mid;
+ }
+ return start;
+ }
+
+ /** \returns the stored value at index \a key
+ * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+ inline Scalar at(int key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (m_size==0)
+ return defaultValue;
+ else if (key==m_indices[m_size-1])
+ return m_values[m_size-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(0,m_size-1,key);
+ return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** Like at(), but the search is performed in the range [start,end) */
+ inline Scalar atInRange(size_t start, size_t end, int key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (start==end)
+        return defaultValue;
+ else if (end>start && key==m_indices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(start,end-1,key);
+ return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** \returns a reference to the value at index \a key
+ * If the value does not exist, then the value \a defaultValue is inserted
+ * such that the keys are sorted. */
+ inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0))
+ {
+ size_t id = searchLowerIndex(0,m_size,key);
+ if (id>=m_size || m_indices[id]!=key)
+ {
+ resize(m_size+1,1);
+ for (size_t j=m_size-1; j>id; --j)
+ {
+ m_indices[j] = m_indices[j-1];
+ m_values[j] = m_values[j-1];
+ }
+ m_indices[id] = key;
+ m_values[id] = defaultValue;
+ }
+ return m_values[id];
+ }
+
+ void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
+ {
+ size_t k = 0;
+ size_t n = size();
+ for (size_t i=0; i<n; ++i)
+ {
+ if (!ei_isMuchSmallerThan(value(i), reference, epsilon))
+ {
+ value(k) = value(i);
+ index(k) = index(i);
+ ++k;
+ }
+ }
+ resize(k,0);
+ }
+
+ protected:
+
+ inline void reallocate(size_t size)
+ {
+ Scalar* newValues = new Scalar[size];
+ int* newIndices = new int[size];
+ size_t copySize = std::min(size, m_size);
+ // copy
+ memcpy(newValues, m_values, copySize * sizeof(Scalar));
+ memcpy(newIndices, m_indices, copySize * sizeof(int));
+ // delete old stuff
+ delete[] m_values;
+ delete[] m_indices;
+ m_values = newValues;
+ m_indices = newIndices;
+ m_allocatedSize = size;
+ }
+
+ protected:
+ Scalar* m_values;
+ int* m_indices;
+ size_t m_size;
+ size_t m_allocatedSize;
+
+};
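+
+// A minimal sketch of the key/value semantics (illustrative only; this helper
+// is not part of the file): append() expects strictly increasing indices,
+// at() performs a binary search, atWithInsertion() keeps the keys sorted.
+inline void ei_compressed_storage_sketch()
+{
+  CompressedStorage<double> s;
+  s.append(1.0, 2);             // value 1.0 at index 2
+  s.append(3.0, 5);             // indices appended in increasing order
+  double hit  = s.at(5);        // 3.0
+  double miss = s.at(4, -1.0);  // -1.0: an absent key returns the default value
+  s.atWithInsertion(3) = 7.0;   // sorted insertion between indices 2 and 5
+  (void)hit; (void)miss;
+}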
+
+#endif // EIGEN_COMPRESSED_STORAGE_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/CoreIterators.h b/extern/Eigen2/Eigen/src/Sparse/CoreIterators.h
new file mode 100644
index 00000000000..f1520a585ca
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/CoreIterators.h
@@ -0,0 +1,68 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COREITERATORS_H
+#define EIGEN_COREITERATORS_H
+
+/* This file contains the InnerIterator definitions for the expressions defined in Eigen/Core
+ */
+
+/** \class InnerIterator
+ * \brief An InnerIterator allows looping over the elements of a sparse (or dense) matrix or expression
+ *
+ * todo
+ */
+
+// generic version for dense matrix and expressions
+template<typename Derived> class MatrixBase<Derived>::InnerIterator
+{
+ typedef typename Derived::Scalar Scalar;
+ enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, int outer)
+      : m_expression(expr), m_inner(0), m_outer(outer), m_end(IsRowMajor ? expr.cols() : expr.rows())
+ {}
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ {
+ return (IsRowMajor) ? m_expression.coeff(m_outer, m_inner)
+ : m_expression.coeff(m_inner, m_outer);
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
+
+ EIGEN_STRONG_INLINE int index() const { return m_inner; }
+ inline int row() const { return IsRowMajor ? m_outer : index(); }
+ inline int col() const { return IsRowMajor ? index() : m_outer; }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+ protected:
+ const Derived& m_expression;
+ int m_inner;
+ const int m_outer;
+ const int m_end;
+};
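+
+// Illustrative sketch (hypothetical helper, not part of the file): sum one
+// inner vector (a column of a column-major expression, a row of a row-major
+// one) of any dense expression through its InnerIterator.
+template<typename Derived>
+typename Derived::Scalar ei_sum_inner_vector_sketch(const MatrixBase<Derived>& m, int outer)
+{
+  typename Derived::Scalar sum(0);
+  for (typename Derived::InnerIterator it(m.derived(), outer); it; ++it)
+    sum += it.value();
+  return sum;
+}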
+
+#endif // EIGEN_COREITERATORS_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h b/extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h
new file mode 100644
index 00000000000..7119a84bd51
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h
@@ -0,0 +1,297 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H
+#define EIGEN_DYNAMIC_SPARSEMATRIX_H
+
+/** \class DynamicSparseMatrix
+ *
+ * \brief A sparse matrix class designed for matrix assembly purpose
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows
+ * random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is
+ * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows
+ * otherwise.
+ *
+ * Internally, the data are stored as a std::vector of compressed vectors. The performance of random writes might
+ * decrease as the number of nonzeros per inner vector increases. In practice, we observed very good performance
+ * up to about 100 nonzeros/vector, and it remains relatively good up to 500 nonzeros/vector.
+ *
+ * \see SparseMatrix
+ */
+template<typename _Scalar, int _Flags>
+struct ei_traits<DynamicSparseMatrix<_Scalar, _Flags> >
+{
+ typedef _Scalar Scalar;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = SparseBit | _Flags,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = OuterRandomAccessPattern
+ };
+};
+
+template<typename _Scalar, int _Flags>
+class DynamicSparseMatrix
+ : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Flags> >
+{
+ public:
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(DynamicSparseMatrix)
+    // FIXME: why are these operators already available ???
+ // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=)
+ // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=)
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+
+ protected:
+
+ enum { IsRowMajor = Base::IsRowMajor };
+ typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ int m_innerSize;
+ std::vector<CompressedStorage<Scalar> > m_data;
+
+ public:
+
+ inline int rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+ inline int cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+ inline int innerSize() const { return m_innerSize; }
+ inline int outerSize() const { return m_data.size(); }
+ inline int innerNonZeros(int j) const { return m_data[j].size(); }
+
+ std::vector<CompressedStorage<Scalar> >& _data() { return m_data; }
+ const std::vector<CompressedStorage<Scalar> >& _data() const { return m_data; }
+
+ /** \returns the coefficient value at given position \a row, \a col
+      * This operation involves a log(rho*outer_size) binary search.
+ */
+ inline Scalar coeff(int row, int col) const
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+ return m_data[outer].at(inner);
+ }
+
+ /** \returns a reference to the coefficient value at given position \a row, \a col
+      * This operation involves a log(rho*outer_size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ */
+ inline Scalar& coeffRef(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+ return m_data[outer].atWithInsertion(inner);
+ }
+
+ class InnerIterator;
+
+ inline void setZero()
+ {
+ for (int j=0; j<outerSize(); ++j)
+ m_data[j].clear();
+ }
+
+    /** \returns the number of nonzero coefficients */
+ inline int nonZeros() const
+ {
+ int res = 0;
+ for (int j=0; j<outerSize(); ++j)
+ res += m_data[j].size();
+ return res;
+ }
+
+ /** Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
+ inline void startFill(int reserveSize = 1000)
+ {
+ if (outerSize()>0)
+ {
+ int reserveSizePerVector = std::max(reserveSize/outerSize(),4);
+ for (int j=0; j<outerSize(); ++j)
+ {
+ m_data[j].clear();
+ m_data[j].reserve(reserveSizePerVector);
+ }
+ }
+ }
+
+ /** inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
+ * 1 - the coefficient does not exist yet
+      * 2 - this is the coefficient with the greatest inner coordinate for the given outer coordinate.
+      * In other words, assuming \c *this is column-major, then there must not exist any nonzero coefficient of coordinates
+ * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
+ *
+ * \see fillrand(), coeffRef()
+ */
+ inline Scalar& fill(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+ ei_assert(outer<int(m_data.size()) && inner<m_innerSize);
+ ei_assert((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner));
+ m_data[outer].append(0, inner);
+ return m_data[outer].value(m_data[outer].size()-1);
+ }
+
+ /** Like fill() but with random inner coordinates.
+ * Compared to the generic coeffRef(), the unique limitation is that we assume
+ * the coefficient does not exist yet.
+ */
+ inline Scalar& fillrand(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+
+ int startId = 0;
+ int id = m_data[outer].size() - 1;
+ m_data[outer].resize(id+2,1);
+
+ while ( (id >= startId) && (m_data[outer].index(id) > inner) )
+ {
+ m_data[outer].index(id+1) = m_data[outer].index(id);
+ m_data[outer].value(id+1) = m_data[outer].value(id);
+ --id;
+ }
+ m_data[outer].index(id+1) = inner;
+ m_data[outer].value(id+1) = 0;
+ return m_data[outer].value(id+1);
+ }
+
+ /** Does nothing. Provided for compatibility with SparseMatrix. */
+ inline void endFill() {}
+
+ void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
+ {
+ for (int j=0; j<outerSize(); ++j)
+ m_data[j].prune(reference,epsilon);
+ }
+
+ /** Resize the matrix without preserving the data (the matrix is set to zero)
+ */
+ void resize(int rows, int cols)
+ {
+ const int outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ setZero();
+ if (int(m_data.size()) != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ void resizeAndKeepData(int rows, int cols)
+ {
+ const int outerSize = IsRowMajor ? rows : cols;
+ const int innerSize = IsRowMajor ? cols : rows;
+ if (m_innerSize>innerSize)
+ {
+ // remove all coefficients with innerCoord>=innerSize
+ // TODO
+ std::cerr << "not implemented yet\n";
+ exit(2);
+ }
+    if (int(m_data.size()) != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ inline DynamicSparseMatrix()
+ : m_innerSize(0), m_data(0)
+ {
+ ei_assert(innerSize()==0 && outerSize()==0);
+ }
+
+ inline DynamicSparseMatrix(int rows, int cols)
+ : m_innerSize(0)
+ {
+ resize(rows, cols);
+ }
+
+ template<typename OtherDerived>
+ inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_innerSize(0)
+ {
+ *this = other.derived();
+ }
+
+ inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
+ : Base(), m_innerSize(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(DynamicSparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_innerSize, other.m_innerSize);
+ //std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline DynamicSparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ return SparseMatrixBase<DynamicSparseMatrix>::operator=(other.derived());
+ }
+
+ /** Destructor */
+ inline ~DynamicSparseMatrix() {}
+};
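+
+// Assembly sketch (illustrative only; this helper is not part of the file):
+// coeffRef() gives fully random read/write access, while the cheaper
+// startFill()/fillrand()/endFill() protocol assumes every written coefficient
+// is new.
+inline void ei_dynamic_sparse_assembly_sketch()
+{
+  DynamicSparseMatrix<double> m(100,100);
+  m.coeffRef(3,7) += 1.0;  // random access, sorted insertion if needed
+  m.startFill(400);        // reset and reserve ~4 nonzeros per inner vector
+  m.fillrand(50,2) = 2.0;  // random inner coordinate, coefficient must be new
+  m.fillrand(10,2) = 3.0;
+  m.endFill();             // no-op, kept for API compatibility with SparseMatrix
+}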
+
+template<typename Scalar, int _Flags>
+class DynamicSparseMatrix<Scalar,_Flags>::InnerIterator : public SparseVector<Scalar,_Flags>::InnerIterator
+{
+ typedef typename SparseVector<Scalar,_Flags>::InnerIterator Base;
+ public:
+ InnerIterator(const DynamicSparseMatrix& mat, int outer)
+ : Base(mat.m_data[outer]), m_outer(outer)
+ {}
+
+ inline int row() const { return IsRowMajor ? m_outer : Base::index(); }
+ inline int col() const { return IsRowMajor ? Base::index() : m_outer; }
+
+
+ protected:
+ const int m_outer;
+};
+
+#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h b/extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h
new file mode 100644
index 00000000000..f4935d8344e
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h
@@ -0,0 +1,175 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
+#define EIGEN_MAPPED_SPARSEMATRIX_H
+
+/** \class MappedSparseMatrix
+ *
+ * \brief A sparse matrix view mapping existing compressed storage arrays that it does not own
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ */
+template<typename _Scalar, int _Flags>
+struct ei_traits<MappedSparseMatrix<_Scalar, _Flags> > : ei_traits<SparseMatrix<_Scalar, _Flags> >
+{};
+
+template<typename _Scalar, int _Flags>
+class MappedSparseMatrix
+ : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags> >
+{
+ public:
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(MappedSparseMatrix)
+
+ protected:
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ int m_outerSize;
+ int m_innerSize;
+ int m_nnz;
+ int* m_outerIndex;
+ int* m_innerIndices;
+ Scalar* m_values;
+
+ public:
+
+ inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline int innerSize() const { return m_innerSize; }
+ inline int outerSize() const { return m_outerSize; }
+ inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+
+ //----------------------------------------
+ // direct access interface
+ inline const Scalar* _valuePtr() const { return m_values; }
+ inline Scalar* _valuePtr() { return m_values; }
+
+ inline const int* _innerIndexPtr() const { return m_innerIndices; }
+ inline int* _innerIndexPtr() { return m_innerIndices; }
+
+ inline const int* _outerIndexPtr() const { return m_outerIndex; }
+ inline int* _outerIndexPtr() { return m_outerIndex; }
+ //----------------------------------------
+
+ inline Scalar coeff(int row, int col) const
+ {
+      const int outer = IsRowMajor ? row : col;
+      const int inner = IsRowMajor ? col : row;
+
+ int start = m_outerIndex[outer];
+ int end = m_outerIndex[outer+1];
+ if (start==end)
+ return Scalar(0);
+ else if (end>0 && inner==m_innerIndices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+
+ const int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const int id = r-&m_innerIndices[0];
+ return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
+ }
+
+ inline Scalar& coeffRef(int row, int col)
+ {
+      const int outer = IsRowMajor ? row : col;
+      const int inner = IsRowMajor ? col : row;
+
+ int start = m_outerIndex[outer];
+ int end = m_outerIndex[outer+1];
+ ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
+ const int id = r-&m_innerIndices[0];
+ ei_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ return m_values[id];
+ }
+
+ class InnerIterator;
+
+    /** \returns the number of nonzero coefficients */
+ inline int nonZeros() const { return m_nnz; }
+
+ inline MappedSparseMatrix(int rows, int cols, int nnz, int* outerIndexPtr, int* innerIndexPtr, Scalar* valuePtr)
+ : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
+ m_innerIndices(innerIndexPtr), m_values(valuePtr)
+ {}
+
+ #ifdef EIGEN_TAUCS_SUPPORT
+ explicit MappedSparseMatrix(taucs_ccs_matrix& taucsMatrix);
+ #endif
+
+ #ifdef EIGEN_CHOLMOD_SUPPORT
+ explicit MappedSparseMatrix(cholmod_sparse& cholmodMatrix);
+ #endif
+
+ #ifdef EIGEN_SUPERLU_SUPPORT
+ explicit MappedSparseMatrix(SluMatrix& sluMatrix);
+ #endif
+
+ /** Empty destructor */
+ inline ~MappedSparseMatrix() {}
+};
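+
+// Mapping sketch (illustrative only; this helper is not part of the file):
+// wrap existing compressed column storage without copying it. The arrays
+// below encode a 3x3 matrix with nonzeros (0,0)=1, (2,0)=2 and (1,2)=3.
+inline void ei_mapped_sparse_sketch()
+{
+  static int    outer[4]  = { 0, 2, 2, 3 };  // column start offsets
+  static int    inner[3]  = { 0, 2, 1 };     // row indices
+  static double values[3] = { 1.0, 2.0, 3.0 };
+  MappedSparseMatrix<double> m(3, 3, 3, outer, inner, values);
+  double x = m.coeff(2,0);                   // 2.0, read through the mapped arrays
+  (void)x;
+}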
+
+template<typename Scalar, int _Flags>
+class MappedSparseMatrix<Scalar,_Flags>::InnerIterator
+{
+ public:
+ InnerIterator(const MappedSparseMatrix& mat, int outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_id(mat._outerIndexPtr()[outer]),
+ m_start(m_id),
+ m_end(mat._outerIndexPtr()[outer+1])
+ {}
+
+ template<unsigned int Added, unsigned int Removed>
+ InnerIterator(const Flagged<MappedSparseMatrix,Added,Removed>& mat, int outer)
+      : m_matrix(mat._expression()), m_outer(outer), m_id(m_matrix._outerIndexPtr()[outer]),
+ m_start(m_id), m_end(m_matrix._outerIndexPtr()[outer+1])
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_matrix._valuePtr()[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix._valuePtr()[m_id]); }
+
+ inline int index() const { return m_matrix._innerIndexPtr()[m_id]; }
+ inline int row() const { return IsRowMajor ? m_outer : index(); }
+ inline int col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
+
+ protected:
+ const MappedSparseMatrix& m_matrix;
+ const int m_outer;
+ int m_id;
+ const int m_start;
+ const int m_end;
+};
+
+#endif // EIGEN_MAPPED_SPARSEMATRIX_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h b/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h
new file mode 100644
index 00000000000..d908e315f3b
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h
@@ -0,0 +1,330 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_RANDOMSETTER_H
+#define EIGEN_RANDOMSETTER_H
+
+/** Represents a std::map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct StdMapTraits
+{
+ typedef int KeyType;
+ typedef std::map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 1
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+
+#ifdef EIGEN_UNORDERED_MAP_SUPPORT
+/** Represents a std::unordered_map
+ *
+ * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file
+ * yourself making sure that unordered_map is defined in the std namespace.
+ *
+ * For instance, with a recent version of gcc you can either enable the C++0x standard (-std=c++0x) or do:
+ * \code
+ * #include <tr1/unordered_map>
+ * #define EIGEN_UNORDERED_MAP_SUPPORT
+ * namespace std {
+ * using std::tr1::unordered_map;
+ * }
+ * \endcode
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct StdUnorderedMapTraits
+{
+ typedef int KeyType;
+ typedef std::unordered_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+#endif // EIGEN_UNORDERED_MAP_SUPPORT
+
+#ifdef _DENSE_HASH_MAP_H_
+/** Represents a google::dense_hash_map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct GoogleDenseHashMapTraits
+{
+ typedef int KeyType;
+ typedef google::dense_hash_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type& map, const KeyType& k)
+ { map.set_empty_key(k); }
+};
+#endif
+
+#ifdef _SPARSE_HASH_MAP_H_
+/** Represents a google::sparse_hash_map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct GoogleSparseHashMapTraits
+{
+ typedef int KeyType;
+ typedef google::sparse_hash_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+#endif
+
+/** \class RandomSetter
+ *
+ * \brief The RandomSetter is a wrapper object that allows setting/updating a sparse matrix with random access
+ *
+ * \param SparseMatrixType the type of the sparse matrix we are updating
+ * \param MapTraits a traits class representing the map implementation used for the temporary sparse storage.
+ * Its default value depends on the system.
+ * \param OuterPacketBits defines the number of rows (or columns) managed by a single map object,
+ * as a power-of-two exponent.
+ *
+ * This class temporarily represents a sparse matrix object using a generic map implementation allowing for
+ * efficient random access. The conversion from the compressed representation to a hash_map object is performed
+ * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy
+ * suggests the use of nested blocks, as in this example:
+ *
+ * \code
+ * SparseMatrix<double> m(rows,cols);
+ * {
+ * RandomSetter<SparseMatrix<double> > w(m);
+ * // don't use m but w instead with read/write random access to the coefficients:
+ * for(;;)
+ * w(rand(),rand()) = rand();
+ * }
+ * // when w is deleted, the data are copied back to m
+ * // and m is ready to use.
+ * \endcode
+ *
+ * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would
+ * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter
+ * uses multiple hash_maps, each representing 2^OuterPacketBits columns or rows according to the storage order.
+ * To reach optimal performance, this value should be adjusted according to the average number of nonzeros
+ * per row/column.
+ *
+ * The possible values for the template parameter MapTraits are:
+ * - \b StdMapTraits: corresponds to std::map. (does not perform very well)
+ * - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC)
+ * - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption)
+ * - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance)
+ *
+ * The default map implementation depends on availability, and the preferred order is:
+ * GoogleDenseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits.
+ *
+ * For performance and memory consumption reasons it is highly recommended to use one of
+ * Google's hash_map implementations. To enable support for them, you have two options:
+ * - \#include <google/dense_hash_map> yourself \b before the Eigen/Sparse header
+ * - define EIGEN_GOOGLEHASH_SUPPORT
+ * In the latter case the inclusion of <google/dense_hash_map> is made for you.
+ *
+ * \see http://code.google.com/p/google-sparsehash/
+ */
+template<typename SparseMatrixType,
+ template <typename T> class MapTraits =
+#if defined _DENSE_HASH_MAP_H_
+ GoogleDenseHashMapTraits
+#elif defined _HASH_MAP
+ GnuHashMapTraits
+#else
+ StdMapTraits
+#endif
+ ,int OuterPacketBits = 6>
+class RandomSetter
+{
+ typedef typename ei_traits<SparseMatrixType>::Scalar Scalar;
+ struct ScalarWrapper
+ {
+ ScalarWrapper() : value(0) {}
+ Scalar value;
+ };
+ typedef typename MapTraits<ScalarWrapper>::KeyType KeyType;
+ typedef typename MapTraits<ScalarWrapper>::Type HashMapType;
+ static const int OuterPacketMask = (1 << OuterPacketBits) - 1;
+ enum {
+ SwapStorage = 1 - MapTraits<ScalarWrapper>::IsSorted,
+ TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0,
+ SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor,
+ IsUpperTriangular = SparseMatrixType::Flags & UpperTriangularBit,
+ IsLowerTriangular = SparseMatrixType::Flags & LowerTriangularBit
+ };
+
+ public:
+
+ /** Constructs a random setter object from the sparse matrix \a target
+ *
+      * Note that the initial values of \a target are imported. If you want to re-set
+ * a sparse matrix from scratch, then you must set it to zero first using the
+ * setZero() function.
+ */
+ inline RandomSetter(SparseMatrixType& target)
+ : mp_target(&target)
+ {
+ const int outerSize = SwapStorage ? target.innerSize() : target.outerSize();
+ const int innerSize = SwapStorage ? target.outerSize() : target.innerSize();
+ m_outerPackets = outerSize >> OuterPacketBits;
+ if (outerSize&OuterPacketMask)
+ m_outerPackets += 1;
+ m_hashmaps = new HashMapType[m_outerPackets];
+ // compute number of bits needed to store inner indices
+ int aux = innerSize - 1;
+ m_keyBitsOffset = 0;
+ while (aux)
+ {
+ ++m_keyBitsOffset;
+ aux = aux >> 1;
+ }
+ KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset));
+ for (int k=0; k<m_outerPackets; ++k)
+ MapTraits<ScalarWrapper>::setInvalidKey(m_hashmaps[k],ik);
+
+ // insert current coeffs
+ for (int j=0; j<mp_target->outerSize(); ++j)
+ for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it)
+ (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value();
+ }
+
+ /** Destructor updating back the sparse matrix target */
+ ~RandomSetter()
+ {
+ KeyType keyBitsMask = (1<<m_keyBitsOffset)-1;
+ if (!SwapStorage) // also means the map is sorted
+ {
+ mp_target->startFill(nonZeros());
+ for (int k=0; k<m_outerPackets; ++k)
+ {
+ const int outerOffset = (1<<OuterPacketBits) * k;
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const int outer = (it->first >> m_keyBitsOffset) + outerOffset;
+ const int inner = it->first & keyBitsMask;
+ mp_target->fill(TargetRowMajor ? outer : inner, TargetRowMajor ? inner : outer) = it->second.value;
+ }
+ }
+ mp_target->endFill();
+ }
+ else
+ {
+ VectorXi positions(mp_target->outerSize());
+ positions.setZero();
+ // pass 1
+ for (int k=0; k<m_outerPackets; ++k)
+ {
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const int outer = it->first & keyBitsMask;
+ ++positions[outer];
+ }
+ }
+ // prefix sum
+ int count = 0;
+ for (int j=0; j<mp_target->outerSize(); ++j)
+ {
+ int tmp = positions[j];
+ mp_target->_outerIndexPtr()[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ mp_target->_outerIndexPtr()[mp_target->outerSize()] = count;
+ mp_target->resizeNonZeros(count);
+ // pass 2
+ for (int k=0; k<m_outerPackets; ++k)
+ {
+ const int outerOffset = (1<<OuterPacketBits) * k;
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const int inner = (it->first >> m_keyBitsOffset) + outerOffset;
+ const int outer = it->first & keyBitsMask;
+ // sorted insertion
+ // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients,
+          // moreover those 2^OuterPacketBits coeffs are likely to be sparse, and so only a
+ // small fraction of them have to be sorted, whence the following simple procedure:
+ int posStart = mp_target->_outerIndexPtr()[outer];
+ int i = (positions[outer]++) - 1;
+ while ( (i >= posStart) && (mp_target->_innerIndexPtr()[i] > inner) )
+ {
+ mp_target->_valuePtr()[i+1] = mp_target->_valuePtr()[i];
+ mp_target->_innerIndexPtr()[i+1] = mp_target->_innerIndexPtr()[i];
+ --i;
+ }
+ mp_target->_innerIndexPtr()[i+1] = inner;
+ mp_target->_valuePtr()[i+1] = it->second.value;
+ }
+ }
+ }
+ delete[] m_hashmaps;
+ }
+
+ /** \returns a reference to the coefficient at given coordinates \a row, \a col */
+ Scalar& operator() (int row, int col)
+ {
+ ei_assert(((!IsUpperTriangular) || (row<=col)) && "Invalid access to an upper triangular matrix");
+    ei_assert(((!IsLowerTriangular) || (col<=row)) && "Invalid access to a lower triangular matrix");
+ const int outer = SetterRowMajor ? row : col;
+ const int inner = SetterRowMajor ? col : row;
+ const int outerMajor = outer >> OuterPacketBits; // index of the packet/map
+ const int outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
+ const KeyType key = (KeyType(outerMinor)<<m_keyBitsOffset) | inner;
+ return m_hashmaps[outerMajor][key].value;
+ }
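+
+  // Key layout sketch (illustrative numbers, assuming the default sorted
+  // StdMapTraits and a 1000x1000 column-major target): innerSize=1000 gives
+  // m_keyBitsOffset=10, so coefficient (row=123, col=300) goes to the map
+  // outerMajor = 300>>6 = 4 under key ((300 & 63) << 10) | 123.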
+
+  /** \returns the number of nonzero coefficients
+    *
+    * \note Depending on the underlying map/hash_map implementation,
+ * this function might be quite expensive.
+ */
+ int nonZeros() const
+ {
+ int nz = 0;
+ for (int k=0; k<m_outerPackets; ++k)
+ nz += m_hashmaps[k].size();
+ return nz;
+ }
+
+
+ protected:
+
+ HashMapType* m_hashmaps;
+ SparseMatrixType* mp_target;
+ int m_outerPackets;
+ unsigned char m_keyBitsOffset;
+};
+
+#endif // EIGEN_RANDOMSETTER_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseAssign.h b/extern/Eigen2/Eigen/src/Sparse/SparseAssign.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseAssign.h
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseBlock.h b/extern/Eigen2/Eigen/src/Sparse/SparseBlock.h
new file mode 100644
index 00000000000..c39066676b6
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseBlock.h
@@ -0,0 +1,449 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Daniel Gomez Ferro <dgomezferro@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_BLOCK_H
+#define EIGEN_SPARSE_BLOCK_H
+
+template<typename MatrixType, int Size>
+struct ei_traits<SparseInnerVectorSet<MatrixType, Size> >
+{
+ typedef typename ei_traits<MatrixType>::Scalar Scalar;
+ enum {
+ IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
+ Flags = MatrixType::Flags,
+ RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
+ CoeffReadCost = MatrixType::CoeffReadCost
+ };
+};
+
+template<typename MatrixType, int Size>
+class SparseInnerVectorSet : ei_no_assignment_operator,
+ public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
+{
+ enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
+ public:
+
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer)
+ {}
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ ei_assert(Size!=Dynamic);
+ ei_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+// template<typename OtherDerived>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ int m_outerStart;
+ const ei_int_if_dynamic<Size> m_outerSize;
+
+};
+
+/***************************************************************************
+* specialisation for DynamicSparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, int Size>
+class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size>
+ : public SparseMatrixBase<SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size> >
+{
+ typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType;
+ enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
+ public:
+
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer)
+ {}
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ ei_assert(Size!=Dynamic);
+ ei_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit))
+ {
+ // need to transpose => perform a block evaluation followed by a big swap
+ DynamicSparseMatrix<Scalar,IsRowMajor?RowMajorBit:0> aux(other);
+ *this = aux.markAsRValue();
+ }
+ else
+ {
+ // evaluate/copy vector per vector
+ for (int j=0; j<m_outerSize.value(); ++j)
+ {
+ SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
+ m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
+ }
+ }
+ return *this;
+ }
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ int m_outerStart;
+ const ei_int_if_dynamic<Size> m_outerSize;
+
+};
+
+
+/***************************************************************************
+* specialisation for SparseMatrix
+***************************************************************************/
+/*
+template<typename _Scalar, int _Options, int Size>
+class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options>, Size>
+ : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options>, Size> >
+{
+ typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType;
+ enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
+ public:
+
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer)
+ {}
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
+ : m_matrix(matrix), m_outerStart(outer)
+ {
+ ei_assert(Size==1);
+ ei_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit))
+ {
+ // need to transpose => perform a block evaluation followed by a big swap
+ DynamicSparseMatrix<Scalar,IsRowMajor?RowMajorBit:0> aux(other);
+ *this = aux.markAsRValue();
+ }
+ else
+ {
+ // evaluate/copy vector per vector
+ for (int j=0; j<m_outerSize.value(); ++j)
+ {
+ SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
+ m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
+ }
+ }
+ return *this;
+ }
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
+
+ inline const Scalar* _valuePtr() const
+ { return m_matrix._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+ inline const int* _innerIndexPtr() const
+ { return m_matrix._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+ inline const int* _outerIndexPtr() const { return m_matrix._outerIndexPtr() + m_outerStart; }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ int m_outerStart;
+ const ei_int_if_dynamic<Size> m_outerSize;
+
+};
+*/
+//----------
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(int i)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(int i) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(int i)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(int i) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major).
+ */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(int outer)
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major). Read-only.
+ */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(int outer) const
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
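+
+// Illustrative sketch (assumed names, not part of this file): for a
+// column-major SparseMatrix<double> m,
+// m.col(j); // the j-th column as a sparse inner vector
+// m.innerVector(j); // the same vector, independent of storage order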
+
+//----------
+
+/** \returns a set of \a size rows of the matrix \c *this, starting at row \a start. For row-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(int start, int size)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns a set of \a size rows of the matrix \c *this, starting at row \a start. For row-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(int start, int size) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns a set of \a size columns of the matrix \c *this, starting at column \a start. For column-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(int start, int size)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns a set of \a size columns of the matrix \c *this, starting at column \a start. For column-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(int start, int size) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this
+ * starting at \a outerStart, if \c *this is col-major (resp. row-major).
+ */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(int outerStart, int outerSize)
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+
+/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this
+ * starting at \a outerStart, if \c *this is col-major (resp. row-major). Read-only.
+ */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(int outerStart, int outerSize) const
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
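+
+// Illustrative sketch (assumed names): for a column-major SparseMatrix m,
+// m.subcols(2,5); // columns 2..6 as a SparseInnerVectorSet
+// m.innerVectors(2,5); // equivalent, independent of storage order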
+
+#if 0
+template<typename MatrixType, int BlockRows, int BlockCols, int PacketAccess>
+class Block<MatrixType,BlockRows,BlockCols,PacketAccess,IsSparse>
+ : public SparseMatrixBase<Block<MatrixType,BlockRows,BlockCols,PacketAccess,IsSparse> >
+{
+public:
+
+ _EIGEN_GENERIC_PUBLIC_INTERFACE(Block, SparseMatrixBase<Block>)
+ class InnerIterator;
+
+ /** Column or Row constructor
+ */
+ inline Block(const MatrixType& matrix, int i)
+ : m_matrix(matrix),
+ // It is a row if and only if BlockRows==1 and BlockCols==MatrixType::ColsAtCompileTime,
+ // and it is a column if and only if BlockRows==MatrixType::RowsAtCompileTime and BlockCols==1,
+ // all other cases are invalid.
+ // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway.
+ m_startRow( (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0),
+ m_startCol( (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+ m_blockRows(matrix.rows()), // if it is a row, then m_blockRows has a fixed size of 1, so it is harmless to overwrite it
+ m_blockCols(matrix.cols()) // same for m_blockCols
+ {
+ ei_assert( (i>=0) && (
+ ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i<matrix.rows())
+ ||((BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) && i<matrix.cols())));
+ }
+
+ /** Fixed-size constructor
+ */
+ inline Block(const MatrixType& matrix, int startRow, int startCol)
+ : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
+ m_blockRows(matrix.rows()), m_blockCols(matrix.cols())
+ {
+ EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
+ ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows()
+ && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols());
+ }
+
+ /** Dynamic-size constructor
+ */
+ inline Block(const MatrixType& matrix,
+ int startRow, int startCol,
+ int blockRows, int blockCols)
+ : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
+ m_blockRows(blockRows), m_blockCols(blockCols)
+ {
+ ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
+ && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
+ ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows()
+ && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols());
+ }
+
+ inline int rows() const { return m_blockRows.value(); }
+ inline int cols() const { return m_blockCols.value(); }
+
+ inline int stride(void) const { return m_matrix.stride(); }
+
+ inline Scalar& coeffRef(int row, int col)
+ {
+ return m_matrix.const_cast_derived()
+ .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+ }
+
+ inline const Scalar coeff(int row, int col) const
+ {
+ return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
+ }
+
+ inline Scalar& coeffRef(int index)
+ {
+ return m_matrix.const_cast_derived()
+ .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ }
+
+ inline const Scalar coeff(int index) const
+ {
+ return m_matrix
+ .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ const ei_int_if_dynamic<MatrixType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+ const ei_int_if_dynamic<MatrixType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+ const ei_int_if_dynamic<RowsAtCompileTime> m_blockRows;
+ const ei_int_if_dynamic<ColsAtCompileTime> m_blockCols;
+
+};
+#endif
+
+#endif // EIGEN_SPARSE_BLOCK_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h
new file mode 100644
index 00000000000..2206883cc76
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h
@@ -0,0 +1,175 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_H
+#define EIGEN_SPARSE_CWISE_H
+
+/** \internal
+ * convenience macro to define the return type of a cwise binary operation */
+#define EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(OP) \
+ SparseCwiseBinaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType, OtherDerived>
+
+#define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
+ SparseCwiseBinaryOp< \
+ ei_scalar_product_op< \
+ typename ei_scalar_product_traits< \
+ typename ei_traits<ExpressionType>::Scalar, \
+ typename ei_traits<OtherDerived>::Scalar \
+ >::ReturnType \
+ >, \
+ ExpressionType, \
+ OtherDerived \
+ >
+
+/** \internal
+ * convenience macro to define the return type of a cwise unary operation */
+#define EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(OP) \
+ SparseCwiseUnaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType>
+
+/** \internal
+ * convenience macro to define the return type of a cwise comparison to a scalar */
+/*#define EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(OP) \
+ CwiseBinaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType, \
+ NestByValue<typename ExpressionType::ConstantReturnType> >*/
+
+template<typename ExpressionType> class SparseCwise
+{
+ public:
+
+ typedef typename ei_traits<ExpressionType>::Scalar Scalar;
+ typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
+ ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
+ typedef CwiseUnaryOp<ei_scalar_add_op<Scalar>, ExpressionType> ScalarAddReturnType;
+
+ inline SparseCwise(const ExpressionType& matrix) : m_matrix(matrix) {}
+
+ /** \internal */
+ inline const ExpressionType& _expression() const { return m_matrix; }
+
+ template<typename OtherDerived>
+ const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+ operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+ operator*(const MatrixBase<OtherDerived> &other) const;
+
+// template<typename OtherDerived>
+// const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
+// operator/(const SparseMatrixBase<OtherDerived> &other) const;
+//
+// template<typename OtherDerived>
+// const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
+// operator/(const MatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)
+ min(const SparseMatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)
+ max(const SparseMatrixBase<OtherDerived> &other) const;
+
+ const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) abs() const;
+ const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) abs2() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op) square() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op) cube() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op) inverse() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op) sqrt() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op) exp() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op) log() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op) cos() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op) sin() const;
+// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op) pow(const Scalar& exponent) const;
+
+ template<typename OtherDerived>
+ inline ExpressionType& operator*=(const SparseMatrixBase<OtherDerived> &other);
+
+// template<typename OtherDerived>
+// inline ExpressionType& operator/=(const SparseMatrixBase<OtherDerived> &other);
+
+ /*
+ template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)
+ operator<(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)
+ operator<=(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)
+ operator>(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)
+ operator>=(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)
+ operator==(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)
+ operator!=(const MatrixBase<OtherDerived>& other) const;
+
+ // comparisons to a scalar value
+ const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)
+ operator<(Scalar s) const;
+
+ const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)
+ operator<=(Scalar s) const;
+
+ const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)
+ operator>(Scalar s) const;
+
+ const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)
+ operator>=(Scalar s) const;
+
+ const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)
+ operator==(Scalar s) const;
+
+ const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)
+ operator!=(Scalar s) const;
+ */
+
+ // allows extending SparseCwise outside of Eigen
+ #ifdef EIGEN_SPARSE_CWISE_PLUGIN
+ #include EIGEN_SPARSE_CWISE_PLUGIN
+ #endif
+
+ protected:
+ ExpressionTypeNested m_matrix;
+};
+
+template<typename Derived>
+inline const SparseCwise<Derived>
+SparseMatrixBase<Derived>::cwise() const
+{
+ return derived();
+}
+
+template<typename Derived>
+inline SparseCwise<Derived>
+SparseMatrixBase<Derived>::cwise()
+{
+ return derived();
+}
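+
+// Illustrative sketch (assumed names): for sparse matrices a and b of equal
+// size and storage order,
+// a.cwise() * b; // coefficient-wise (Hadamard) product, sparse result
+// a.cwise().min(b); // coefficient-wise minimum
+// a.cwise().abs(); // coefficient-wise absolute value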
+
+#endif // EIGEN_SPARSE_CWISE_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h
new file mode 100644
index 00000000000..d19970efcb1
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h
@@ -0,0 +1,442 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
+#define EIGEN_SPARSE_CWISE_BINARY_OP_H
+
+// Here we have to handle 3 cases:
+// 1 - sparse op dense
+// 2 - dense op sparse
+// 3 - sparse op sparse
+// We also need to implement a 4th iterator for:
+// 4 - dense op dense
+// Finally, we also need to distinguish between the product and other operations:
+// configuration returned mode
+// 1 - sparse op dense product sparse
+// generic dense
+// 2 - dense op sparse product sparse
+// generic dense
+// 3 - sparse op sparse product sparse
+// generic sparse
+// 4 - dense op dense product dense
+// generic dense
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct ei_traits<SparseCwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+ typedef typename ei_result_of<
+ BinaryOp(
+ typename Lhs::Scalar,
+ typename Rhs::Scalar
+ )
+ >::type Scalar;
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+ typedef typename ei_unref<LhsNested>::type _LhsNested;
+ typedef typename ei_unref<RhsNested>::type _RhsNested;
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+ RowsAtCompileTime = Lhs::RowsAtCompileTime,
+ ColsAtCompileTime = Lhs::ColsAtCompileTime,
+ MaxRowsAtCompileTime = Lhs::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = Lhs::MaxColsAtCompileTime,
+ Flags = (int(LhsFlags) | int(RhsFlags)) & HereditaryBits,
+ CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + ei_functor_traits<BinaryOp>::Cost
+ };
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class SparseCwiseBinaryOp : ei_no_assignment_operator,
+ public SparseMatrixBase<SparseCwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+ public:
+
+ class InnerIterator;
+
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseCwiseBinaryOp)
+ typedef typename ei_traits<SparseCwiseBinaryOp>::LhsNested LhsNested;
+ typedef typename ei_traits<SparseCwiseBinaryOp>::RhsNested RhsNested;
+ typedef typename ei_unref<LhsNested>::type _LhsNested;
+ typedef typename ei_unref<RhsNested>::type _RhsNested;
+
+ EIGEN_STRONG_INLINE SparseCwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp())
+ : m_lhs(lhs), m_rhs(rhs), m_functor(func)
+ {
+ EIGEN_STATIC_ASSERT((_LhsNested::Flags&RowMajorBit)==(_RhsNested::Flags&RowMajorBit),
+ BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER)
+ EIGEN_STATIC_ASSERT((ei_functor_allows_mixing_real_and_complex<BinaryOp>::ret
+ ? int(ei_is_same_type<typename Lhs::RealScalar, typename Rhs::RealScalar>::ret)
+ : int(ei_is_same_type<typename Lhs::Scalar, typename Rhs::Scalar>::ret)),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ // require the sizes to match
+ EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
+ ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
+ }
+
+ EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE int cols() const { return m_lhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+ EIGEN_STRONG_INLINE const BinaryOp& functor() const { return m_functor; }
+
+ protected:
+ const LhsNested m_lhs;
+ const RhsNested m_rhs;
+ const BinaryOp m_functor;
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
+ int _LhsStorageMode = int(Lhs::Flags) & SparseBit,
+ int _RhsStorageMode = int(Rhs::Flags) & SparseBit>
+class ei_sparse_cwise_binary_op_inner_iterator_selector;
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class SparseCwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator
+ : public ei_sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs, typename SparseCwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator>
+{
+ public:
+ typedef ei_sparse_cwise_binary_op_inner_iterator_selector<
+ BinaryOp,Lhs,Rhs, InnerIterator> Base;
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseCwiseBinaryOp& binOp, int outer)
+ : Base(binOp,outer)
+ {}
+};
+
+/***************************************************************************
+* Implementation of inner-iterators
+***************************************************************************/
+
+// template<typename T> struct ei_func_is_conjunction { enum { ret = false }; };
+// template<typename T> struct ei_func_is_conjunction<ei_scalar_product_op<T> > { enum { ret = true }; };
+
+// TODO generalize the ei_scalar_product_op specialization to all conjunctions, if any!
+
+// sparse - sparse (generic)
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class ei_sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, IsSparse, IsSparse>
+{
+ typedef SparseCwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename ei_traits<CwiseBinaryXpr>::Scalar Scalar;
+ typedef typename ei_traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ public:
+
+ EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+ ++m_lhsIter;
+ ++m_rhsIter;
+ }
+ else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), Scalar(0));
+ ++m_lhsIter;
+ }
+ else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
+ {
+ m_id = m_rhsIter.index();
+ m_value = m_functor(Scalar(0), m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ {
+ m_id = -1;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE int index() const { return m_id; }
+ EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ int m_id;
+};
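+
+// For example (illustrative): adding inner vectors with nonzeros at indices
+// {0,2,5} and {2,3} visits the union {0,2,3,5}; at index 2 both iterators
+// advance, elsewhere the missing side contributes Scalar(0).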
+
+// sparse - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class ei_sparse_cwise_binary_op_inner_iterator_selector<ei_scalar_product_op<T>, Lhs, Rhs, Derived, IsSparse, IsSparse>
+{
+ typedef ei_scalar_product_op<T> BinaryFunc;
+ typedef SparseCwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename ei_traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ public:
+
+ EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ ++m_rhsIter;
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+};
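+
+// For example (illustrative): multiplying inner vectors with nonzeros at
+// indices {0,2,5} and {2,3} only visits the intersection {2}; both iterators
+// are advanced until their indices match, so explicit zeros are never produced.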
+
+// sparse - dense (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class ei_sparse_cwise_binary_op_inner_iterator_selector<ei_scalar_product_op<T>, Lhs, Rhs, Derived, IsSparse, IsDense>
+{
+ typedef ei_scalar_product_op<T> BinaryFunc;
+ typedef SparseCwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename ei_traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename ei_traits<CwiseBinaryXpr>::RhsNested RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
+ : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_lhsIter.value(),
+ m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+
+ EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
+ protected:
+ const RhsNested m_rhs;
+ LhsIterator m_lhsIter;
+ const BinaryFunc m_functor;
+ const int m_outer;
+};
+
+// dense - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class ei_sparse_cwise_binary_op_inner_iterator_selector<ei_scalar_product_op<T>, Lhs, Rhs, Derived, IsDense, IsSparse>
+{
+ typedef ei_scalar_product_op<T> BinaryFunc;
+ typedef SparseCwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
+ : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_rhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE int index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE int row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE int col() const { return m_rhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
+ protected:
+ const CwiseBinaryXpr& m_xpr;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+ const int m_outer;
+};
+
+
+/***************************************************************************
+* Implementation of SparseMatrixBase and SparseCwise functions/operators
+***************************************************************************/
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const SparseCwiseBinaryOp<ei_scalar_difference_op<typename ei_traits<Derived>::Scalar>,
+ Derived, OtherDerived>
+SparseMatrixBase<Derived>::operator-(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return SparseCwiseBinaryOp<ei_scalar_difference_op<Scalar>,
+ Derived, OtherDerived>(derived(), other.derived());
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
+{
+ return *this = derived() - other.derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const SparseCwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
+SparseMatrixBase<Derived>::operator+(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return SparseCwiseBinaryOp<ei_scalar_sum_op<Scalar>, Derived, OtherDerived>(derived(), other.derived());
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
+{
+ return *this = derived() + other.derived();
+}
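+
+// Illustrative sketch (assumed names): with sparse matrices a, b, c of equal
+// size and storage order,
+// c = a + b; // evaluated through the generic sparse-sparse inner iterator
+// c -= a; // rewritten as c = c - a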
+
+template<typename ExpressionType>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+SparseCwise<ExpressionType>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
+}
+
+template<typename ExpressionType>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+SparseCwise<ExpressionType>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
+}
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
+// SparseCwise<ExpressionType>::operator/(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived());
+// }
+//
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
+// SparseCwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived());
+// }
+
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline ExpressionType& SparseCwise<ExpressionType>::operator*=(const SparseMatrixBase<OtherDerived> &other)
+{
+ return m_matrix.const_cast_derived() = _expression() * other.derived();
+}
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// inline ExpressionType& SparseCwise<ExpressionType>::operator/=(const SparseMatrixBase<OtherDerived> &other)
+// {
+// return m_matrix.const_cast_derived() = *this / other;
+// }
+
+template<typename ExpressionType>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)
+SparseCwise<ExpressionType>::min(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)(_expression(), other.derived());
+}
+
+template<typename ExpressionType>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)
+SparseCwise<ExpressionType>::max(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)(_expression(), other.derived());
+}
+
+// template<typename Derived>
+// template<typename CustomBinaryOp, typename OtherDerived>
+// EIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
+// SparseMatrixBase<Derived>::binaryExpr(const SparseMatrixBase<OtherDerived> &other, const CustomBinaryOp& func) const
+// {
+// return CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>(derived(), other.derived(), func);
+// }
+
+#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h
new file mode 100644
index 00000000000..b11c0f8a377
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h
@@ -0,0 +1,183 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
+#define EIGEN_SPARSE_CWISE_UNARY_OP_H
+
+template<typename UnaryOp, typename MatrixType>
+struct ei_traits<SparseCwiseUnaryOp<UnaryOp, MatrixType> > : ei_traits<MatrixType>
+{
+ typedef typename ei_result_of<
+ UnaryOp(typename MatrixType::Scalar)
+ >::type Scalar;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
+ enum {
+ CoeffReadCost = _MatrixTypeNested::CoeffReadCost + ei_functor_traits<UnaryOp>::Cost
+ };
+};
+
+template<typename UnaryOp, typename MatrixType>
+class SparseCwiseUnaryOp : ei_no_assignment_operator,
+ public SparseMatrixBase<SparseCwiseUnaryOp<UnaryOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+// typedef typename ei_unref<LhsNested>::type _LhsNested;
+
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseCwiseUnaryOp)
+
+ inline SparseCwiseUnaryOp(const MatrixType& mat, const UnaryOp& func = UnaryOp())
+ : m_matrix(mat), m_functor(func) {}
+
+ EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); }
+ EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); }
+
+// EIGEN_STRONG_INLINE const typename MatrixType::Nested& _matrix() const { return m_matrix; }
+// EIGEN_STRONG_INLINE const UnaryOp& _functor() const { return m_functor; }
+
+ protected:
+ const typename MatrixType::Nested m_matrix;
+ const UnaryOp m_functor;
+};
+
+
+template<typename UnaryOp, typename MatrixType>
+class SparseCwiseUnaryOp<UnaryOp,MatrixType>::InnerIterator
+{
+ typedef typename SparseCwiseUnaryOp::Scalar Scalar;
+ typedef typename ei_traits<SparseCwiseUnaryOp>::_MatrixTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseCwiseUnaryOp& unaryOp, int outer)
+ : m_iter(unaryOp.m_matrix,outer), m_functor(unaryOp.m_functor)
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { ++m_iter; return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
+
+ EIGEN_STRONG_INLINE int index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE int row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE int col() const { return m_iter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
+
+ protected:
+ MatrixTypeIterator m_iter;
+ const UnaryOp m_functor;
+};
+
+template<typename Derived>
+template<typename CustomUnaryOp>
+EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<CustomUnaryOp, Derived>
+SparseMatrixBase<Derived>::unaryExpr(const CustomUnaryOp& func) const
+{
+ return SparseCwiseUnaryOp<CustomUnaryOp, Derived>(derived(), func);
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<ei_scalar_opposite_op<typename ei_traits<Derived>::Scalar>,Derived>
+SparseMatrixBase<Derived>::operator-() const
+{
+ return derived();
+}
+
+template<typename ExpressionType>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op)
+SparseCwise<ExpressionType>::abs() const
+{
+ return _expression();
+}
+
+template<typename ExpressionType>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op)
+SparseCwise<ExpressionType>::abs2() const
+{
+ return _expression();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE typename SparseMatrixBase<Derived>::ConjugateReturnType
+SparseMatrixBase<Derived>::conjugate() const
+{
+ return ConjugateReturnType(derived());
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::RealReturnType
+SparseMatrixBase<Derived>::real() const { return derived(); }
+
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::ImagReturnType
+SparseMatrixBase<Derived>::imag() const { return derived(); }
+
+template<typename Derived>
+template<typename NewType>
+EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<ei_scalar_cast_op<typename ei_traits<Derived>::Scalar, NewType>, Derived>
+SparseMatrixBase<Derived>::cast() const
+{
+ return derived();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<ei_scalar_multiple_op<typename ei_traits<Derived>::Scalar>, Derived>
+SparseMatrixBase<Derived>::operator*(const Scalar& scalar) const
+{
+ return SparseCwiseUnaryOp<ei_scalar_multiple_op<Scalar>, Derived>
+ (derived(), ei_scalar_multiple_op<Scalar>(scalar));
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<ei_scalar_quotient1_op<typename ei_traits<Derived>::Scalar>, Derived>
+SparseMatrixBase<Derived>::operator/(const Scalar& scalar) const
+{
+ return SparseCwiseUnaryOp<ei_scalar_quotient1_op<Scalar>, Derived>
+ (derived(), ei_scalar_quotient1_op<Scalar>(scalar));
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator*=(const Scalar& other)
+{
+ for (int j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() *= other;
+ return derived();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator/=(const Scalar& other)
+{
+ for (int j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() /= other;
+ return derived();
+}
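+
+// Illustrative sketch (assumed name): in-place scaling only touches the
+// stored coefficients, leaving the sparsity pattern unchanged:
+// m *= 2.0;
+// m /= 4.0;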
+
+#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h b/extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h
new file mode 100644
index 00000000000..932daf220b9
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h
@@ -0,0 +1,157 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+
+// The product of a diagonal matrix with a sparse matrix can be easily
+// implemented using expression templates. We have two very different cases:
+// 1 - diag * row-major sparse
+// => each inner vector <=> scalar * sparse vector product
+// => so we can reuse CwiseUnaryOp::InnerIterator
+// 2 - diag * col-major sparse
+// => each inner vector <=> dense vector * sparse vector cwise product
+// => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
+// for that particular case
+// The two other cases are symmetric.
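+//
+// Illustrative sketch (assumed names; asDiagonal() is Eigen's dense-vector
+// to diagonal-matrix adaptor): with a sparse matrix s and a dense vector d,
+// d.asDiagonal() * s; // case 1 or 2 above, depending on s's storage order
+// s * d.asDiagonal(); // one of the two symmetric cases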
+
+template<typename Lhs, typename Rhs>
+struct ei_traits<SparseDiagonalProduct<Lhs, Rhs> > : ei_traits<SparseProduct<Lhs, Rhs, DiagonalProduct> >
+{
+ typedef typename ei_cleantype<Lhs>::type _Lhs;
+ typedef typename ei_cleantype<Rhs>::type _Rhs;
+ enum {
+ SparseFlags = ((int(_Lhs::Flags)&Diagonal)==Diagonal) ? int(_Rhs::Flags) : int(_Lhs::Flags),
+ Flags = SparseBit | (SparseFlags&RowMajorBit)
+ };
+};
+
+enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
+class ei_sparse_diagonal_product_inner_iterator_selector;
+
+template<typename LhsNested, typename RhsNested>
+class SparseDiagonalProduct : public SparseMatrixBase<SparseDiagonalProduct<LhsNested,RhsNested> >, ei_no_assignment_operator
+{
+ typedef typename ei_traits<SparseDiagonalProduct>::_LhsNested _LhsNested;
+ typedef typename ei_traits<SparseDiagonalProduct>::_RhsNested _RhsNested;
+
+ enum {
+ LhsMode = (_LhsNested::Flags&Diagonal)==Diagonal ? SDP_IsDiagonal
+ : (_LhsNested::Flags&RowMajorBit) ? SDP_IsSparseRowMajor : SDP_IsSparseColMajor,
+ RhsMode = (_RhsNested::Flags&Diagonal)==Diagonal ? SDP_IsDiagonal
+ : (_RhsNested::Flags&RowMajorBit) ? SDP_IsSparseRowMajor : SDP_IsSparseColMajor
+ };
+
+ public:
+
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseDiagonalProduct)
+
+ typedef ei_sparse_diagonal_product_inner_iterator_selector
+ <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ ei_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
+ }
+
+ EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class ei_sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
+ : public SparseCwiseUnaryOp<ei_scalar_multiple_op<typename Lhs::Scalar>,Rhs>::InnerIterator
+{
+ typedef typename SparseCwiseUnaryOp<ei_scalar_multiple_op<typename Lhs::Scalar>,Rhs>::InnerIterator Base;
+ public:
+ inline ei_sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, int outer)
+ : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class ei_sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
+ : public SparseCwiseBinaryOp<
+ ei_scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::_CoeffsVectorType>::InnerIterator
+{
+ typedef typename SparseCwiseBinaryOp<
+ ei_scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::_CoeffsVectorType>::InnerIterator Base;
+ public:
+ inline ei_sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, int outer)
+ : Base(expr.rhs().innerVector(outer).cwise() * expr.lhs().diagonal(), 0)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class ei_sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
+ : public SparseCwiseUnaryOp<ei_scalar_multiple_op<typename Rhs::Scalar>,Lhs>::InnerIterator
+{
+ typedef typename SparseCwiseUnaryOp<ei_scalar_multiple_op<typename Rhs::Scalar>,Lhs>::InnerIterator Base;
+ public:
+ inline ei_sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, int outer)
+ : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class ei_sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
+ : public SparseCwiseBinaryOp<
+ ei_scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ NestByValue<Transpose<typename Rhs::_CoeffsVectorType> > >::InnerIterator
+{
+ typedef typename SparseCwiseBinaryOp<
+ ei_scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ NestByValue<Transpose<typename Rhs::_CoeffsVectorType> > >::InnerIterator Base;
+ public:
+ inline ei_sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, int outer)
+ : Base(expr.lhs().innerVector(outer).cwise() * expr.rhs().diagonal().transpose().nestByValue(), 0)
+ {}
+};
+
+#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseDot.h b/extern/Eigen2/Eigen/src/Sparse/SparseDot.h
new file mode 100644
index 00000000000..7a26e0f4ba5
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseDot.h
@@ -0,0 +1,97 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DOT_H
+#define EIGEN_SPARSE_DOT_H
+
+template<typename Derived>
+template<typename OtherDerived>
+typename ei_traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ ei_assert(size() == other.size());
+  ei_assert(other.size()>0 && "you are using an uninitialized vector");
+
+ typename Derived::InnerIterator i(derived(),0);
+ Scalar res = 0;
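+  // only the stored (nonzero) coefficients of the sparse side contribute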
+ while (i)
+ {
+ res += i.value() * ei_conj(other.coeff(i.index()));
+ ++i;
+ }
+ return res;
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+typename ei_traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ ei_assert(size() == other.size());
+
+ typename Derived::InnerIterator i(derived(),0);
+ typename OtherDerived::InnerIterator j(other.derived(),0);
+ Scalar res = 0;
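+  // merge-join over the two sorted index sequences: advance the iterator
+  // with the smaller index, and accumulate only when the indices match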
+ while (i && j)
+ {
+ if (i.index()==j.index())
+ {
+ res += i.value() * ei_conj(j.value());
+ ++i; ++j;
+ }
+ else if (i.index()<j.index())
+ ++i;
+ else
+ ++j;
+ }
+ return res;
+}
+
+template<typename Derived>
+inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::squaredNorm() const
+{
+ return ei_real((*this).cwise().abs2().sum());
+}
+
+template<typename Derived>
+inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::norm() const
+{
+ return ei_sqrt(squaredNorm());
+}
+
+#endif // EIGEN_SPARSE_DOT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h b/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h
new file mode 100644
index 00000000000..c47e162f538
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h
@@ -0,0 +1,97 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_FLAGGED_H
+#define EIGEN_SPARSE_FLAGGED_H
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed>
+struct ei_traits<SparseFlagged<ExpressionType, Added, Removed> > : ei_traits<ExpressionType>
+{
+ enum { Flags = (ExpressionType::Flags | Added) & ~Removed };
+};
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed> class SparseFlagged
+ : public SparseMatrixBase<SparseFlagged<ExpressionType, Added, Removed> >
+{
+ public:
+
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseFlagged)
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
+ ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
+
+ inline SparseFlagged(const ExpressionType& matrix) : m_matrix(matrix) {}
+
+ inline int rows() const { return m_matrix.rows(); }
+ inline int cols() const { return m_matrix.cols(); }
+
+    // FIXME should we keep them?
+    inline Scalar& coeffRef(int row, int col)
+    { return m_matrix.const_cast_derived().coeffRef(row, col); }
+
+    inline const Scalar coeff(int row, int col) const
+    { return m_matrix.coeff(row, col); }
+
+ inline const Scalar coeff(int index) const
+ { return m_matrix.coeff(index); }
+
+ inline Scalar& coeffRef(int index)
+ { return m_matrix.const_cast_derived().coeffRef(index); }
+
+ protected:
+ ExpressionTypeNested m_matrix;
+};
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed>
+ class SparseFlagged<ExpressionType,Added,Removed>::InnerIterator : public ExpressionType::InnerIterator
+{
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseFlagged& xpr, int outer)
+ : ExpressionType::InnerIterator(xpr.m_matrix, outer)
+ {}
+};
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed>
+ class SparseFlagged<ExpressionType,Added,Removed>::ReverseInnerIterator : public ExpressionType::ReverseInnerIterator
+{
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseFlagged& xpr, int outer)
+ : ExpressionType::ReverseInnerIterator(xpr.m_matrix, outer)
+ {}
+};
+
+template<typename Derived>
+template<unsigned int Added>
+inline const SparseFlagged<Derived, Added, 0>
+SparseMatrixBase<Derived>::marked() const
+{
+ return derived();
+}
+
+#endif // EIGEN_SPARSE_FLAGGED_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h b/extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h
new file mode 100644
index 00000000000..355f4d52eab
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h
@@ -0,0 +1,41 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_FUZZY_H
+#define EIGEN_SPARSE_FUZZY_H
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// bool SparseMatrixBase<Derived>::isApprox(
+// const OtherDerived& other,
+// typename NumTraits<Scalar>::Real prec
+// ) const
+// {
+// const typename ei_nested<Derived,2>::type nested(derived());
+// const typename ei_nested<OtherDerived,2>::type otherNested(other.derived());
+// return (nested - otherNested).cwise().abs2().sum()
+// <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
+// }
+
+#endif // EIGEN_SPARSE_FUZZY_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h b/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h
new file mode 100644
index 00000000000..a1bac4d084d
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h
@@ -0,0 +1,346 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+
+NOTE: the _symbolic and _numeric functions have been adapted from
+      the LDL library:
+
+LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
+
+LDL License:
+
+ Your use or distribution of LDL or any modified version of
+ LDL implies that you agree to this License.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+ Permission is hereby granted to use or copy this program under the
+ terms of the GNU LGPL, provided that the Copyright, this License,
+ and the Availability of the original version is retained on all copies.
+ User documentation of any code that uses this code or any modified
+ version of this code must cite the Copyright, this License, the
+ Availability note, and "Used by permission." Permission to modify
+ the code and to distribute modified code is granted, provided the
+ Copyright, this License, and the Availability note are retained,
+ and a notice that the code was modified is included.
+ */
+
+#ifndef EIGEN_SPARSELDLT_H
+#define EIGEN_SPARSELDLT_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseLDLT
+ *
+ * \brief LDLT Cholesky decomposition of a sparse matrix and associated features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LDLT Cholesky decomposition
+ *
+ * \sa class LDLT, class SparseLLT
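+ *
+ * A minimal usage sketch (assuming a column-major, symmetric positive
+ * definite SparseMatrix<double> A and a VectorXd b; the names below are
+ * illustrative only):
+ * \code
+ * SparseLDLT<SparseMatrix<double> > ldlt(A);
+ * if (ldlt.succeeded())
+ *   ldlt.solveInPlace(b); // b now contains the solution of A x = b
+ * \endcode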
+ */
+template<typename MatrixType, int Backend = DefaultBackend>
+class SparseLDLT
+{
+ protected:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef SparseMatrix<Scalar,LowerTriangular|UnitDiagBit> CholMatrixType;
+ typedef Matrix<Scalar,MatrixType::ColsAtCompileTime,1> VectorType;
+
+ enum {
+ SupernodalFactorIsDirty = 0x10000,
+ MatrixLIsDirty = 0x20000
+ };
+
+ public:
+
+ /** Creates a dummy LDLT factorization object with flags \a flags. */
+ SparseLDLT(int flags = 0)
+ : m_flags(flags), m_status(0)
+ {
+ ei_assert((MatrixType::Flags&RowMajorBit)==0);
+ m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
+ }
+
+    /** Creates an LDLT object and computes the factorization of \a matrix using
+      * the flags \a flags. */
+ SparseLDLT(const MatrixType& matrix, int flags = 0)
+ : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0)
+ {
+ ei_assert((MatrixType::Flags&RowMajorBit)==0);
+ m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
+ compute(matrix);
+ }
+
+ /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
+ *
+      * Setting a value greater than zero speeds up computation and yields an incomplete
+      * factorization with fewer nonzero coefficients. Such approximate factors are especially
+      * useful to initialize an iterative solver.
+      *
+      * \warning If the precision is greater than zero, the LDLT factorization is not guaranteed to succeed
+      * even if the matrix is positive definite.
+      *
+      * Note that the exact meaning of this parameter might depend on the actual
+      * backend. Moreover, not all backends support this feature.
+ *
+ * \sa precision() */
+ void setPrecision(RealScalar v) { m_precision = v; }
+
+ /** \returns the current precision.
+ *
+ * \sa setPrecision() */
+ RealScalar precision() const { return m_precision; }
+
+ /** Sets the flags. Possible values are:
+ * - CompleteFactorization
+ * - IncompleteFactorization
+ * - MemoryEfficient (hint to use the memory most efficient method offered by the backend)
+      *  - SupernodalMultifrontal (implies a complete factorization if supported by the backend,
+      *    overrides the MemoryEfficient flag)
+      *  - SupernodalLeftLooking (implies a complete factorization if supported by the backend,
+      *    overrides the MemoryEfficient flag)
+ *
+ * \sa flags() */
+    void setFlags(int f) { m_flags = f; }
+ /** \returns the current flags */
+ int flags() const { return m_flags; }
+
+ /** Computes/re-computes the LDLT factorization */
+ void compute(const MatrixType& matrix);
+
+ /** Perform a symbolic factorization */
+ void _symbolic(const MatrixType& matrix);
+ /** Perform the actual factorization using the previously
+ * computed symbolic factorization */
+ bool _numeric(const MatrixType& matrix);
+
+ /** \returns the lower triangular matrix L */
+ inline const CholMatrixType& matrixL(void) const { return m_matrix; }
+
+ /** \returns the coefficients of the diagonal matrix D */
+ inline VectorType vectorD(void) const { return m_diag; }
+
+ template<typename Derived>
+ bool solveInPlace(MatrixBase<Derived> &b) const;
+
+ /** \returns true if the factorization succeeded */
+ inline bool succeeded(void) const { return m_succeeded; }
+
+ protected:
+ CholMatrixType m_matrix;
+ VectorType m_diag;
+ VectorXi m_parent; // elimination tree
+ VectorXi m_nonZerosPerCol;
+// VectorXi m_w; // workspace
+ RealScalar m_precision;
+ int m_flags;
+ mutable int m_status;
+ bool m_succeeded;
+};
+
+/** Computes / recomputes the LDLT decomposition of matrix \a a
+ * using the default algorithm.
+ */
+template<typename MatrixType, int Backend>
+void SparseLDLT<MatrixType,Backend>::compute(const MatrixType& a)
+{
+ _symbolic(a);
+ m_succeeded = _numeric(a);
+}
+
+template<typename MatrixType, int Backend>
+void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
+{
+ assert(a.rows()==a.cols());
+ const int size = a.rows();
+ m_matrix.resize(size, size);
+ m_parent.resize(size);
+ m_nonZerosPerCol.resize(size);
+ int * tags = ei_aligned_stack_new(int, size);
+
+ const int* Ap = a._outerIndexPtr();
+ const int* Ai = a._innerIndexPtr();
+ int* Lp = m_matrix._outerIndexPtr();
+ const int* P = 0;
+ int* Pinv = 0;
+
+ if (P)
+ {
+ /* If P is present then compute Pinv, the inverse of P */
+ for (int k = 0; k < size; ++k)
+ Pinv[P[k]] = k;
+ }
+ for (int k = 0; k < size; ++k)
+ {
+ /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
+ m_parent[k] = -1; /* parent of k is not yet known */
+ tags[k] = k; /* mark node k as visited */
+ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
+ int kk = P ? P[k] : k; /* kth original, or permuted, column */
+ int p2 = Ap[kk+1];
+ for (int p = Ap[kk]; p < p2; ++p)
+ {
+ /* A (i,k) is nonzero (original or permuted A) */
+ int i = Pinv ? Pinv[Ai[p]] : Ai[p];
+ if (i < k)
+ {
+ /* follow path from i to root of etree, stop at flagged node */
+ for (; tags[i] != k; i = m_parent[i])
+ {
+ /* find parent of i if not yet determined */
+ if (m_parent[i] == -1)
+ m_parent[i] = k;
+ ++m_nonZerosPerCol[i]; /* L (k,i) is nonzero */
+ tags[i] = k; /* mark i as visited */
+ }
+ }
+ }
+ }
+ /* construct Lp index array from m_nonZerosPerCol column counts */
+ Lp[0] = 0;
+ for (int k = 0; k < size; ++k)
+ Lp[k+1] = Lp[k] + m_nonZerosPerCol[k];
+
+ m_matrix.resizeNonZeros(Lp[size]);
+ ei_aligned_stack_delete(int, tags, size);
+}
+
+template<typename MatrixType, int Backend>
+bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
+{
+ assert(a.rows()==a.cols());
+ const int size = a.rows();
+ assert(m_parent.size()==size);
+ assert(m_nonZerosPerCol.size()==size);
+
+ const int* Ap = a._outerIndexPtr();
+ const int* Ai = a._innerIndexPtr();
+ const Scalar* Ax = a._valuePtr();
+ const int* Lp = m_matrix._outerIndexPtr();
+ int* Li = m_matrix._innerIndexPtr();
+ Scalar* Lx = m_matrix._valuePtr();
+ m_diag.resize(size);
+
+ Scalar * y = ei_aligned_stack_new(Scalar, size);
+ int * pattern = ei_aligned_stack_new(int, size);
+ int * tags = ei_aligned_stack_new(int, size);
+
+ const int* P = 0;
+ const int* Pinv = 0;
+ bool ok = true;
+
+ for (int k = 0; k < size; ++k)
+ {
+ /* compute nonzero pattern of kth row of L, in topological order */
+ y[k] = 0.0; /* Y(0:k) is now all zero */
+ int top = size; /* stack for pattern is empty */
+ tags[k] = k; /* mark node k as visited */
+ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
+ int kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */
+ int p2 = Ap[kk+1];
+ for (int p = Ap[kk]; p < p2; ++p)
+ {
+ int i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */
+ if (i <= k)
+ {
+ y[i] += Ax[p]; /* scatter A(i,k) into Y (sum duplicates) */
+ int len;
+ for (len = 0; tags[i] != k; i = m_parent[i])
+ {
+ pattern[len++] = i; /* L(k,i) is nonzero */
+ tags[i] = k; /* mark i as visited */
+ }
+ while (len > 0)
+ pattern[--top] = pattern[--len];
+ }
+ }
+ /* compute numerical values kth row of L (a sparse triangular solve) */
+ m_diag[k] = y[k]; /* get D(k,k) and clear Y(k) */
+ y[k] = 0.0;
+ for (; top < size; ++top)
+ {
+ int i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
+ Scalar yi = y[i]; /* get and clear Y(i) */
+ y[i] = 0.0;
+ int p2 = Lp[i] + m_nonZerosPerCol[i];
+ int p;
+ for (p = Lp[i]; p < p2; ++p)
+ y[Li[p]] -= Lx[p] * yi;
+ Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */
+ m_diag[k] -= l_ki * yi;
+ Li[p] = k; /* store L(k,i) in column form of L */
+ Lx[p] = l_ki;
+ ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
+ }
+ if (m_diag[k] == 0.0)
+ {
+ ok = false; /* failure, D(k,k) is zero */
+ break;
+ }
+ }
+
+ ei_aligned_stack_delete(Scalar, y, size);
+ ei_aligned_stack_delete(int, pattern, size);
+ ei_aligned_stack_delete(int, tags, size);
+
+  return ok;  /* true iff the diagonal of D is all nonzero */
+}
+
+/** Computes b = L^-T D^-1 L^-1 b */
+template<typename MatrixType, int Backend>
+template<typename Derived>
+bool SparseLDLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
+{
+ const int size = m_matrix.rows();
+ ei_assert(size==b.rows());
+ if (!m_succeeded)
+ return false;
+
+ if (m_matrix.nonZeros()>0) // otherwise L==I
+ m_matrix.solveTriangularInPlace(b);
+ b = b.cwise() / m_diag;
+ // FIXME should be .adjoint() but it fails to compile...
+
+ if (m_matrix.nonZeros()>0) // otherwise L==I
+ m_matrix.transpose().solveTriangularInPlace(b);
+
+ return true;
+}
+
+#endif // EIGEN_SPARSELDLT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h b/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h
new file mode 100644
index 00000000000..e7c314c2cad
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h
@@ -0,0 +1,205 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSELLT_H
+#define EIGEN_SPARSELLT_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseLLT
+ *
+ * \brief LLT Cholesky decomposition of a sparse matrix and associated features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LLT Cholesky decomposition
+ *
+ * \sa class LLT, class LDLT
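+ *
+ * A minimal usage sketch (assuming a symmetric positive definite
+ * SparseMatrix<double> A and a VectorXd b; the names below are
+ * illustrative only):
+ * \code
+ * SparseLLT<SparseMatrix<double> > llt(A);
+ * if (llt.succeeded())
+ *   llt.solveInPlace(b); // b now contains the solution of A x = b
+ * \endcode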
+ */
+template<typename MatrixType, int Backend = DefaultBackend>
+class SparseLLT
+{
+ protected:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef SparseMatrix<Scalar,LowerTriangular> CholMatrixType;
+
+ enum {
+ SupernodalFactorIsDirty = 0x10000,
+ MatrixLIsDirty = 0x20000
+ };
+
+ public:
+
+ /** Creates a dummy LLT factorization object with flags \a flags. */
+ SparseLLT(int flags = 0)
+ : m_flags(flags), m_status(0)
+ {
+ m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
+ }
+
+    /** Creates an LLT object and computes the factorization of \a matrix using
+      * the flags \a flags. */
+ SparseLLT(const MatrixType& matrix, int flags = 0)
+ : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0)
+ {
+ m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
+ compute(matrix);
+ }
+
+ /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
+ *
+      * Setting a value greater than zero speeds up computation and yields an incomplete
+      * factorization with fewer nonzero coefficients. Such approximate factors are especially
+      * useful to initialize an iterative solver.
+      *
+      * \warning If the precision is greater than zero, the LLT factorization is not guaranteed to succeed
+      * even if the matrix is positive definite.
+      *
+      * Note that the exact meaning of this parameter might depend on the actual
+      * backend. Moreover, not all backends support this feature.
+ *
+ * \sa precision() */
+ void setPrecision(RealScalar v) { m_precision = v; }
+
+ /** \returns the current precision.
+ *
+ * \sa setPrecision() */
+ RealScalar precision() const { return m_precision; }
+
+ /** Sets the flags. Possible values are:
+ * - CompleteFactorization
+ * - IncompleteFactorization
+ * - MemoryEfficient (hint to use the memory most efficient method offered by the backend)
+      *  - SupernodalMultifrontal (implies a complete factorization if supported by the backend,
+      *    overrides the MemoryEfficient flag)
+      *  - SupernodalLeftLooking (implies a complete factorization if supported by the backend,
+      *    overrides the MemoryEfficient flag)
+ *
+ * \sa flags() */
+ void setFlags(int f) { m_flags = f; }
+ /** \returns the current flags */
+ int flags() const { return m_flags; }
+
+ /** Computes/re-computes the LLT factorization */
+ void compute(const MatrixType& matrix);
+
+ /** \returns the lower triangular matrix L */
+ inline const CholMatrixType& matrixL(void) const { return m_matrix; }
+
+ template<typename Derived>
+ bool solveInPlace(MatrixBase<Derived> &b) const;
+
+ /** \returns true if the factorization succeeded */
+ inline bool succeeded(void) const { return m_succeeded; }
+
+ protected:
+ CholMatrixType m_matrix;
+ RealScalar m_precision;
+ int m_flags;
+ mutable int m_status;
+ bool m_succeeded;
+};
+
+/** Computes / recomputes the LLT decomposition of matrix \a a
+ * using the default algorithm.
+ */
+template<typename MatrixType, int Backend>
+void SparseLLT<MatrixType,Backend>::compute(const MatrixType& a)
+{
+ assert(a.rows()==a.cols());
+ const int size = a.rows();
+ m_matrix.resize(size, size);
+
+ // allocate a temporary vector for accumulations
+ AmbiVector<Scalar> tempVector(size);
+ RealScalar density = a.nonZeros()/RealScalar(size*size);
+
+ // TODO estimate the number of non zeros
+ m_matrix.startFill(a.nonZeros()*2);
+ for (int j = 0; j < size; ++j)
+ {
+ Scalar x = ei_real(a.coeff(j,j));
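+    // x accumulates A(j,j) minus the squared magnitudes of the already
+    // computed entries L(j,0..j-1); sqrt(real(x)) becomes the diagonal L(j,j)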
+
+ // TODO better estimate of the density !
+ tempVector.init(density>0.001? IsDense : IsSparse);
+ tempVector.setBounds(j+1,size);
+ tempVector.setZero();
+ // init with current matrix a
+ {
+ typename MatrixType::InnerIterator it(a,j);
+ ++it; // skip diagonal element
+ for (; it; ++it)
+ tempVector.coeffRef(it.index()) = it.value();
+ }
+ for (int k=0; k<j+1; ++k)
+ {
+ typename CholMatrixType::InnerIterator it(m_matrix, k);
+ while (it && it.index()<j)
+ ++it;
+ if (it && it.index()==j)
+ {
+ Scalar y = it.value();
+ x -= ei_abs2(y);
+ ++it; // skip j-th element, and process remaining column coefficients
+ tempVector.restart();
+ for (; it; ++it)
+ {
+ tempVector.coeffRef(it.index()) -= it.value() * y;
+ }
+ }
+ }
+ // copy the temporary vector to the respective m_matrix.col()
+ // while scaling the result by 1/real(x)
+ RealScalar rx = ei_sqrt(ei_real(x));
+ m_matrix.fill(j,j) = rx;
+ Scalar y = Scalar(1)/rx;
+ for (typename AmbiVector<Scalar>::Iterator it(tempVector, m_precision*rx); it; ++it)
+ {
+ m_matrix.fill(it.index(), j) = it.value() * y;
+ }
+ }
+ m_matrix.endFill();
+}
+
+/** Computes b = L^-T L^-1 b */
+template<typename MatrixType, int Backend>
+template<typename Derived>
+bool SparseLLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
+{
+ const int size = m_matrix.rows();
+ ei_assert(size==b.rows());
+
+ m_matrix.solveTriangularInPlace(b);
+ // FIXME should be simply .adjoint() but it fails to compile...
+ if (NumTraits<Scalar>::IsComplex)
+ {
+ CholMatrixType aux = m_matrix.conjugate();
+ aux.transpose().solveTriangularInPlace(b);
+ }
+ else
+ m_matrix.transpose().solveTriangularInPlace(b);
+
+ return true;
+}
+
+#endif // EIGEN_SPARSELLT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLU.h b/extern/Eigen2/Eigen/src/Sparse/SparseLU.h
new file mode 100644
index 00000000000..1425920509f
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseLU.h
@@ -0,0 +1,148 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSELU_H
+#define EIGEN_SPARSELU_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseLU
+ *
+ * \brief LU decomposition of a sparse matrix and associated features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LU factorization
+ *
+ * \sa class LU, class SparseLLT
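+ *
+ * \note In this generic version, the DefaultBackend implementations of
+ * compute() and solve() are stubs that simply assert; actual sparse LU
+ * factorizations are provided through backend specializations such as the
+ * SuperLU and UmfPack support modules.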
+ */
+template<typename MatrixType, int Backend = DefaultBackend>
+class SparseLU
+{
+ protected:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef SparseMatrix<Scalar,LowerTriangular> LUMatrixType;
+
+ enum {
+ MatrixLUIsDirty = 0x10000
+ };
+
+ public:
+
+ /** Creates a dummy LU factorization object with flags \a flags. */
+ SparseLU(int flags = 0)
+ : m_flags(flags), m_status(0)
+ {
+ m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
+ }
+
+    /** Creates an LU object and computes the factorization of \a matrix using
+      * the flags \a flags. */
+ SparseLU(const MatrixType& matrix, int flags = 0)
+ : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0)
+ {
+ m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
+ compute(matrix);
+ }
+
+ /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
+ *
+      * Setting a value greater than zero speeds up computation and yields an incomplete
+      * factorization with fewer nonzero coefficients. Such approximate factors are especially
+      * useful to initialize an iterative solver.
+      *
+      * Note that the exact meaning of this parameter might depend on the actual
+      * backend. Moreover, not all backends support this feature.
+ *
+ * \sa precision() */
+ void setPrecision(RealScalar v) { m_precision = v; }
+
+ /** \returns the current precision.
+ *
+ * \sa setPrecision() */
+ RealScalar precision() const { return m_precision; }
+
+ /** Sets the flags. Possible values are:
+ * - CompleteFactorization
+ * - IncompleteFactorization
+ * - MemoryEfficient
+ * - one of the ordering methods
+ * - etc...
+ *
+ * \sa flags() */
+ void setFlags(int f) { m_flags = f; }
+ /** \returns the current flags */
+ int flags() const { return m_flags; }
+
+ void setOrderingMethod(int m)
+ {
+      ei_assert((m & ~OrderingMask) == 0 && m != 0 && "invalid ordering method");
+      m_flags = (m_flags & ~OrderingMask) | (m & OrderingMask);
+ }
+
+ int orderingMethod() const
+ {
+ return m_flags&OrderingMask;
+ }
+
+ /** Computes/re-computes the LU factorization */
+ void compute(const MatrixType& matrix);
+
+ /** \returns the lower triangular matrix L */
+ //inline const MatrixType& matrixL() const { return m_matrixL; }
+
+ /** \returns the upper triangular matrix U */
+ //inline const MatrixType& matrixU() const { return m_matrixU; }
+
+ template<typename BDerived, typename XDerived>
+ bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const;
+
+ /** \returns true if the factorization succeeded */
+ inline bool succeeded(void) const { return m_succeeded; }
+
+ protected:
+ RealScalar m_precision;
+ int m_flags;
+ mutable int m_status;
+ bool m_succeeded;
+};
+
+/** Computes / recomputes the LU decomposition of matrix \a a
+ * using the default algorithm.
+ */
+template<typename MatrixType, int Backend>
+void SparseLU<MatrixType,Backend>::compute(const MatrixType& a)
+{
+ ei_assert(false && "not implemented yet");
+}
+
+/** Computes *x = U^-1 L^-1 b */
+template<typename MatrixType, int Backend>
+template<typename BDerived, typename XDerived>
+bool SparseLU<MatrixType,Backend>::solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const
+{
+ ei_assert(false && "not implemented yet");
+ return false;
+}
+
+#endif // EIGEN_SPARSELU_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h b/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h
new file mode 100644
index 00000000000..3f09596bc64
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h
@@ -0,0 +1,447 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIX_H
+#define EIGEN_SPARSEMATRIX_H
+
+/** \class SparseMatrix
+ *
+ * \brief Sparse matrix
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
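+ * A minimal in-order filling sketch (illustrative values; with fill() each
+ * inner vector must be filled with strictly increasing inner indices, and
+ * the outer vectors in increasing order):
+ * \code
+ * SparseMatrix<double> m(3,3);  // column-major by default
+ * m.startFill(4);               // reserve room for about 4 nonzeros
+ * m.fill(0,0) = 1.0;            // column 0
+ * m.fill(2,0) = 2.0;
+ * m.fill(1,1) = 3.0;            // column 1
+ * m.fill(2,2) = 4.0;            // column 2
+ * m.endFill();
+ * \endcode
+ *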
+ */
+template<typename _Scalar, int _Flags>
+struct ei_traits<SparseMatrix<_Scalar, _Flags> >
+{
+ typedef _Scalar Scalar;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = SparseBit | _Flags,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+
+
+
+template<typename _Scalar, int _Flags>
+class SparseMatrix
+ : public SparseMatrixBase<SparseMatrix<_Scalar, _Flags> >
+{
+ public:
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseMatrix)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+    // FIXME: why are these operators already available???
+ // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=)
+ // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=)
+
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+
+ protected:
+
+ enum { IsRowMajor = Base::IsRowMajor };
+ typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ int m_outerSize;
+ int m_innerSize;
+ int* m_outerIndex;
+ CompressedStorage<Scalar> m_data;
+
+ public:
+
+ inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+ inline int innerSize() const { return m_innerSize; }
+ inline int outerSize() const { return m_outerSize; }
+ inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+
+ inline const Scalar* _valuePtr() const { return &m_data.value(0); }
+ inline Scalar* _valuePtr() { return &m_data.value(0); }
+
+ inline const int* _innerIndexPtr() const { return &m_data.index(0); }
+ inline int* _innerIndexPtr() { return &m_data.index(0); }
+
+ inline const int* _outerIndexPtr() const { return m_outerIndex; }
+ inline int* _outerIndexPtr() { return m_outerIndex; }
+
+ inline Scalar coeff(int row, int col) const
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+ return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
+ }
+
+ inline Scalar& coeffRef(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+
+ int start = m_outerIndex[outer];
+ int end = m_outerIndex[outer+1];
+ ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
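+      // binary search for 'inner' among the sorted indices of this inner vector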
+ const int id = m_data.searchLowerIndex(start,end-1,inner);
+ ei_assert((id<end) && (m_data.index(id)==inner) && "coeffRef cannot be called on a zero coefficient");
+ return m_data.value(id);
+ }
+
+ public:
+
+ class InnerIterator;
+
+ inline void setZero()
+ {
+ m_data.clear();
+ //if (m_outerSize)
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
+// for (int i=0; i<m_outerSize; ++i)
+// m_outerIndex[i] = 0;
+// if (m_outerSize)
+// m_outerIndex[i] = 0;
+ }
+
+ /** \returns the number of non zero coefficients */
+ inline int nonZeros() const { return m_data.size(); }
+
+ /** Initializes the filling process of \c *this.
+ * \param reserveSize approximate number of nonzeros
+      * Note that the matrix \c *this is zeroed.
+ */
+ inline void startFill(int reserveSize = 1000)
+ {
+ setZero();
+ m_data.reserve(reserveSize);
+ }
+
+    /** \returns a reference to a newly created coefficient at position (\a row, \a col).
+      * The coefficients must be inserted in order: within each inner vector the
+      * inner indices must be strictly increasing. \sa fillrand() */
+ inline Scalar& fill(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ int i = outer;
+ while (i>=0 && m_outerIndex[i]==0)
+ {
+ m_outerIndex[i] = m_data.size();
+ --i;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+ else
+ {
+ ei_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
+ }
+ assert(size_t(m_outerIndex[outer+1]) == m_data.size());
+ int id = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ m_data.append(0, inner);
+ return m_data.value(id);
+ }
+
+    /** Like fill() but the inner coordinates can be given in any (random) order.
+      */
+ inline Scalar& fillrand(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ // nothing special to do here
+ int i = outer;
+ while (i>=0 && m_outerIndex[i]==0)
+ {
+ m_outerIndex[i] = m_data.size();
+ --i;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+ assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "invalid outer index");
+ size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+ size_t id = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ float reallocRatio = 1;
+ if (m_data.allocatedSize()<id+1)
+ {
+        // we need to reallocate the data; to reduce the number of reallocations
+        // we use a smart resize algorithm based on the current filling ratio
+        // (floats are used to avoid integer overflows)
+ float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer);
+ reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+        // let's bound the realloc ratio to:
+        //   1) reduce multiple minor reallocs when the matrix is almost filled
+        //   2) avoid allocating too much memory when the matrix is almost empty
+ reallocRatio = std::min(std::max(reallocRatio,1.5f),8.f);
+ }
+ m_data.resize(id+1,reallocRatio);
+
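+      // shift the already-stored coefficients with a larger inner index one
+      // slot up so that the inner vector remains sorted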
+ while ( (id > startId) && (m_data.index(id-1) > inner) )
+ {
+ m_data.index(id) = m_data.index(id-1);
+ m_data.value(id) = m_data.value(id-1);
+ --id;
+ }
+
+ m_data.index(id) = inner;
+ return (m_data.value(id) = 0);
+ }
+
+ inline void endFill()
+ {
+ int size = m_data.size();
+ int i = m_outerSize;
+ // find the last filled column
+ while (i>=0 && m_outerIndex[i]==0)
+ --i;
+ ++i;
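+      // set the end pointer of every remaining (empty) inner vector to the
+      // total number of nonzeros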
+ while (i<=m_outerSize)
+ {
+ m_outerIndex[i] = size;
+ ++i;
+ }
+ }
+
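+    /** Suppresses (in place) all stored coefficients that are much smaller than
+      * \a reference with respect to \a epsilon, and compacts the storage. */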
+ void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
+ {
+ int k = 0;
+ for (int j=0; j<m_outerSize; ++j)
+ {
+ int previousStart = m_outerIndex[j];
+ m_outerIndex[j] = k;
+ int end = m_outerIndex[j+1];
+ for (int i=previousStart; i<end; ++i)
+ {
+ if (!ei_isMuchSmallerThan(m_data.value(i), reference, epsilon))
+ {
+ m_data.value(k) = m_data.value(i);
+ m_data.index(k) = m_data.index(i);
+ ++k;
+ }
+ }
+ }
+ m_outerIndex[m_outerSize] = k;
+ m_data.resize(k,0);
+ }
+
+ void resize(int rows, int cols)
+ {
+// std::cerr << this << " resize " << rows << "x" << cols << "\n";
+ const int outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ m_data.clear();
+ if (m_outerSize != outerSize)
+ {
+ delete[] m_outerIndex;
+ m_outerIndex = new int [outerSize+1];
+ m_outerSize = outerSize;
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
+ }
+ }
+ void resizeNonZeros(int size)
+ {
+ m_data.resize(size);
+ }
+
+ inline SparseMatrix()
+ : m_outerSize(-1), m_innerSize(0), m_outerIndex(0)
+ {
+ resize(0, 0);
+ }
+
+ inline SparseMatrix(int rows, int cols)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ resize(rows, cols);
+ }
+
+ template<typename OtherDerived>
+ inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ *this = other.derived();
+ }
+
+ inline SparseMatrix(const SparseMatrix& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(SparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_outerIndex, other.m_outerIndex);
+ std::swap(m_innerSize, other.m_innerSize);
+ std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseMatrix& operator=(const SparseMatrix& other)
+ {
+// std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n";
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(int));
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+        // two-pass algorithm:
+        //  1 - compute the number of coeffs per dest inner vector
+        //  2 - do the actual copy/eval
+        // since each coeff of the rhs has to be accessed twice, let's evaluate it into a temporary if needed
+ //typedef typename ei_nested<OtherDerived,2>::type OtherCopy;
+ typedef typename ei_eval<OtherDerived>::type OtherCopy;
+ typedef typename ei_cleantype<OtherCopy>::type _OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ resize(other.rows(), other.cols());
+ Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (int j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ ++m_outerIndex[it.index()];
+
+ // prefix sum
+ int count = 0;
+ VectorXi positions(outerSize());
+ for (int j=0; j<outerSize(); ++j)
+ {
+ int tmp = m_outerIndex[j];
+ m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ m_outerIndex[outerSize()] = count;
+ // alloc
+ m_data.resize(count);
+ // pass 2
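+        // (scatter each source coefficient to its transposed position, using
+        // 'positions' as per-outer-vector write cursors)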
+ for (int j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ {
+ int pos = positions[it.index()]++;
+ m_data.index(pos) = j;
+ m_data.value(pos) = it.value();
+ }
+
+ return *this;
+ }
+ else
+ {
+ // there is no special optimization
+ return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
+ }
+ }
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+ {
+ EIGEN_DBG_SPARSE(
+ s << "Nonzero entries:\n";
+ for (int i=0; i<m.nonZeros(); ++i)
+ {
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ }
+ s << std::endl;
+ s << std::endl;
+ s << "Column pointers:\n";
+ for (int i=0; i<m.outerSize(); ++i)
+ {
+ s << m.m_outerIndex[i] << " ";
+ }
+ s << " $" << std::endl;
+ s << std::endl;
+ );
+ s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseMatrix()
+ {
+ delete[] m_outerIndex;
+ }
+};
+
+template<typename Scalar, int _Flags>
+class SparseMatrix<Scalar,_Flags>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseMatrix& mat, int outer)
+ : m_matrix(mat), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_start(m_id), m_end(mat.m_outerIndex[outer+1])
+ {}
+
+ template<unsigned int Added, unsigned int Removed>
+ InnerIterator(const Flagged<SparseMatrix,Added,Removed>& mat, int outer)
+ : m_matrix(mat._expression()), m_outer(outer), m_id(m_matrix.m_outerIndex[outer]),
+ m_start(m_id), m_end(m_matrix.m_outerIndex[outer+1])
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_matrix.m_data.value(m_id); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.m_data.value(m_id)); }
+
+ inline int index() const { return m_matrix.m_data.index(m_id); }
+ inline int row() const { return IsRowMajor ? m_outer : index(); }
+ inline int col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
+
+ protected:
+ const SparseMatrix& m_matrix;
+ const int m_outer;
+ int m_id;
+ const int m_start;
+ const int m_end;
+};
+
+#endif // EIGEN_SPARSEMATRIX_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h b/extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h
new file mode 100644
index 00000000000..468bc9e227c
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h
@@ -0,0 +1,626 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIXBASE_H
+#define EIGEN_SPARSEMATRIXBASE_H
+
+template<typename Derived> class SparseMatrixBase
+{
+ public:
+
+ typedef typename ei_traits<Derived>::Scalar Scalar;
+// typedef typename Derived::InnerIterator InnerIterator;
+
+ enum {
+
+ RowsAtCompileTime = ei_traits<Derived>::RowsAtCompileTime,
+ /**< The number of rows at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+ ColsAtCompileTime = ei_traits<Derived>::ColsAtCompileTime,
+ /**< The number of columns at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+ SizeAtCompileTime = (ei_size_at_compile_time<ei_traits<Derived>::RowsAtCompileTime,
+ ei_traits<Derived>::ColsAtCompileTime>::ret),
+ /**< This is equal to the number of coefficients, i.e. the number of
+ * rows times the number of columns, or to \a Dynamic if this is not
+ * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+
+ MaxSizeAtCompileTime = (ei_size_at_compile_time<MaxRowsAtCompileTime,
+ MaxColsAtCompileTime>::ret),
+
+ IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
+ /**< This is set to true if either the number of rows or the number of
+ * columns is known at compile-time to be equal to 1. Indeed, in that case,
+ * we are dealing with a column-vector (if there is only one column) or with
+ * a row-vector (if there is only one row). */
+
+ Flags = ei_traits<Derived>::Flags,
+ /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+ * constructed from this one. See the \ref flags "list of flags".
+ */
+
+ CoeffReadCost = ei_traits<Derived>::CoeffReadCost,
+ /**< This is a rough measure of how expensive it is to read one coefficient from
+ * this expression.
+ */
+
+ IsRowMajor = Flags&RowMajorBit ? 1 : 0
+ };
+
+ /** \internal the return type of MatrixBase::conjugate() */
+ typedef typename ei_meta_if<NumTraits<Scalar>::IsComplex,
+ const SparseCwiseUnaryOp<ei_scalar_conjugate_op<Scalar>, Derived>,
+ const Derived&
+ >::ret ConjugateReturnType;
+ /** \internal the return type of MatrixBase::real() */
+ typedef CwiseUnaryOp<ei_scalar_real_op<Scalar>, Derived> RealReturnType;
+ /** \internal the return type of MatrixBase::imag() */
+ typedef CwiseUnaryOp<ei_scalar_imag_op<Scalar>, Derived> ImagReturnType;
+ /** \internal the return type of MatrixBase::adjoint() */
+ typedef SparseTranspose</*NestByValue<*/typename ei_cleantype<ConjugateReturnType>::type> /*>*/
+ AdjointReturnType;
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is the "real scalar" type; if the \a Scalar type is already real numbers
+ * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
+ * \a Scalar is \a std::complex<T> then RealScalar is \a T.
+ *
+ * \sa class NumTraits
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** type of the equivalent square matrix */
+ typedef Matrix<Scalar,EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime),
+ EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+
+ inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+ inline Derived& derived() { return *static_cast<Derived*>(this); }
+ inline Derived& const_cast_derived() const
+ { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
+ inline int rows() const { return derived().rows(); }
+ /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
+ inline int cols() const { return derived().cols(); }
+ /** \returns the number of coefficients, which is \a rows()*cols().
+ * \sa rows(), cols(), SizeAtCompileTime. */
+ inline int size() const { return rows() * cols(); }
+ /** \returns the number of nonzero coefficients which is in practice the number
+ * of stored coefficients. */
+ inline int nonZeros() const { return derived().nonZeros(); }
+ /** \returns true if either the number of rows or the number of columns is equal to 1.
+ * In other words, this function returns
+ * \code rows()==1 || cols()==1 \endcode
+ * \sa rows(), cols(), IsVectorAtCompileTime. */
+ inline bool isVector() const { return rows()==1 || cols()==1; }
+ /** \returns the size of the storage major dimension,
+      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
+ int outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+ /** \returns the size of the inner dimension according to the storage order,
+      * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
+ int innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
+
+ bool isRValue() const { return m_isRValue; }
+ Derived& markAsRValue() { m_isRValue = true; return derived(); }
+
+ SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
+
+ inline Derived& operator=(const Derived& other)
+ {
+// std::cout << "Derived& operator=(const Derived& other)\n";
+// if (other.isRValue())
+// derived().swap(other.const_cast_derived());
+// else
+ this->operator=<Derived>(other);
+ return derived();
+ }
+
+
+ template<typename OtherDerived>
+ inline void assignGeneric(const OtherDerived& other)
+ {
+// std::cout << "Derived& operator=(const MatrixBase<OtherDerived>& other)\n";
+ //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ ei_assert(( ((ei_traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ const int outerSize = other.outerSize();
+ //typedef typename ei_meta_if<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::ret TempType;
+      // thanks to shallow copies, we always eval to a temporary
+ Derived temp(other.rows(), other.cols());
+
+ temp.startFill(std::max(this->rows(),this->cols())*2);
+ for (int j=0; j<outerSize; ++j)
+ {
+ for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+ {
+ Scalar v = it.value();
+ if (v!=Scalar(0))
+ {
+ if (OtherDerived::Flags & RowMajorBit) temp.fill(j,it.index()) = v;
+ else temp.fill(it.index(),j) = v;
+ }
+ }
+ }
+ temp.endFill();
+
+ derived() = temp.markAsRValue();
+ }
+
+
+ template<typename OtherDerived>
+ inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+// std::cout << typeid(OtherDerived).name() << "\n";
+// std::cout << Flags << " " << OtherDerived::Flags << "\n";
+ const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+// std::cout << "eval transpose = " << transpose << "\n";
+ const int outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+ if ((!transpose) && other.isRValue())
+ {
+ // eval without temporary
+ derived().resize(other.rows(), other.cols());
+ derived().startFill(std::max(this->rows(),this->cols())*2);
+ for (int j=0; j<outerSize; ++j)
+ {
+ for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+ {
+ Scalar v = it.value();
+ if (v!=Scalar(0))
+ {
+ if (IsRowMajor) derived().fill(j,it.index()) = v;
+ else derived().fill(it.index(),j) = v;
+ }
+ }
+ }
+ derived().endFill();
+ }
+ else
+ {
+ assignGeneric(other.derived());
+ }
+ return derived();
+ }
+
+ template<typename Lhs, typename Rhs>
+ inline Derived& operator=(const SparseProduct<Lhs,Rhs,SparseTimeSparseProduct>& product);
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
+ {
+ if (Flags&RowMajorBit)
+ {
+ for (int row=0; row<m.outerSize(); ++row)
+ {
+ int col = 0;
+ for (typename Derived::InnerIterator it(m.derived(), row); it; ++it)
+ {
+ for ( ; col<it.index(); ++col)
+ s << "0 ";
+ s << it.value() << " ";
+ ++col;
+ }
+ for ( ; col<m.cols(); ++col)
+ s << "0 ";
+ s << std::endl;
+ }
+ }
+ else
+ {
+ if (m.cols() == 1) {
+ int row = 0;
+ for (typename Derived::InnerIterator it(m.derived(), 0); it; ++it)
+ {
+ for ( ; row<it.index(); ++row)
+ s << "0" << std::endl;
+ s << it.value() << std::endl;
+ ++row;
+ }
+ for ( ; row<m.rows(); ++row)
+ s << "0" << std::endl;
+ }
+ else
+ {
+ SparseMatrix<Scalar, RowMajorBit> trans = m.derived();
+ s << trans;
+ }
+ }
+ return s;
+ }
+
+ const SparseCwiseUnaryOp<ei_scalar_opposite_op<typename ei_traits<Derived>::Scalar>,Derived> operator-() const;
+
+ template<typename OtherDerived>
+ const SparseCwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
+ operator+(const SparseMatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ const SparseCwiseBinaryOp<ei_scalar_difference_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
+ operator-(const SparseMatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
+ template<typename OtherDerived>
+ Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+
+// template<typename Lhs,typename Rhs>
+// Derived& operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other);
+
+ Derived& operator*=(const Scalar& other);
+ Derived& operator/=(const Scalar& other);
+
+ const SparseCwiseUnaryOp<ei_scalar_multiple_op<typename ei_traits<Derived>::Scalar>, Derived>
+ operator*(const Scalar& scalar) const;
+ const SparseCwiseUnaryOp<ei_scalar_quotient1_op<typename ei_traits<Derived>::Scalar>, Derived>
+ operator/(const Scalar& scalar) const;
+
+ inline friend const SparseCwiseUnaryOp<ei_scalar_multiple_op<typename ei_traits<Derived>::Scalar>, Derived>
+ operator*(const Scalar& scalar, const SparseMatrixBase& matrix)
+ { return matrix*scalar; }
+
+
+ template<typename OtherDerived>
+ const typename SparseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+ // dense * sparse (return a dense object)
+ template<typename OtherDerived> friend
+ const typename SparseProductReturnType<OtherDerived,Derived>::Type
+ operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
+ { return typename SparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
+
+ template<typename OtherDerived>
+ const typename SparseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const MatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ typename ei_plain_matrix_type_column_major<OtherDerived>::type
+ solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived>
+ void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
+ RealScalar squaredNorm() const;
+ RealScalar norm() const;
+// const PlainMatrixType normalized() const;
+// void normalize();
+
+ SparseTranspose<Derived> transpose() { return derived(); }
+ const SparseTranspose<Derived> transpose() const { return derived(); }
+ // void transposeInPlace();
+ const AdjointReturnType adjoint() const { return conjugate()/*.nestByValue()*/; }
+
+ // sub-vector
+ SparseInnerVectorSet<Derived,1> row(int i);
+ const SparseInnerVectorSet<Derived,1> row(int i) const;
+ SparseInnerVectorSet<Derived,1> col(int j);
+ const SparseInnerVectorSet<Derived,1> col(int j) const;
+ SparseInnerVectorSet<Derived,1> innerVector(int outer);
+ const SparseInnerVectorSet<Derived,1> innerVector(int outer) const;
+
+ // set of sub-vectors
+ SparseInnerVectorSet<Derived,Dynamic> subrows(int start, int size);
+ const SparseInnerVectorSet<Derived,Dynamic> subrows(int start, int size) const;
+ SparseInnerVectorSet<Derived,Dynamic> subcols(int start, int size);
+ const SparseInnerVectorSet<Derived,Dynamic> subcols(int start, int size) const;
+ SparseInnerVectorSet<Derived,Dynamic> innerVectors(int outerStart, int outerSize);
+ const SparseInnerVectorSet<Derived,Dynamic> innerVectors(int outerStart, int outerSize) const;
+
+// typename BlockReturnType<Derived>::Type block(int startRow, int startCol, int blockRows, int blockCols);
+// const typename BlockReturnType<Derived>::Type
+// block(int startRow, int startCol, int blockRows, int blockCols) const;
+//
+// typename BlockReturnType<Derived>::SubVectorType segment(int start, int size);
+// const typename BlockReturnType<Derived>::SubVectorType segment(int start, int size) const;
+//
+// typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size);
+// const typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size) const;
+//
+// typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size);
+// const typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size) const;
+//
+// typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols);
+// const typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols) const;
+//
+// template<int BlockRows, int BlockCols>
+// typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol);
+// template<int BlockRows, int BlockCols>
+// const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol) const;
+
+// template<int CRows, int CCols>
+// typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type);
+// template<int CRows, int CCols>
+// const typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type) const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType start(void);
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType start() const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType end();
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType end() const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType segment(int start);
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType segment(int start) const;
+
+// DiagonalCoeffs<Derived> diagonal();
+// const DiagonalCoeffs<Derived> diagonal() const;
+
+// template<unsigned int Mode> Part<Derived, Mode> part();
+// template<unsigned int Mode> const Part<Derived, Mode> part() const;
+
+
+// static const ConstantReturnType Constant(int rows, int cols, const Scalar& value);
+// static const ConstantReturnType Constant(int size, const Scalar& value);
+// static const ConstantReturnType Constant(const Scalar& value);
+
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(int rows, int cols, const CustomNullaryOp& func);
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(int size, const CustomNullaryOp& func);
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(const CustomNullaryOp& func);
+
+// static const ConstantReturnType Zero(int rows, int cols);
+// static const ConstantReturnType Zero(int size);
+// static const ConstantReturnType Zero();
+// static const ConstantReturnType Ones(int rows, int cols);
+// static const ConstantReturnType Ones(int size);
+// static const ConstantReturnType Ones();
+// static const IdentityReturnType Identity();
+// static const IdentityReturnType Identity(int rows, int cols);
+// static const BasisReturnType Unit(int size, int i);
+// static const BasisReturnType Unit(int i);
+// static const BasisReturnType UnitX();
+// static const BasisReturnType UnitY();
+// static const BasisReturnType UnitZ();
+// static const BasisReturnType UnitW();
+
+// const DiagonalMatrix<Derived> asDiagonal() const;
+
+// Derived& setConstant(const Scalar& value);
+// Derived& setZero();
+// Derived& setOnes();
+// Derived& setRandom();
+// Derived& setIdentity();
+
+ Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ {
+ Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> res(rows(),cols());
+ res.setZero();
+ for (int j=0; j<outerSize(); ++j)
+ {
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ if(IsRowMajor)
+ res.coeffRef(j,i.index()) = i.value();
+ else
+ res.coeffRef(i.index(),j) = i.value();
+ }
+ return res;
+ }
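+  // Illustrative sketch of toDense(), using the fill API defined by the derived classes:
+  //   SparseMatrix<double> A(3,3);
+  //   A.startFill(2);
+  //   A.fill(0,0) = 1.0; // outer (column) indices must be non-decreasing
+  //   A.fill(2,1) = 2.0;
+  //   A.endFill();
+  //   MatrixXd D = A.toDense(); // dense copy: zero everywhere except (0,0) and (2,1)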
+
+ template<typename OtherDerived>
+ bool isApprox(const SparseMatrixBase<OtherDerived>& other,
+ RealScalar prec = precision<Scalar>()) const
+ { return toDense().isApprox(other.toDense(),prec); }
+
+ template<typename OtherDerived>
+ bool isApprox(const MatrixBase<OtherDerived>& other,
+ RealScalar prec = precision<Scalar>()) const
+ { return toDense().isApprox(other,prec); }
+// bool isMuchSmallerThan(const RealScalar& other,
+// RealScalar prec = precision<Scalar>()) const;
+// template<typename OtherDerived>
+// bool isMuchSmallerThan(const MatrixBase<OtherDerived>& other,
+// RealScalar prec = precision<Scalar>()) const;
+
+// bool isApproxToConstant(const Scalar& value, RealScalar prec = precision<Scalar>()) const;
+// bool isZero(RealScalar prec = precision<Scalar>()) const;
+// bool isOnes(RealScalar prec = precision<Scalar>()) const;
+// bool isIdentity(RealScalar prec = precision<Scalar>()) const;
+// bool isDiagonal(RealScalar prec = precision<Scalar>()) const;
+
+// bool isUpperTriangular(RealScalar prec = precision<Scalar>()) const;
+// bool isLowerTriangular(RealScalar prec = precision<Scalar>()) const;
+
+// template<typename OtherDerived>
+// bool isOrthogonal(const MatrixBase<OtherDerived>& other,
+// RealScalar prec = precision<Scalar>()) const;
+// bool isUnitary(RealScalar prec = precision<Scalar>()) const;
+
+// template<typename OtherDerived>
+// inline bool operator==(const MatrixBase<OtherDerived>& other) const
+// { return (cwise() == other).all(); }
+
+// template<typename OtherDerived>
+// inline bool operator!=(const MatrixBase<OtherDerived>& other) const
+// { return (cwise() != other).any(); }
+
+
+ template<typename NewType>
+ const SparseCwiseUnaryOp<ei_scalar_cast_op<typename ei_traits<Derived>::Scalar, NewType>, Derived> cast() const;
+
+ /** \returns the matrix or vector obtained by evaluating this expression.
+ *
+ * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+ * a const reference, in order to avoid a useless copy.
+ */
+ EIGEN_STRONG_INLINE const typename ei_eval<Derived>::type eval() const
+ { return typename ei_eval<Derived>::type(derived()); }
+
+// template<typename OtherDerived>
+// void swap(const MatrixBase<OtherDerived>& other);
+
+ template<unsigned int Added>
+ const SparseFlagged<Derived, Added, 0> marked() const;
+// const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit> lazy() const;
+
+ /** \returns number of elements to skip to pass from one row (resp. column) to another
+ * for a row-major (resp. column-major) matrix.
+  * Combined with coeffRef() and the \ref flags flags, it allows direct access to the data
+ * of the underlying matrix.
+ */
+// inline int stride(void) const { return derived().stride(); }
+
+// inline const NestByValue<Derived> nestByValue() const;
+
+
+ ConjugateReturnType conjugate() const;
+ const RealReturnType real() const;
+ const ImagReturnType imag() const;
+
+ template<typename CustomUnaryOp>
+ const SparseCwiseUnaryOp<CustomUnaryOp, Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const;
+
+// template<typename CustomBinaryOp, typename OtherDerived>
+// const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
+// binaryExpr(const MatrixBase<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const;
+
+
+ Scalar sum() const;
+// Scalar trace() const;
+
+// typename ei_traits<Derived>::Scalar minCoeff() const;
+// typename ei_traits<Derived>::Scalar maxCoeff() const;
+
+// typename ei_traits<Derived>::Scalar minCoeff(int* row, int* col = 0) const;
+// typename ei_traits<Derived>::Scalar maxCoeff(int* row, int* col = 0) const;
+
+// template<typename BinaryOp>
+// typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type
+// redux(const BinaryOp& func) const;
+
+// template<typename Visitor>
+// void visit(Visitor& func) const;
+
+
+ const SparseCwise<Derived> cwise() const;
+ SparseCwise<Derived> cwise();
+
+// inline const WithFormat<Derived> format(const IOFormat& fmt) const;
+
+/////////// Array module ///////////
+ /*
+ bool all(void) const;
+ bool any(void) const;
+
+ const PartialRedux<Derived,Horizontal> rowwise() const;
+ const PartialRedux<Derived,Vertical> colwise() const;
+
+ static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
+ static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int size);
+ static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random();
+
+ template<typename ThenDerived,typename ElseDerived>
+ const Select<Derived,ThenDerived,ElseDerived>
+ select(const MatrixBase<ThenDerived>& thenMatrix,
+ const MatrixBase<ElseDerived>& elseMatrix) const;
+
+ template<typename ThenDerived>
+ inline const Select<Derived,ThenDerived, NestByValue<typename ThenDerived::ConstantReturnType> >
+ select(const MatrixBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
+
+ template<typename ElseDerived>
+ inline const Select<Derived, NestByValue<typename ElseDerived::ConstantReturnType>, ElseDerived >
+ select(typename ElseDerived::Scalar thenScalar, const MatrixBase<ElseDerived>& elseMatrix) const;
+
+ template<int p> RealScalar lpNorm() const;
+ */
+
+
+// template<typename OtherDerived>
+// Scalar dot(const MatrixBase<OtherDerived>& other) const
+// {
+// EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+// EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+// EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
+// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+//
+// ei_assert(derived().size() == other.size());
+// // short version, but the assembly looks more complicated because
+// // of the CwiseBinaryOp iterator complexity
+// // return res = (derived().cwise() * other.derived().conjugate()).sum();
+//
+// // optimized, generic version
+// typename Derived::InnerIterator i(derived(),0);
+// typename OtherDerived::InnerIterator j(other.derived(),0);
+// Scalar res = 0;
+// while (i && j)
+// {
+// if (i.index()==j.index())
+// {
+// // std::cerr << i.value() << " * " << j.value() << "\n";
+// res += i.value() * ei_conj(j.value());
+// ++i; ++j;
+// }
+// else if (i.index()<j.index())
+// ++i;
+// else
+// ++j;
+// }
+// return res;
+// }
+//
+// Scalar sum() const
+// {
+// Scalar res = 0;
+// for (typename Derived::InnerIterator iter(*this,0); iter; ++iter)
+// {
+// res += iter.value();
+// }
+// return res;
+// }
+
+ #ifdef EIGEN_TAUCS_SUPPORT
+ taucs_ccs_matrix asTaucsMatrix();
+ #endif
+
+ #ifdef EIGEN_CHOLMOD_SUPPORT
+ cholmod_sparse asCholmodMatrix();
+ #endif
+
+ #ifdef EIGEN_SUPERLU_SUPPORT
+ SluMatrix asSluMatrix();
+ #endif
+
+ protected:
+
+ bool m_isRValue;
+};
+
+#endif // EIGEN_SPARSEMATRIXBASE_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseProduct.h b/extern/Eigen2/Eigen/src/Sparse/SparseProduct.h
new file mode 100644
index 00000000000..c98a71e993b
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseProduct.h
@@ -0,0 +1,415 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEPRODUCT_H
+#define EIGEN_SPARSEPRODUCT_H
+
+template<typename Lhs, typename Rhs> struct ei_sparse_product_mode
+{
+  enum {
+    value = ((Lhs::Flags&Diagonal)==Diagonal || (Rhs::Flags&Diagonal)==Diagonal)
+ ? DiagonalProduct
+ : (Rhs::Flags&Lhs::Flags&SparseBit)==SparseBit
+ ? SparseTimeSparseProduct
+ : (Lhs::Flags&SparseBit)==SparseBit
+ ? SparseTimeDenseProduct
+ : DenseTimeSparseProduct };
+};
+
+template<typename Lhs, typename Rhs, int ProductMode>
+struct SparseProductReturnType
+{
+ typedef const typename ei_nested<Lhs,Rhs::RowsAtCompileTime>::type LhsNested;
+ typedef const typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
+
+ typedef SparseProduct<LhsNested, RhsNested, ProductMode> Type;
+};
+
+template<typename Lhs, typename Rhs>
+struct SparseProductReturnType<Lhs,Rhs,DiagonalProduct>
+{
+ typedef const typename ei_nested<Lhs,Rhs::RowsAtCompileTime>::type LhsNested;
+ typedef const typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
+
+ typedef SparseDiagonalProduct<LhsNested, RhsNested> Type;
+};
+
+// sparse product return type specialization
+template<typename Lhs, typename Rhs>
+struct SparseProductReturnType<Lhs,Rhs,SparseTimeSparseProduct>
+{
+ typedef typename ei_traits<Lhs>::Scalar Scalar;
+ enum {
+ LhsRowMajor = ei_traits<Lhs>::Flags & RowMajorBit,
+ RhsRowMajor = ei_traits<Rhs>::Flags & RowMajorBit,
+ TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
+ TransposeLhs = LhsRowMajor && (!RhsRowMajor)
+ };
+
+ // FIXME if we transpose let's evaluate to a LinkedVectorMatrix since it is the
+ // type of the temporary to perform the transpose op
+ typedef typename ei_meta_if<TransposeLhs,
+ SparseMatrix<Scalar,0>,
+ const typename ei_nested<Lhs,Rhs::RowsAtCompileTime>::type>::ret LhsNested;
+
+ typedef typename ei_meta_if<TransposeRhs,
+ SparseMatrix<Scalar,0>,
+ const typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type>::ret RhsNested;
+
+ typedef SparseProduct<LhsNested, RhsNested, SparseTimeSparseProduct> Type;
+};
+
+template<typename LhsNested, typename RhsNested, int ProductMode>
+struct ei_traits<SparseProduct<LhsNested, RhsNested, ProductMode> >
+{
+ // clean the nested types:
+ typedef typename ei_cleantype<LhsNested>::type _LhsNested;
+ typedef typename ei_cleantype<RhsNested>::type _RhsNested;
+ typedef typename _LhsNested::Scalar Scalar;
+
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+
+ RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+ ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+ InnerSize = EIGEN_ENUM_MIN(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+ MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+// LhsIsRowMajor = (LhsFlags & RowMajorBit)==RowMajorBit,
+// RhsIsRowMajor = (RhsFlags & RowMajorBit)==RowMajorBit,
+
+ EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+ ResultIsSparse = ProductMode==SparseTimeSparseProduct || ProductMode==DiagonalProduct,
+
+ RemovedBits = ~( (EvalToRowMajor ? 0 : RowMajorBit) | (ResultIsSparse ? 0 : SparseBit) ),
+
+ Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeAssigningBit
+ | EvalBeforeNestingBit,
+
+ CoeffReadCost = Dynamic
+ };
+
+ typedef typename ei_meta_if<ResultIsSparse,
+ SparseMatrixBase<SparseProduct<LhsNested, RhsNested, ProductMode> >,
+ MatrixBase<SparseProduct<LhsNested, RhsNested, ProductMode> > >::ret Base;
+};
+
+template<typename LhsNested, typename RhsNested, int ProductMode>
+class SparseProduct : ei_no_assignment_operator,
+ public ei_traits<SparseProduct<LhsNested, RhsNested, ProductMode> >::Base
+{
+ public:
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(SparseProduct)
+
+ private:
+
+ typedef typename ei_traits<SparseProduct>::_LhsNested _LhsNested;
+ typedef typename ei_traits<SparseProduct>::_RhsNested _RhsNested;
+
+ public:
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ ei_assert(lhs.cols() == rhs.rows());
+
+ enum {
+ ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
+ || _RhsNested::RowsAtCompileTime==Dynamic
+ || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
+ AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwise()*v2
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+ }
+
+ EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
+template<typename Lhs, typename Rhs, typename ResultType>
+static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+ typedef typename ei_traits<typename ei_cleantype<Lhs>::type>::Scalar Scalar;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ int rows = lhs.innerSize();
+ int cols = rhs.outerSize();
+ //int size = lhs.outerSize();
+ ei_assert(lhs.outerSize() == rhs.innerSize());
+
+ // allocate a temporary buffer
+ AmbiVector<Scalar> tempVector(rows);
+
+ // estimate the number of non zero entries
+ float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
+ float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
+ float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
+
+ res.resize(rows, cols);
+ res.startFill(int(ratioRes*rows*cols));
+ for (int j=0; j<cols; ++j)
+ {
+ // let's do a more accurate determination of the nnz ratio for the current column j of res
+ //float ratioColRes = std::min(ratioLhs * rhs.innerNonZeros(j), 1.f);
+ // FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
+ float ratioColRes = ratioRes;
+ tempVector.init(ratioColRes);
+ tempVector.setZero();
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
+ tempVector.restart();
+ Scalar x = rhsIt.value();
+ for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+ {
+ tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
+ }
+ }
+ for (typename AmbiVector<Scalar>::Iterator it(tempVector); it; ++it)
+ if (ResultType::Flags&RowMajorBit)
+ res.fill(j,it.index()) = it.value();
+ else
+ res.fill(it.index(), j) = it.value();
+ }
+ res.endFill();
+}
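+// In other words, for each outer index j the kernel above computes
+//   res.col(j) = sum_k rhs(k,j) * lhs.col(k),
+// accumulating into the AmbiVector and then copying its non zeros into res.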
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = ei_traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = ei_traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = ei_traits<ResultType>::Flags&RowMajorBit>
+struct ei_sparse_product_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct ei_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename ei_traits<typename ei_cleantype<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typename ei_cleantype<ResultType>::type _res(res.rows(), res.cols());
+ ei_sparse_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct ei_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ // we need a col-major matrix to hold the result
+ typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ SparseTemporaryType _res(res.rows(), res.cols());
+ ei_sparse_product_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res);
+ res = _res;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ // let's transpose the product to get a column x column product
+ typename ei_cleantype<ResultType>::type _res(res.rows(), res.cols());
+ ei_sparse_product_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ // let's transpose the product to get a column x column product
+ typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ SparseTemporaryType _res(res.cols(), res.rows());
+ ei_sparse_product_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
+ res = _res.transpose();
+ }
+};
+
+// NOTE eventually we should transpose one argument even in this case, since the
+// product might be expensive if the result is not dense.
+// template<typename Lhs, typename Rhs, typename ResultType, int ResStorageOrder>
+// struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ResStorageOrder>
+// {
+// static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+// {
+// // trivial product as lhs.row/rhs.col dot products
+// // loop over the preferred order of the result
+// }
+// };
+
+// NOTE the two other cases (col row *) must never occur since they are caught
+// by ProductReturnType which transforms them to (col col *) by evaluating the rhs.
+
+
+// template<typename Derived>
+// template<typename Lhs, typename Rhs>
+// inline Derived& SparseMatrixBase<Derived>::lazyAssign(const SparseProduct<Lhs,Rhs>& product)
+// {
+// // std::cout << "sparse product to dense\n";
+// ei_sparse_product_selector<
+// typename ei_cleantype<Lhs>::type,
+// typename ei_cleantype<Rhs>::type,
+// typename ei_cleantype<Derived>::type>::run(product.lhs(),product.rhs(),derived());
+// return derived();
+// }
+
+// sparse = sparse * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseProduct<Lhs,Rhs,SparseTimeSparseProduct>& product)
+{
+ ei_sparse_product_selector<
+ typename ei_cleantype<Lhs>::type,
+ typename ei_cleantype<Rhs>::type,
+ Derived>::run(product.lhs(),product.rhs(),derived());
+ return derived();
+}
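+// Illustrative usage: assigning a sparse product runs the selector above, e.g.
+//   SparseMatrix<float> A(8,8), B(8,8), C(8,8);
+//   C = A * B; // builds a SparseProduct expression, evaluated by ei_sparse_product_selector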
+
+// dense = sparse * dense
+// template<typename Derived>
+// template<typename Lhs, typename Rhs>
+// Derived& MatrixBase<Derived>::lazyAssign(const SparseProduct<Lhs,Rhs,SparseTimeDenseProduct>& product)
+// {
+// typedef typename ei_cleantype<Lhs>::type _Lhs;
+// typedef typename _Lhs::InnerIterator LhsInnerIterator;
+// enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit };
+// derived().setZero();
+// for (int j=0; j<product.lhs().outerSize(); ++j)
+// for (LhsInnerIterator i(product.lhs(),j); i; ++i)
+// derived().row(LhsIsRowMajor ? j : i.index()) += i.value() * product.rhs().row(LhsIsRowMajor ? i.index() : j);
+// return derived();
+// }
+
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::lazyAssign(const SparseProduct<Lhs,Rhs,SparseTimeDenseProduct>& product)
+{
+ typedef typename ei_cleantype<Lhs>::type _Lhs;
+ typedef typename ei_cleantype<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum {
+ LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
+ LhsIsSelfAdjoint = (_Lhs::Flags&SelfAdjointBit)==SelfAdjointBit,
+ ProcessFirstHalf = LhsIsSelfAdjoint
+ && ( ((_Lhs::Flags&(UpperTriangularBit|LowerTriangularBit))==0)
+ || ( (_Lhs::Flags&UpperTriangularBit) && !LhsIsRowMajor)
+ || ( (_Lhs::Flags&LowerTriangularBit) && LhsIsRowMajor) ),
+ ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf)
+ };
+ derived().setZero();
+ for (int j=0; j<product.lhs().outerSize(); ++j)
+ {
+ LhsInnerIterator i(product.lhs(),j);
+ if (ProcessSecondHalf && i && (i.index()==j))
+ {
+ derived().row(j) += i.value() * product.rhs().row(j);
+ ++i;
+ }
+ Block<Derived,1,Derived::ColsAtCompileTime> res(derived().row(LhsIsRowMajor ? j : 0));
+ for (; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ if (LhsIsSelfAdjoint)
+ {
+ int a = LhsIsRowMajor ? j : i.index();
+ int b = LhsIsRowMajor ? i.index() : j;
+ Scalar v = i.value();
+ derived().row(a) += (v) * product.rhs().row(b);
+ derived().row(b) += ei_conj(v) * product.rhs().row(a);
+ }
+ else if (LhsIsRowMajor)
+ res += i.value() * product.rhs().row(i.index());
+ else
+ derived().row(i.index()) += i.value() * product.rhs().row(j);
+ }
+ if (ProcessFirstHalf && i && (i.index()==j))
+ derived().row(j) += i.value() * product.rhs().row(j);
+ }
+ return derived();
+}
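+// Note on the self-adjoint path above: only one triangular half of the lhs is stored,
+// so each stored coefficient contributes to two rows of the result, the mirrored
+// contribution using the conjugated value.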
+
+// dense = dense * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::lazyAssign(const SparseProduct<Lhs,Rhs,DenseTimeSparseProduct>& product)
+{
+ typedef typename ei_cleantype<Rhs>::type _Rhs;
+ typedef typename _Rhs::InnerIterator RhsInnerIterator;
+ enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit };
+ derived().setZero();
+ for (int j=0; j<product.rhs().outerSize(); ++j)
+ for (RhsInnerIterator i(product.rhs(),j); i; ++i)
+ derived().col(RhsIsRowMajor ? i.index() : j) += i.value() * product.lhs().col(RhsIsRowMajor ? j : i.index());
+ return derived();
+}
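+// Here the product is driven by the sparsity of the rhs: for a column-major rhs, each
+// stored coefficient rhs(k,j) scales lhs.col(k) and accumulates it into column j of the result.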
+
+// sparse * sparse
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const typename SparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return typename SparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+// sparse * dense
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const typename SparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return typename SparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseRedux.h b/extern/Eigen2/Eigen/src/Sparse/SparseRedux.h
new file mode 100644
index 00000000000..f0d3705488e
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseRedux.h
@@ -0,0 +1,40 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEREDUX_H
+#define EIGEN_SPARSEREDUX_H
+
+template<typename Derived>
+typename ei_traits<Derived>::Scalar
+SparseMatrixBase<Derived>::sum() const
+{
+  ei_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ Scalar res = 0;
+ for (int j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+ res += iter.value();
+ return res;
+}
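+// Example (sketch): for a SparseMatrix<double> A storing the values {3.0, -1.0},
+//   double s = A.sum(); // s == 2.0; only stored coefficients are visited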
+
+#endif // EIGEN_SPARSEREDUX_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h b/extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h
new file mode 100644
index 00000000000..89a14d70707
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h
@@ -0,0 +1,85 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRANSPOSE_H
+#define EIGEN_SPARSETRANSPOSE_H
+
+template<typename MatrixType>
+struct ei_traits<SparseTranspose<MatrixType> > : ei_traits<Transpose<MatrixType> >
+{};
+
+template<typename MatrixType> class SparseTranspose
+ : public SparseMatrixBase<SparseTranspose<MatrixType> >
+{
+ public:
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(SparseTranspose)
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline SparseTranspose(const MatrixType& matrix) : m_matrix(matrix) {}
+
+ //EIGEN_INHERIT_ASSIGNMENT_OPERATORS(SparseTranspose)
+
+ inline int rows() const { return m_matrix.cols(); }
+ inline int cols() const { return m_matrix.rows(); }
+ inline int nonZeros() const { return m_matrix.nonZeros(); }
+
+    // FIXME should we keep them?
+ inline Scalar& coeffRef(int row, int col)
+ { return m_matrix.const_cast_derived().coeffRef(col, row); }
+
+ inline const Scalar coeff(int row, int col) const
+ { return m_matrix.coeff(col, row); }
+
+ inline const Scalar coeff(int index) const
+ { return m_matrix.coeff(index); }
+
+ inline Scalar& coeffRef(int index)
+ { return m_matrix.const_cast_derived().coeffRef(index); }
+
+ protected:
+ const typename MatrixType::Nested m_matrix;
+};
+
+template<typename MatrixType> class SparseTranspose<MatrixType>::InnerIterator : public MatrixType::InnerIterator
+{
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseTranspose& trans, int outer)
+ : MatrixType::InnerIterator(trans.m_matrix, outer)
+ {}
+};
+
+template<typename MatrixType> class SparseTranspose<MatrixType>::ReverseInnerIterator : public MatrixType::ReverseInnerIterator
+{
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTranspose& xpr, int outer)
+ : MatrixType::ReverseInnerIterator(xpr.m_matrix, outer)
+ {}
+};
+
+#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h b/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h
new file mode 100644
index 00000000000..393cdda6ea2
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h
@@ -0,0 +1,148 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEUTIL_H
+#define EIGEN_SPARSEUTIL_H
+
+#ifdef NDEBUG
+#define EIGEN_DBG_SPARSE(X)
+#else
+#define EIGEN_DBG_SPARSE(X) X
+#endif
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
+{ \
+ return Base::operator Op(other.derived()); \
+} \
+EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
+{ \
+ return Base::operator Op(other); \
+}
+
+#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename Other> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
+{ \
+ return Base::operator Op(scalar); \
+}
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
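+// These macros are consumed by the concrete sparse classes; for instance SparseVector
+// (later in this patch) pulls in individual operators:
+//   EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
+// which forwards operator+= to the base class while returning the derived type.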
+
+#define _EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
+typedef BaseClass Base; \
+typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
+typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+typedef typename Eigen::ei_nested<Derived>::type Nested; \
+enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
+ Flags = Eigen::ei_traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
+
+#define EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived) \
+_EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
+
+enum SparseBackend {
+ DefaultBackend,
+ Taucs,
+ Cholmod,
+ SuperLU,
+ UmfPack
+};
+
+// solver flags
+enum {
+ CompleteFactorization = 0x0000, // the default
+ IncompleteFactorization = 0x0001,
+ MemoryEfficient = 0x0002,
+
+ // For LLT Cholesky:
+ SupernodalMultifrontal = 0x0010,
+ SupernodalLeftLooking = 0x0020,
+
+ // Ordering methods:
+ NaturalOrdering = 0x0100, // the default
+ MinimumDegree_AT_PLUS_A = 0x0200,
+ MinimumDegree_ATA = 0x0300,
+ ColApproxMinimumDegree = 0x0400,
+ Metis = 0x0500,
+ Scotch = 0x0600,
+ Chaco = 0x0700,
+ OrderingMask = 0x0f00
+};
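+// The solver flags are meant to be OR-ed together; an illustrative sketch (whether a
+// given backend honors a particular flag depends on that backend):
+//   SparseLU<SparseMatrix<double>,SuperLU> lu(A, IncompleteFactorization|ColApproxMinimumDegree);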
+
+template<typename Derived> class SparseMatrixBase;
+template<typename _Scalar, int _Flags = 0> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0> class DynamicSparseMatrix;
+template<typename _Scalar, int _Flags = 0> class SparseVector;
+template<typename _Scalar, int _Flags = 0> class MappedSparseMatrix;
+
+template<typename MatrixType> class SparseTranspose;
+template<typename MatrixType, int Size> class SparseInnerVectorSet;
+template<typename Derived> class SparseCwise;
+template<typename UnaryOp, typename MatrixType> class SparseCwiseUnaryOp;
+template<typename BinaryOp, typename Lhs, typename Rhs> class SparseCwiseBinaryOp;
+template<typename ExpressionType,
+ unsigned int Added, unsigned int Removed> class SparseFlagged;
+template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
+
+template<typename Lhs, typename Rhs> struct ei_sparse_product_mode;
+template<typename Lhs, typename Rhs, int ProductMode = ei_sparse_product_mode<Lhs,Rhs>::value> struct SparseProductReturnType;
+
+const int CoherentAccessPattern = 0x1;
+const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
+
+// const int AccessPatternNotSupported = 0x0;
+// const int AccessPatternSupported = 0x1;
+//
+// template<typename MatrixType, int AccessPattern> struct ei_support_access_pattern
+// {
+// enum { ret = (int(ei_traits<MatrixType>::SupportedAccessPatterns) & AccessPattern) == AccessPattern
+// ? AccessPatternSupported
+// : AccessPatternNotSupported
+// };
+// };
+
+template<typename T> class ei_eval<T,IsSparse>
+{
+ typedef typename ei_traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = ei_traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
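+// With this specialization, eval() on any sparse expression (see SparseMatrixBase::eval())
+// produces a plain SparseMatrix with matching scalar type and flags.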
+
+#endif // EIGEN_SPARSEUTIL_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseVector.h b/extern/Eigen2/Eigen/src/Sparse/SparseVector.h
new file mode 100644
index 00000000000..8e5a6efeda8
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SparseVector.h
@@ -0,0 +1,365 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEVECTOR_H
+#define EIGEN_SPARSEVECTOR_H
+
+/** \class SparseVector
+ *
+ * \brief a sparse vector class
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ */
+template<typename _Scalar, int _Flags>
+struct ei_traits<SparseVector<_Scalar, _Flags> >
+{
+ typedef _Scalar Scalar;
+ enum {
+ IsColVector = _Flags & RowMajorBit ? 0 : 1,
+
+ RowsAtCompileTime = IsColVector ? Dynamic : 1,
+ ColsAtCompileTime = IsColVector ? 1 : Dynamic,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ Flags = SparseBit | _Flags,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+
+template<typename _Scalar, int _Flags>
+class SparseVector
+ : public SparseMatrixBase<SparseVector<_Scalar, _Flags> >
+{
+ public:
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseVector)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
+// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =)
+
+  public:
+
+ typedef SparseMatrixBase<SparseVector> SparseBase;
+ enum { IsColVector = ei_traits<SparseVector>::IsColVector };
+
+ CompressedStorage<Scalar> m_data;
+ int m_size;
+
+ CompressedStorage<Scalar>& _data() { return m_data; }
+    const CompressedStorage<Scalar>& _data() const { return m_data; }
+
+ public:
+
+ EIGEN_STRONG_INLINE int rows() const { return IsColVector ? m_size : 1; }
+ EIGEN_STRONG_INLINE int cols() const { return IsColVector ? 1 : m_size; }
+ EIGEN_STRONG_INLINE int innerSize() const { return m_size; }
+ EIGEN_STRONG_INLINE int outerSize() const { return 1; }
+    EIGEN_STRONG_INLINE int innerNonZeros(int j) const { ei_assert(j==0); return m_data.size(); }
+
+ EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); }
+ EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); }
+
+ EIGEN_STRONG_INLINE const int* _innerIndexPtr() const { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE int* _innerIndexPtr() { return &m_data.index(0); }
+
+ inline Scalar coeff(int row, int col) const
+ {
+ ei_assert((IsColVector ? col : row)==0);
+ return coeff(IsColVector ? row : col);
+ }
+ inline Scalar coeff(int i) const { return m_data.at(i); }
+
+ inline Scalar& coeffRef(int row, int col)
+ {
+ ei_assert((IsColVector ? col : row)==0);
+      return coeffRef(IsColVector ? row : col);
+ }
+
+ /** \returns a reference to the coefficient value at given index \a i
+      * This operation involves a log(rho*size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ *
+ * This insertion might be very costly if the number of nonzeros above \a i is large.
+ */
+ inline Scalar& coeffRef(int i)
+ {
+ return m_data.atWithInsertion(i);
+ }
+
+ public:
+
+ class InnerIterator;
+
+ inline void setZero() { m_data.clear(); }
+
+ /** \returns the number of non zero coefficients */
+ inline int nonZeros() const { return m_data.size(); }
+
+    /** Preallocates room for \a reserveSize non zeros */
+    inline void reserve(int reserveSize) { m_data.reserve(reserveSize); }
+
+ inline void startFill(int reserve)
+ {
+ setZero();
+ m_data.reserve(reserve);
+ }
+
+    /** \returns a reference to a newly appended coefficient at coordinates \a r, \a c.
+      * Coefficients must be appended with increasing inner indices. */
+    inline Scalar& fill(int r, int c)
+ {
+ ei_assert(r==0 || c==0);
+ return fill(IsColVector ? r : c);
+ }
+
+ inline Scalar& fill(int i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ inline Scalar& fillrand(int r, int c)
+ {
+ ei_assert(r==0 || c==0);
+ return fillrand(IsColVector ? r : c);
+ }
+
+    /** Like fill(), but the coefficient can be inserted at an arbitrary index:
+      * a sorted insertion shifts every stored coefficient with a larger index. */
+ inline Scalar& fillrand(int i)
+ {
+ int startId = 0;
+ int id = m_data.size() - 1;
+ m_data.resize(id+2,1);
+
+ while ( (id >= startId) && (m_data.index(id) > i) )
+ {
+ m_data.index(id+1) = m_data.index(id);
+ m_data.value(id+1) = m_data.value(id);
+ --id;
+ }
+ m_data.index(id+1) = i;
+ m_data.value(id+1) = 0;
+ return m_data.value(id+1);
+ }
+
+ inline void endFill() {}
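+    // Fill API sketch (illustrative): fill() appends, so indices must be increasing;
+    // fillrand() accepts arbitrary indices at the cost of a shifting insertion.
+    //   SparseVector<double> v(10);
+    //   v.startFill(2);
+    //   v.fill(3) = 1.0;
+    //   v.fillrand(1) = 2.0; // inserted before index 3
+    //   v.endFill();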
+
+ void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
+ {
+ m_data.prune(reference,epsilon);
+ }
+
+ void resize(int rows, int cols)
+ {
+ ei_assert(rows==1 || cols==1);
+ resize(IsColVector ? rows : cols);
+ }
+
+ void resize(int newSize)
+ {
+ m_size = newSize;
+ m_data.clear();
+ }
+
+ void resizeNonZeros(int size) { m_data.resize(size); }
+
+ inline SparseVector() : m_size(0) { resize(0); }
+
+ inline SparseVector(int size) : m_size(0) { resize(size); }
+
+ inline SparseVector(int rows, int cols) : m_size(0) { resize(rows,cols); }
+
+ template<typename OtherDerived>
+ inline SparseVector(const MatrixBase<OtherDerived>& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ template<typename OtherDerived>
+ inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline SparseVector(const SparseVector& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(SparseVector& other)
+ {
+ std::swap(m_size, other.m_size);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseVector& operator=(const SparseVector& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.size());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ return Base::operator=(other);
+ }
+
+// const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+// if (needToTranspose)
+// {
+// // two passes algorithm:
+// // 1 - compute the number of coeffs per dest inner vector
+// // 2 - do the actual copy/eval
+//     // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+// typedef typename ei_nested<OtherDerived,2>::type OtherCopy;
+// OtherCopy otherCopy(other.derived());
+// typedef typename ei_cleantype<OtherCopy>::type _OtherCopy;
+//
+// resize(other.rows(), other.cols());
+// Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
+// // pass 1
+// // FIXME the above copy could be merged with that pass
+// for (int j=0; j<otherCopy.outerSize(); ++j)
+// for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+// ++m_outerIndex[it.index()];
+//
+// // prefix sum
+// int count = 0;
+// VectorXi positions(outerSize());
+// for (int j=0; j<outerSize(); ++j)
+// {
+// int tmp = m_outerIndex[j];
+// m_outerIndex[j] = count;
+// positions[j] = count;
+// count += tmp;
+// }
+// m_outerIndex[outerSize()] = count;
+// // alloc
+// m_data.resize(count);
+// // pass 2
+// for (int j=0; j<otherCopy.outerSize(); ++j)
+// for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+// {
+// int pos = positions[it.index()]++;
+// m_data.index(pos) = j;
+// m_data.value(pos) = it.value();
+// }
+//
+// return *this;
+// }
+// else
+// {
+// // there is no special optimization
+// return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
+// }
+// }
+
+ friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
+ {
+      for (int i=0; i<m.nonZeros(); ++i)
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ s << std::endl;
+ return s;
+ }
+
+    // this specialized version does not seem to be faster
+// Scalar dot(const SparseVector& other) const
+// {
+// int i=0, j=0;
+// Scalar res = 0;
+// asm("#begindot");
+// while (i<nonZeros() && j<other.nonZeros())
+// {
+// if (m_data.index(i)==other.m_data.index(j))
+// {
+// res += m_data.value(i) * ei_conj(other.m_data.value(j));
+// ++i; ++j;
+// }
+// else if (m_data.index(i)<other.m_data.index(j))
+// ++i;
+// else
+// ++j;
+// }
+// asm("#enddot");
+// return res;
+// }
+
+ /** Destructor */
+ inline ~SparseVector() {}
+};
+
+template<typename Scalar, int _Flags>
+class SparseVector<Scalar,_Flags>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseVector& vec, int outer=0)
+ : m_data(vec.m_data), m_id(0), m_end(m_data.size())
+ {
+ ei_assert(outer==0);
+ }
+
+ InnerIterator(const CompressedStorage<Scalar>& data)
+ : m_data(data), m_id(0), m_end(m_data.size())
+ {}
+
+ template<unsigned int Added, unsigned int Removed>
+ InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, int outer)
+ : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size())
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_data.value(m_id); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
+
+ inline int index() const { return m_data.index(m_id); }
+ inline int row() const { return IsColVector ? index() : 0; }
+ inline int col() const { return IsColVector ? 0 : index(); }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const CompressedStorage<Scalar>& m_data;
+ int m_id;
+ const int m_end;
+};
+
+#endif // EIGEN_SPARSEVECTOR_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h b/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h
new file mode 100644
index 00000000000..3c9a4fcced6
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h
@@ -0,0 +1,565 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SUPERLUSUPPORT_H
+#define EIGEN_SUPERLUSUPPORT_H
+
+// declaration of gssvx taken from GMM++
+#define DECL_GSSVX(NAMESPACE,FNAME,FLOATTYPE,KEYTYPE) \
+ inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \
+ int *perm_c, int *perm_r, int *etree, char *equed, \
+ FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
+ SuperMatrix *U, void *work, int lwork, \
+ SuperMatrix *B, SuperMatrix *X, \
+ FLOATTYPE *recip_pivot_growth, \
+ FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \
+ SuperLUStat_t *stats, int *info, KEYTYPE) { \
+ using namespace NAMESPACE; \
+ mem_usage_t mem_usage; \
+ NAMESPACE::FNAME(options, A, perm_c, perm_r, etree, equed, R, C, L, \
+ U, work, lwork, B, X, recip_pivot_growth, rcond, \
+ ferr, berr, &mem_usage, stats, info); \
+ return mem_usage.for_lu; /* bytes used by the factor storage */ \
+ }
+
+DECL_GSSVX(SuperLU_S,sgssvx,float,float)
+DECL_GSSVX(SuperLU_C,cgssvx,float,std::complex<float>)
+DECL_GSSVX(SuperLU_D,dgssvx,double,double)
+DECL_GSSVX(SuperLU_Z,zgssvx,double,std::complex<double>)
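+// The four expansions above define overloads of SuperLU_gssvx dispatched on the dummy
+// trailing KEYTYPE argument: passing double() selects dgssvx, std::complex<float>()
+// selects cgssvx, and so on.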
+
+template<typename MatrixType>
+struct SluMatrixMapHelper;
+
+/** \internal
+ *
+ * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
+ * and dense matrices. Supernodal and other fancy formats are not supported by this wrapper.
+ *
+ * This wrapper class mainly aims to avoid the need for dynamic allocation of the storage structure.
+ */
+struct SluMatrix : SuperMatrix
+{
+ SluMatrix()
+ {
+ Store = &storage;
+ }
+
+ SluMatrix(const SluMatrix& other)
+ : SuperMatrix(other)
+ {
+ Store = &storage;
+ storage = other.storage;
+ }
+
+ SluMatrix& operator=(const SluMatrix& other)
+ {
+ SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
+ Store = &storage;
+ storage = other.storage;
+ return *this;
+ }
+
+ struct
+ {
+ union {int nnz;int lda;};
+ void *values;
+ int *innerInd;
+ int *outerInd;
+ } storage;
+
+ void setStorageType(Stype_t t)
+ {
+ Stype = t;
+ if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
+ Store = &storage;
+ else
+ {
+ ei_assert(false && "storage type not supported");
+ Store = 0;
+ }
+ }
+
+ template<typename Scalar>
+ void setScalarType()
+ {
+ if (ei_is_same_type<Scalar,float>::ret)
+ Dtype = SLU_S;
+ else if (ei_is_same_type<Scalar,double>::ret)
+ Dtype = SLU_D;
+ else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
+ Dtype = SLU_C;
+ else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
+ Dtype = SLU_Z;
+ else
+ {
+ ei_assert(false && "Scalar type not supported by SuperLU");
+ }
+ }
+
+ template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+ static SluMatrix Map(Matrix<Scalar,Rows,Cols,Options,MRows,MCols>& mat)
+ {
+ typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+    ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
+ SluMatrix res;
+ res.setStorageType(SLU_DN);
+ res.setScalarType<Scalar>();
+ res.Mtype = SLU_GE;
+
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+
+ res.storage.lda = mat.stride();
+ res.storage.values = mat.data();
+ return res;
+ }
+
+ template<typename MatrixType>
+ static SluMatrix Map(SparseMatrixBase<MatrixType>& mat)
+ {
+ SluMatrix res;
+ if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+ {
+ res.setStorageType(SLU_NR);
+ res.nrow = mat.cols();
+ res.ncol = mat.rows();
+ }
+ else
+ {
+ res.setStorageType(SLU_NC);
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ }
+
+ res.Mtype = SLU_GE;
+
+ res.storage.nnz = mat.nonZeros();
+ res.storage.values = mat.derived()._valuePtr();
+ res.storage.innerInd = mat.derived()._innerIndexPtr();
+ res.storage.outerInd = mat.derived()._outerIndexPtr();
+
+ res.setScalarType<typename MatrixType::Scalar>();
+
+ // FIXME the following is not very accurate
+ if (MatrixType::Flags & UpperTriangular)
+ res.Mtype = SLU_TRU;
+ if (MatrixType::Flags & LowerTriangular)
+ res.Mtype = SLU_TRL;
+ if (MatrixType::Flags & SelfAdjoint)
+ ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
+ return res;
+ }
+};
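+// Illustrative sketch: wrapping an Eigen object for SuperLU without copying.
+//   SparseMatrix<double> A(100,100); // filled elsewhere
+//   SluMatrix slu = SluMatrix::Map(A); // shares A's value/index arrays
+// The resulting SluMatrix can then be handed to SuperLU routines expecting a SuperMatrix.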
+
+template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
+{
+ typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+ static void run(MatrixType& mat, SluMatrix& res)
+ {
+    ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
+ res.setStorageType(SLU_DN);
+ res.setScalarType<Scalar>();
+ res.Mtype = SLU_GE;
+
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+
+ res.storage.lda = mat.stride();
+ res.storage.values = mat.data();
+ }
+};
+
+template<typename Derived>
+struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
+{
+ typedef Derived MatrixType;
+ static void run(MatrixType& mat, SluMatrix& res)
+ {
+ if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+ {
+ res.setStorageType(SLU_NR);
+ res.nrow = mat.cols();
+ res.ncol = mat.rows();
+ }
+ else
+ {
+ res.setStorageType(SLU_NC);
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ }
+
+ res.Mtype = SLU_GE;
+
+ res.storage.nnz = mat.nonZeros();
+ res.storage.values = mat._valuePtr();
+ res.storage.innerInd = mat._innerIndexPtr();
+ res.storage.outerInd = mat._outerIndexPtr();
+
+ res.setScalarType<typename MatrixType::Scalar>();
+
+ // FIXME the following is not very accurate
+ if (MatrixType::Flags & UpperTriangular)
+ res.Mtype = SLU_TRU;
+ if (MatrixType::Flags & LowerTriangular)
+ res.Mtype = SLU_TRL;
+ if (MatrixType::Flags & SelfAdjoint)
+ ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
+ }
+};
+
+template<typename Derived>
+SluMatrix SparseMatrixBase<Derived>::asSluMatrix()
+{
+ return SluMatrix::Map(derived());
+}
+
+/** View a SuperLU matrix as an Eigen expression */
+template<typename Scalar, int Flags>
+MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(SluMatrix& sluMat)
+{
+ if ((Flags&RowMajorBit)==RowMajorBit)
+ {
+ assert(sluMat.Stype == SLU_NR);
+ m_innerSize = sluMat.ncol;
+ m_outerSize = sluMat.nrow;
+ }
+ else
+ {
+ assert(sluMat.Stype == SLU_NC);
+ m_innerSize = sluMat.nrow;
+ m_outerSize = sluMat.ncol;
+ }
+ m_outerIndex = sluMat.storage.outerInd;
+ m_innerIndices = sluMat.storage.innerInd;
+ m_values = reinterpret_cast<Scalar*>(sluMat.storage.values);
+ m_nnz = sluMat.storage.outerInd[m_outerSize];
+}
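+
+// Editor's note: a hedged sketch of the reverse direction (hypothetical
+// helper name): wrapping a column-major SuperLU matrix as an Eigen
+// expression without copying any data.
+inline MappedSparseMatrix<double> ei_slu_view_sketch(SluMatrix& sluMat)
+{
+  // The default (column-major) flags require sluMat.Stype == SLU_NC.
+  return MappedSparseMatrix<double>(sluMat);
+}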
+
+template<typename MatrixType>
+class SparseLU<MatrixType,SuperLU> : public SparseLU<MatrixType>
+{
+ protected:
+ typedef SparseLU<MatrixType> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar,LowerTriangular|UnitDiagBit> LMatrixType;
+ typedef SparseMatrix<Scalar,UpperTriangular> UMatrixType;
+ using Base::m_flags;
+ using Base::m_status;
+
+ public:
+
+ SparseLU(int flags = NaturalOrdering)
+ : Base(flags)
+ {
+ }
+
+ SparseLU(const MatrixType& matrix, int flags = NaturalOrdering)
+ : Base(flags)
+ {
+ compute(matrix);
+ }
+
+ ~SparseLU()
+ {
+ }
+
+ inline const LMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_l;
+ }
+
+ inline const UMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_u;
+ }
+
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_p;
+ }
+
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_q;
+ }
+
+ Scalar determinant() const;
+
+ template<typename BDerived, typename XDerived>
+ bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const;
+
+ void compute(const MatrixType& matrix);
+
+ protected:
+
+ void extractData() const;
+
+ protected:
+ // cached data to reduce reallocation, etc.
+ mutable LMatrixType m_l;
+ mutable UMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+
+ mutable SparseMatrix<Scalar> m_matrix;
+ mutable SluMatrix m_sluA;
+ mutable SuperMatrix m_sluL, m_sluU;
+ mutable SluMatrix m_sluB, m_sluX;
+ mutable SuperLUStat_t m_sluStat;
+ mutable superlu_options_t m_sluOptions;
+ mutable std::vector<int> m_sluEtree;
+ mutable std::vector<RealScalar> m_sluRscale, m_sluCscale;
+ mutable std::vector<RealScalar> m_sluFerr, m_sluBerr;
+ mutable char m_sluEqued;
+ mutable bool m_extractedDataAreDirty;
+};
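+
+// Editor's note: a minimal end-to-end sketch of this backend (the function
+// name is hypothetical; succeeded() is assumed from the generic SparseLU
+// base of the Eigen2 sparse-solver interface).
+inline bool ei_superlu_solve_sketch(const SparseMatrix<double>& A,
+                                    const Matrix<double,Dynamic,1>& b,
+                                    Matrix<double,Dynamic,1>& x)
+{
+  SparseLU<SparseMatrix<double>,SuperLU> lu(A); // factorizes in the constructor
+  x.resize(b.rows());
+  return lu.succeeded() && lu.solve(b, &x);     // on success x solves A*x = b
+}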
+
+template<typename MatrixType>
+void SparseLU<MatrixType,SuperLU>::compute(const MatrixType& a)
+{
+ const int size = a.rows();
+ m_matrix = a;
+
+ set_default_options(&m_sluOptions);
+ m_sluOptions.ColPerm = NATURAL;
+ m_sluOptions.PrintStat = NO;
+ m_sluOptions.ConditionNumber = NO;
+ m_sluOptions.Trans = NOTRANS;
+ // m_sluOptions.Equil = NO;
+
+ switch (Base::orderingMethod())
+ {
+ case NaturalOrdering : m_sluOptions.ColPerm = NATURAL; break;
+ case MinimumDegree_AT_PLUS_A : m_sluOptions.ColPerm = MMD_AT_PLUS_A; break;
+ case MinimumDegree_ATA : m_sluOptions.ColPerm = MMD_ATA; break;
+ case ColApproxMinimumDegree : m_sluOptions.ColPerm = COLAMD; break;
+ default:
+ std::cerr << "Eigen: ordering method \"" << Base::orderingMethod() << "\" not supported by the SuperLU backend\n";
+ m_sluOptions.ColPerm = NATURAL;
+  }
+
+ m_sluA = m_matrix.asSluMatrix();
+ memset(&m_sluL,0,sizeof m_sluL);
+ memset(&m_sluU,0,sizeof m_sluU);
+ m_sluEqued = 'B';
+ int info = 0;
+
+ m_p.resize(size);
+ m_q.resize(size);
+ m_sluRscale.resize(size);
+ m_sluCscale.resize(size);
+ m_sluEtree.resize(size);
+
+ RealScalar recip_pivot_gross, rcond;
+ RealScalar ferr, berr;
+
+ // set empty B and X
+ m_sluB.setStorageType(SLU_DN);
+ m_sluB.setScalarType<Scalar>();
+ m_sluB.Mtype = SLU_GE;
+ m_sluB.storage.values = 0;
+ m_sluB.nrow = m_sluB.ncol = 0;
+ m_sluB.storage.lda = size;
+ m_sluX = m_sluB;
+
+ StatInit(&m_sluStat);
+ SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+ &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_gross, &rcond,
+ &ferr, &berr,
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+
+ m_extractedDataAreDirty = true;
+
+  // FIXME how to better check for errors?
+ Base::m_succeeded = (info == 0);
+}
+
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool SparseLU<MatrixType,SuperLU>::solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> *x) const
+{
+ const int size = m_matrix.rows();
+ const int rhsCols = b.cols();
+ ei_assert(size==b.rows());
+
+ m_sluOptions.Fact = FACTORED;
+ m_sluOptions.IterRefine = NOREFINE;
+
+ m_sluFerr.resize(rhsCols);
+ m_sluBerr.resize(rhsCols);
+ m_sluB = SluMatrix::Map(b.const_cast_derived());
+ m_sluX = SluMatrix::Map(x->derived());
+
+ StatInit(&m_sluStat);
+ int info = 0;
+ RealScalar recip_pivot_gross, rcond;
+ SuperLU_gssvx(
+ &m_sluOptions, &m_sluA,
+ m_q.data(), m_p.data(),
+ &m_sluEtree[0], &m_sluEqued,
+ &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_gross, &rcond,
+ &m_sluFerr[0], &m_sluBerr[0],
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+
+ return info==0;
+}
+
+//
+// The code of this extractData() function has been adapted from SuperLU's Matlab support code:
+//
+// Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+//
+// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+//
+template<typename MatrixType>
+void SparseLU<MatrixType,SuperLU>::extractData() const
+{
+ if (m_extractedDataAreDirty)
+ {
+ int upper;
+ int fsupc, istart, nsupr;
+ int lastl = 0, lastu = 0;
+ SCformat *Lstore = static_cast<SCformat*>(m_sluL.Store);
+ NCformat *Ustore = static_cast<NCformat*>(m_sluU.Store);
+ Scalar *SNptr;
+
+ const int size = m_matrix.rows();
+ m_l.resize(size,size);
+ m_l.resizeNonZeros(Lstore->nnz);
+ m_u.resize(size,size);
+ m_u.resizeNonZeros(Ustore->nnz);
+
+ int* Lcol = m_l._outerIndexPtr();
+ int* Lrow = m_l._innerIndexPtr();
+ Scalar* Lval = m_l._valuePtr();
+
+ int* Ucol = m_u._outerIndexPtr();
+ int* Urow = m_u._innerIndexPtr();
+ Scalar* Uval = m_u._valuePtr();
+
+    Lcol[0] = 0;
+    Ucol[0] = 0;
+
+ /* for each supernode */
+ for (int k = 0; k <= Lstore->nsuper; ++k)
+ {
+ fsupc = L_FST_SUPC(k);
+ istart = L_SUB_START(fsupc);
+ nsupr = L_SUB_START(fsupc+1) - istart;
+ upper = 1;
+
+ /* for each column in the supernode */
+ for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
+ {
+ SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
+
+ /* Extract U */
+ for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
+ {
+ Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Uval[lastu] != 0.0)
+ Urow[lastu++] = U_SUB(i);
+ }
+ for (int i = 0; i < upper; ++i)
+ {
+ /* upper triangle in the supernode */
+ Uval[lastu] = SNptr[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Uval[lastu] != 0.0)
+ Urow[lastu++] = L_SUB(istart+i);
+ }
+ Ucol[j+1] = lastu;
+
+ /* Extract L */
+ Lval[lastl] = 1.0; /* unit diagonal */
+ Lrow[lastl++] = L_SUB(istart + upper - 1);
+ for (int i = upper; i < nsupr; ++i)
+ {
+ Lval[lastl] = SNptr[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Lval[lastl] != 0.0)
+ Lrow[lastl++] = L_SUB(istart+i);
+ }
+ Lcol[j+1] = lastl;
+
+ ++upper;
+ } /* for j ... */
+
+ } /* for k ... */
+
+    // squeeze the matrices:
+ m_l.resizeNonZeros(lastl);
+ m_u.resizeNonZeros(lastu);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+template<typename MatrixType>
+typename SparseLU<MatrixType,SuperLU>::Scalar SparseLU<MatrixType,SuperLU>::determinant() const
+{
+ if (m_extractedDataAreDirty)
+ extractData();
+
+  // TODO this code could be moved to the default/base backend
+  // FIXME perhaps the scale factors m_sluRscale and m_sluCscale have to be taken into account?
+ Scalar det = Scalar(1);
+ for (int j=0; j<m_u.cols(); ++j)
+ {
+ if (m_u._outerIndexPtr()[j+1]-m_u._outerIndexPtr()[j] > 0)
+ {
+ int lastId = m_u._outerIndexPtr()[j+1]-1;
+ ei_assert(m_u._innerIndexPtr()[lastId]<=j);
+ if (m_u._innerIndexPtr()[lastId]==j)
+ {
+ det *= m_u._valuePtr()[lastId];
+ }
+ }
+ // std::cout << m_sluRscale[j] << " " << m_sluCscale[j] << " ";
+ }
+ return det;
+}
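+
+// Editor's note: with A = P^{-1} * L * U * Q^{-1} and L unit lower
+// triangular, det(A) equals the product of U's diagonal entries up to the
+// signs of the row/column permutations (and any equilibration scaling);
+// the loop above accumulates only that diagonal product, as the FIXME
+// points out.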
+
+#endif // EIGEN_SUPERLUSUPPORT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h b/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h
new file mode 100644
index 00000000000..4dddca7b622
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h
@@ -0,0 +1,210 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TAUCSSUPPORT_H
+#define EIGEN_TAUCSSUPPORT_H
+
+template<typename Derived>
+taucs_ccs_matrix SparseMatrixBase<Derived>::asTaucsMatrix()
+{
+ taucs_ccs_matrix res;
+ res.n = cols();
+ res.m = rows();
+ res.flags = 0;
+ res.colptr = derived()._outerIndexPtr();
+ res.rowind = derived()._innerIndexPtr();
+ res.values.v = derived()._valuePtr();
+ if (ei_is_same_type<Scalar,int>::ret)
+ res.flags |= TAUCS_INT;
+ else if (ei_is_same_type<Scalar,float>::ret)
+ res.flags |= TAUCS_SINGLE;
+ else if (ei_is_same_type<Scalar,double>::ret)
+ res.flags |= TAUCS_DOUBLE;
+ else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
+ res.flags |= TAUCS_SCOMPLEX;
+ else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
+ res.flags |= TAUCS_DCOMPLEX;
+ else
+ {
+ ei_assert(false && "Scalar type not supported by TAUCS");
+ }
+
+ if (Flags & UpperTriangular)
+ res.flags |= TAUCS_UPPER;
+ if (Flags & LowerTriangular)
+ res.flags |= TAUCS_LOWER;
+ if (Flags & SelfAdjoint)
+ res.flags |= (NumTraits<Scalar>::IsComplex ? TAUCS_HERMITIAN : TAUCS_SYMMETRIC);
+ else if ((Flags & UpperTriangular) || (Flags & LowerTriangular))
+ res.flags |= TAUCS_TRIANGULAR;
+
+ return res;
+}
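+
+// Editor's note: a hedged usage sketch (hypothetical helper name). The
+// returned taucs_ccs_matrix aliases the Eigen storage and must not outlive
+// or be modified independently of `mat`.
+inline taucs_ccs_matrix ei_as_taucs_sketch(SparseMatrix<double>& mat)
+{
+  return mat.asTaucsMatrix(); // flags include TAUCS_DOUBLE plus shape bits
+}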
+
+template<typename Scalar, int Flags>
+MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(taucs_ccs_matrix& taucsMat)
+{
+ m_innerSize = taucsMat.m;
+ m_outerSize = taucsMat.n;
+ m_outerIndex = taucsMat.colptr;
+ m_innerIndices = taucsMat.rowind;
+ m_values = reinterpret_cast<Scalar*>(taucsMat.values.v);
+ m_nnz = taucsMat.colptr[taucsMat.n];
+}
+
+template<typename MatrixType>
+class SparseLLT<MatrixType,Taucs> : public SparseLLT<MatrixType>
+{
+ protected:
+ typedef SparseLLT<MatrixType> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ using Base::MatrixLIsDirty;
+ using Base::SupernodalFactorIsDirty;
+ using Base::m_flags;
+ using Base::m_matrix;
+ using Base::m_status;
+
+ public:
+
+ SparseLLT(int flags = 0)
+ : Base(flags), m_taucsSupernodalFactor(0)
+ {
+ }
+
+ SparseLLT(const MatrixType& matrix, int flags = 0)
+ : Base(flags), m_taucsSupernodalFactor(0)
+ {
+ compute(matrix);
+ }
+
+ ~SparseLLT()
+ {
+ if (m_taucsSupernodalFactor)
+ taucs_supernodal_factor_free(m_taucsSupernodalFactor);
+ }
+
+    inline const typename Base::CholMatrixType& matrixL() const;
+
+ template<typename Derived>
+ void solveInPlace(MatrixBase<Derived> &b) const;
+
+ void compute(const MatrixType& matrix);
+
+ protected:
+ void* m_taucsSupernodalFactor;
+};
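+
+// Editor's note: a minimal usage sketch (hypothetical function name,
+// assuming a symmetric positive definite, column-major input).
+// solveInPlace overwrites b with the solution of A*x = b.
+inline void ei_taucs_llt_solve_sketch(const SparseMatrix<double>& A,
+                                      Matrix<double,Dynamic,1>& b)
+{
+  SparseLLT<SparseMatrix<double>,Taucs> llt(A); // supernodal multifrontal by default
+  llt.solveInPlace(b);
+}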
+
+template<typename MatrixType>
+void SparseLLT<MatrixType,Taucs>::compute(const MatrixType& a)
+{
+ if (m_taucsSupernodalFactor)
+ {
+ taucs_supernodal_factor_free(m_taucsSupernodalFactor);
+ m_taucsSupernodalFactor = 0;
+ }
+
+ if (m_flags & IncompleteFactorization)
+ {
+ taucs_ccs_matrix taucsMatA = const_cast<MatrixType&>(a).asTaucsMatrix();
+ taucs_ccs_matrix* taucsRes = taucs_ccs_factor_llt(&taucsMatA, Base::m_precision, 0);
+ // the matrix returned by Taucs is not necessarily sorted,
+ // so let's copy it in two steps
+ DynamicSparseMatrix<Scalar,RowMajor> tmp = MappedSparseMatrix<Scalar>(*taucsRes);
+ m_matrix = tmp;
+ free(taucsRes);
+ m_status = (m_status & ~(CompleteFactorization|MatrixLIsDirty))
+ | IncompleteFactorization
+ | SupernodalFactorIsDirty;
+ }
+ else
+ {
+ taucs_ccs_matrix taucsMatA = const_cast<MatrixType&>(a).asTaucsMatrix();
+ if ( (m_flags & SupernodalLeftLooking)
+ || ((!(m_flags & SupernodalMultifrontal)) && (m_flags & MemoryEfficient)) )
+ {
+ m_taucsSupernodalFactor = taucs_ccs_factor_llt_ll(&taucsMatA);
+ }
+ else
+ {
+ // use the faster Multifrontal routine
+ m_taucsSupernodalFactor = taucs_ccs_factor_llt_mf(&taucsMatA);
+ }
+ m_status = (m_status & ~IncompleteFactorization) | CompleteFactorization | MatrixLIsDirty;
+ }
+}
+
+template<typename MatrixType>
+inline const typename SparseLLT<MatrixType>::CholMatrixType&
+SparseLLT<MatrixType,Taucs>::matrixL() const
+{
+ if (m_status & MatrixLIsDirty)
+ {
+ ei_assert(!(m_status & SupernodalFactorIsDirty));
+
+ taucs_ccs_matrix* taucsL = taucs_supernodal_factor_to_ccs(m_taucsSupernodalFactor);
+
+ // the matrix returned by Taucs is not necessarily sorted,
+ // so let's copy it in two steps
+ DynamicSparseMatrix<Scalar,RowMajor> tmp = MappedSparseMatrix<Scalar>(*taucsL);
+ const_cast<typename Base::CholMatrixType&>(m_matrix) = tmp;
+ free(taucsL);
+ m_status = (m_status & ~MatrixLIsDirty);
+ }
+ return m_matrix;
+}
+
+template<typename MatrixType>
+template<typename Derived>
+void SparseLLT<MatrixType,Taucs>::solveInPlace(MatrixBase<Derived> &b) const
+{
+ bool inputIsCompatibleWithTaucs = (Derived::Flags&RowMajorBit)==0;
+
+ if (!inputIsCompatibleWithTaucs)
+ {
+ matrixL();
+ Base::solveInPlace(b);
+ }
+ else if (m_flags & IncompleteFactorization)
+ {
+ taucs_ccs_matrix taucsLLT = const_cast<typename Base::CholMatrixType&>(m_matrix).asTaucsMatrix();
+ typename ei_plain_matrix_type<Derived>::type x(b.rows());
+ for (int j=0; j<b.cols(); ++j)
+ {
+ taucs_ccs_solve_llt(&taucsLLT,x.data(),&b.col(j).coeffRef(0));
+ b.col(j) = x;
+ }
+ }
+ else
+ {
+ typename ei_plain_matrix_type<Derived>::type x(b.rows());
+ for (int j=0; j<b.cols(); ++j)
+ {
+ taucs_supernodal_solve_llt(m_taucsSupernodalFactor,x.data(),&b.col(j).coeffRef(0));
+ b.col(j) = x;
+ }
+ }
+}
+
+#endif // EIGEN_TAUCSSUPPORT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h b/extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h
new file mode 100644
index 00000000000..8948ae45e1d
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h
@@ -0,0 +1,178 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
+#define EIGEN_SPARSETRIANGULARSOLVER_H
+
+// forward substitution, row-major
+template<typename Lhs, typename Rhs>
+struct ei_solve_triangular_selector<Lhs,Rhs,LowerTriangular,RowMajor|IsSparse>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.rows(); ++i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar lastVal = 0;
+ int lastIndex = 0;
+ for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
+ {
+ lastVal = it.value();
+ lastIndex = it.index();
+ tmp -= lastVal * other.coeff(lastIndex,col);
+ }
+ if (Lhs::Flags & UnitDiagBit)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ ei_assert(lastIndex==i);
+ other.coeffRef(i,col) = tmp/lastVal;
+ }
+ }
+ }
+ }
+};
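+
+// Editor's note: the selector above is plain forward substitution,
+//   x(i) = ( b(i) - sum_{j<i} L(i,j)*x(j) ) / L(i,i),
+// relying on the row-major InnerIterator visiting exactly the stored
+// nonzeros L(i,j) of row i in increasing j, so the diagonal entry (when
+// present) is the last one seen.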
+
+// backward substitution, row-major
+template<typename Lhs, typename Rhs>
+struct ei_solve_triangular_selector<Lhs,Rhs,UpperTriangular,RowMajor|IsSparse>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.rows()-1 ; i>=0 ; --i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ typename Lhs::InnerIterator it(lhs, i);
+ if (it.index() == i)
+ ++it;
+ for(; it; ++it)
+ {
+ tmp -= it.value() * other.coeff(it.index(),col);
+ }
+
+ if (Lhs::Flags & UnitDiagBit)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ ei_assert(it.index() == i);
+ other.coeffRef(i,col) = tmp/it.value();
+ }
+ }
+ }
+ }
+};
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs>
+struct ei_solve_triangular_selector<Lhs,Rhs,LowerTriangular,ColMajor|IsSparse>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.cols(); ++i)
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Lhs::Flags & UnitDiagBit))
+ {
+ // std::cerr << it.value() << " ; " << it.index() << " == " << i << "\n";
+ ei_assert(it.index()==i);
+ other.coeffRef(i,col) /= it.value();
+ }
+ Scalar tmp = other.coeffRef(i,col);
+ if (it.index()==i)
+ ++it;
+ for(; it; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+};
+
+// backward substitution, col-major
+template<typename Lhs, typename Rhs>
+struct ei_solve_triangular_selector<Lhs,Rhs,UpperTriangular,ColMajor|IsSparse>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.cols()-1; i>=0; --i)
+ {
+ if(!(Lhs::Flags & UnitDiagBit))
+ {
+          // FIXME lhs.coeff(i,i) might not always be efficient, even though it should
+          // simply be the last element of the column!
+ other.coeffRef(i,col) /= lhs.coeff(i,i);
+ }
+ Scalar tmp = other.coeffRef(i,col);
+ typename Lhs::InnerIterator it(lhs, i);
+ for(; it && it.index()<i; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+};
+
+template<typename Derived>
+template<typename OtherDerived>
+void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
+{
+ ei_assert(derived().cols() == derived().rows());
+ ei_assert(derived().cols() == other.rows());
+ ei_assert(!(Flags & ZeroDiagBit));
+ ei_assert(Flags & (UpperTriangularBit|LowerTriangularBit));
+
+ enum { copy = ei_traits<OtherDerived>::Flags & RowMajorBit };
+
+ typedef typename ei_meta_if<copy,
+ typename ei_plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::ret OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ ei_solve_triangular_selector<Derived, typename ei_unref<OtherCopy>::type>::run(derived(), otherCopy);
+
+ if (copy)
+ other = otherCopy;
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+typename ei_plain_matrix_type_column_major<OtherDerived>::type
+SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+ typename ei_plain_matrix_type_column_major<OtherDerived>::type res(other);
+ solveTriangularInPlace(res);
+ return res;
+}
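+
+// Editor's note: a hedged usage sketch (hypothetical helper name) for the
+// out-of-place variant above, with a lower-triangular sparse matrix whose
+// flags carry the LowerTriangular bit, as LMatrixType does elsewhere in
+// this patch:
+inline Matrix<double,Dynamic,1>
+ei_sparse_trisolve_sketch(const SparseMatrix<double,LowerTriangular>& L,
+                          const Matrix<double,Dynamic,1>& b)
+{
+  return L.solveTriangular(b); // b is left untouched
+}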
+
+#endif // EIGEN_SPARSETRIANGULARSOLVER_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h b/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h
new file mode 100644
index 00000000000..b76ffb25248
--- /dev/null
+++ b/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h
@@ -0,0 +1,289 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_UMFPACKSUPPORT_H
+#define EIGEN_UMFPACKSUPPORT_H
+
+/* TODO extract L, extract U, compute det, etc... */
+
+// generic double/complex<double> wrapper functions:
+
+inline void umfpack_free_numeric(void **Numeric, double)
+{ umfpack_di_free_numeric(Numeric); }
+
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
+{ umfpack_zi_free_numeric(Numeric); }
+
+inline void umfpack_free_symbolic(void **Symbolic, double)
+{ umfpack_di_free_symbolic(Symbolic); }
+
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
+{ umfpack_zi_free_symbolic(Symbolic); }
+
+inline int umfpack_symbolic(int n_row,int n_col,
+ const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
+}
+
+inline int umfpack_symbolic(int n_row,int n_col,
+ const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&Ax[0].real(),0,Symbolic,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<double> Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_zi_numeric(Ap,Ai,&Ax[0].real(),0,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
+ double X[], const double B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex<double> Ax[],
+ std::complex<double> X[], const std::complex<double> B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_zi_solve(sys,Ap,Ai,&Ax[0].real(),0,&X[0].real(),0,&B[0].real(),0,Numeric,Control,Info);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
+{
+ return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex<double>)
+{
+ return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
+ int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+ return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
+ int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+ return umfpack_zi_get_numeric(Lp,Lj,Lx?&Lx[0].real():0,0,Up,Ui,Ux?&Ux[0].real():0,0,P,Q,
+ Dx?&Dx[0].real():0,0,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+ return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
+}
+
+inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+ return umfpack_zi_get_determinant(&Mx->real(),0,Ex,NumericHandle,User_Info);
+}
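+
+// Editor's note: the trailing dummy argument of these wrappers exists only
+// for overload resolution: passing double() selects the umfpack_di_*
+// routines while std::complex<double>() selects umfpack_zi_*; no value is
+// ever read from it. This lets the templated solver below write, e.g.,
+// umfpack_free_numeric(&m_numeric, Scalar()) independently of Scalar.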
+
+
+template<typename MatrixType>
+class SparseLU<MatrixType,UmfPack> : public SparseLU<MatrixType>
+{
+ protected:
+ typedef SparseLU<MatrixType> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar,LowerTriangular|UnitDiagBit> LMatrixType;
+ typedef SparseMatrix<Scalar,UpperTriangular> UMatrixType;
+ using Base::m_flags;
+ using Base::m_status;
+
+ public:
+
+ SparseLU(int flags = NaturalOrdering)
+ : Base(flags), m_numeric(0)
+ {
+ }
+
+ SparseLU(const MatrixType& matrix, int flags = NaturalOrdering)
+ : Base(flags), m_numeric(0)
+ {
+ compute(matrix);
+ }
+
+ ~SparseLU()
+ {
+ if (m_numeric)
+ umfpack_free_numeric(&m_numeric,Scalar());
+ }
+
+ inline const LMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_l;
+ }
+
+ inline const UMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_u;
+ }
+
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_p;
+ }
+
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_q;
+ }
+
+ Scalar determinant() const;
+
+ template<typename BDerived, typename XDerived>
+ bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const;
+
+ void compute(const MatrixType& matrix);
+
+ protected:
+
+ void extractData() const;
+
+ protected:
+ // cached data:
+ void* m_numeric;
+ const MatrixType* m_matrixRef;
+ mutable LMatrixType m_l;
+ mutable UMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+ mutable bool m_extractedDataAreDirty;
+};
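+
+// Editor's note: a minimal usage sketch of this backend (hypothetical
+// function name; succeeded() is assumed from the generic SparseLU base).
+// The factorization stores a pointer to `A`, so `A` must outlive any call
+// to solve().
+inline bool ei_umfpack_solve_sketch(const SparseMatrix<double>& A,
+                                    const Matrix<double,Dynamic,1>& b,
+                                    Matrix<double,Dynamic,1>& x)
+{
+  SparseLU<SparseMatrix<double>,UmfPack> lu(A);
+  x.resize(b.rows());
+  return lu.succeeded() && lu.solve(b, &x);
+}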
+
+template<typename MatrixType>
+void SparseLU<MatrixType,UmfPack>::compute(const MatrixType& a)
+{
+ const int rows = a.rows();
+ const int cols = a.cols();
+ ei_assert((MatrixType::Flags&RowMajorBit)==0 && "Row major matrices are not supported yet");
+
+ m_matrixRef = &a;
+
+ if (m_numeric)
+ umfpack_free_numeric(&m_numeric,Scalar());
+
+ void* symbolic;
+ int errorCode = 0;
+ errorCode = umfpack_symbolic(rows, cols, a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(),
+ &symbolic, 0, 0);
+ if (errorCode==0)
+ errorCode = umfpack_numeric(a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(),
+ symbolic, &m_numeric, 0, 0);
+
+ umfpack_free_symbolic(&symbolic,Scalar());
+
+ m_extractedDataAreDirty = true;
+
+ Base::m_succeeded = (errorCode==0);
+}
+
+template<typename MatrixType>
+void SparseLU<MatrixType,UmfPack>::extractData() const
+{
+ if (m_extractedDataAreDirty)
+ {
+ // get size of the data
+ int lnz, unz, rows, cols, nz_udiag;
+ umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
+
+ // allocate data
+ m_l.resize(rows,std::min(rows,cols));
+ m_l.resizeNonZeros(lnz);
+
+ m_u.resize(std::min(rows,cols),cols);
+ m_u.resizeNonZeros(unz);
+
+ m_p.resize(rows);
+ m_q.resize(cols);
+
+ // extract
+ umfpack_get_numeric(m_l._outerIndexPtr(), m_l._innerIndexPtr(), m_l._valuePtr(),
+ m_u._outerIndexPtr(), m_u._innerIndexPtr(), m_u._valuePtr(),
+ m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+template<typename MatrixType>
+typename SparseLU<MatrixType,UmfPack>::Scalar SparseLU<MatrixType,UmfPack>::determinant() const
+{
+ Scalar det;
+ umfpack_get_determinant(&det, 0, m_numeric, 0);
+ return det;
+}
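+
+// Editor's note: per the UMFPACK documentation, passing a null exponent
+// pointer (second argument) makes umfpack_*_get_determinant return the
+// determinant directly in `det` rather than in mantissa/exponent form.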
+
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool SparseLU<MatrixType,UmfPack>::solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> *x) const
+{
+ //const int size = m_matrix.rows();
+ const int rhsCols = b.cols();
+// ei_assert(size==b.rows());
+ ei_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet");
+ ei_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet");
+
+ int errorCode;
+ for (int j=0; j<rhsCols; ++j)
+ {
+ errorCode = umfpack_solve(UMFPACK_A,
+ m_matrixRef->_outerIndexPtr(), m_matrixRef->_innerIndexPtr(), m_matrixRef->_valuePtr(),
+ &x->col(j).coeffRef(0), &b.const_cast_derived().col(j).coeffRef(0), m_numeric, 0, 0);
+ if (errorCode!=0)
+ return false;
+ }
+// errorCode = umfpack_di_solve(UMFPACK_A,
+// m_matrixRef._outerIndexPtr(), m_matrixRef._innerIndexPtr(), m_matrixRef._valuePtr(),
+// x->derived().data(), b.derived().data(), m_numeric, 0, 0);
+
+ return true;
+}
+
+#endif // EIGEN_UMFPACKSUPPORT_H