git.blender.org/blender.git
path: root/extern
author     Daniel Genrich <daniel.genrich@gmx.net>  2012-06-07 02:18:58 +0400
committer  Daniel Genrich <daniel.genrich@gmx.net>  2012-06-07 02:18:58 +0400
commit     6489bb959336b1f169b97d062dedc554dc47654c (patch)
tree       1e7103e25813f441f92bbbc4d7c188d058c0b017 /extern
parent     2c7363f5d5fc3167a67ef052763a09ce353181c5 (diff)
Update Eigen3 to 3.1BETA
Diffstat (limited to 'extern')
-rw-r--r--  extern/Eigen3/Eigen/CMakeLists.txt  19
-rw-r--r--  extern/Eigen3/Eigen/CholmodSupport  40
-rw-r--r--  extern/Eigen3/Eigen/Core  2
-rw-r--r--  extern/Eigen3/Eigen/IterativeLinearSolvers  40
-rw-r--r--  extern/Eigen3/Eigen/OrderingMethods  23
-rw-r--r--  extern/Eigen3/Eigen/PaStiXSupport  34
-rw-r--r--  extern/Eigen3/Eigen/PardisoSupport  26
-rw-r--r--  extern/Eigen3/Eigen/SparseCholesky  30
-rw-r--r--  extern/Eigen3/Eigen/SparseCore  66
-rw-r--r--  extern/Eigen3/Eigen/SuperLUSupport  49
-rw-r--r--  extern/Eigen3/Eigen/UmfPackSupport  30
-rw-r--r--  extern/Eigen3/Eigen/src/CMakeLists.txt  7
-rw-r--r--  extern/Eigen3/Eigen/src/Cholesky/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h  102
-rw-r--r--  extern/Eigen3/Eigen/src/CholmodSupport/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h  594
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Assign_MKL.h  221
-rw-r--r--  extern/Eigen3/Eigen/src/Core/CMakeLists.txt  10
-rw-r--r--  extern/Eigen3/Eigen/src/Core/GeneralProduct.h  628
-rw-r--r--  extern/Eigen3/Eigen/src/Core/PlainObjectBase.h  2
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/AltiVec/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/CMakeLists.txt  4
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/Default/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/NEON/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h  3
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/SSE/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h  146
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h  118
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h  131
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h  295
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h  114
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h  309
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h  247
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h  155
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/MKL_support.h  109
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/Macros.h  4
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/Meta.h  15
-rw-r--r--  extern/Eigen3/Eigen/src/Eigen2Support/CMakeLists.txt  8
-rw-r--r--  extern/Eigen3/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h  94
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h  83
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h  92
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/CMakeLists.txt  8
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/arch/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/Householder/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h  163
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h  269
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h  266
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h  476
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h  269
-rw-r--r--  extern/Eigen3/Eigen/src/Jacobi/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/LU/CMakeLists.txt  8
-rw-r--r--  extern/Eigen3/Eigen/src/LU/PartialPivLU_MKL.h  85
-rw-r--r--  extern/Eigen3/Eigen/src/LU/arch/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/OrderingMethods/Amd.h  451
-rw-r--r--  extern/Eigen3/Eigen/src/OrderingMethods/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/PaStiXSupport/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h  803
-rw-r--r--  extern/Eigen3/Eigen/src/PardisoSupport/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h  614
-rw-r--r--  extern/Eigen3/Eigen/src/QR/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h  98
-rw-r--r--  extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h  69
-rw-r--r--  extern/Eigen3/Eigen/src/SVD/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h  92
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/AmbiVector.h  379
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h  239
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/CoreIterators.h  71
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h  346
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h  165
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseAssign.h  0
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseBlock.h  465
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h  375
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h  146
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h  231
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h  195
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseDot.h  97
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h  41
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h  651
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h  706
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseProduct.h  141
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseRedux.h  56
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h  454
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h  401
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h  68
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h  100
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseUtil.h  130
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseVector.h  431
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/SparseView.h  109
-rw-r--r--  extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h  339
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCholesky/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h  876
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h  386
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h  248
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h  260
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/CoreIterators.h  76
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h  194
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseAssign.h  0
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h  402
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h  339
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h  178
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h  315
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h  199
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseDot.h  109
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseFuzzy.h  41
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h  1127
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h  473
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h  163
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h  201
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseRedux.h  60
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h  495
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h  164
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h  76
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h  179
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h  188
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseVector.h  413
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseView.h  113
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h  349
-rw-r--r--  extern/Eigen3/Eigen/src/StlSupport/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/SuperLUSupport/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h  1040
-rw-r--r--  extern/Eigen3/Eigen/src/UmfPackSupport/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h  446
-rw-r--r--  extern/Eigen3/Eigen/src/misc/CMakeLists.txt  6
-rw-r--r--  extern/Eigen3/Eigen/src/misc/SparseSolve.h  126
-rw-r--r--  extern/Eigen3/Eigen/src/misc/blas.h  658
-rw-r--r--  extern/Eigen3/Eigen/src/plugins/CMakeLists.txt  6
132 files changed, 23908 insertions, 5 deletions
diff --git a/extern/Eigen3/Eigen/CMakeLists.txt b/extern/Eigen3/Eigen/CMakeLists.txt
new file mode 100644
index 00000000000..a92dd6f6caf
--- /dev/null
+++ b/extern/Eigen3/Eigen/CMakeLists.txt
@@ -0,0 +1,19 @@
+include(RegexUtils)
+test_escape_string_as_regex()
+
+file(GLOB Eigen_directory_files "*")
+
+escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+
+foreach(f ${Eigen_directory_files})
+ if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/src")
+ list(APPEND Eigen_directory_files_to_install ${f})
+ endif()
+endforeach(f ${Eigen_directory_files})
+
+install(FILES
+ ${Eigen_directory_files_to_install}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel
+ )
+
+add_subdirectory(src)
diff --git a/extern/Eigen3/Eigen/CholmodSupport b/extern/Eigen3/Eigen/CholmodSupport
new file mode 100644
index 00000000000..0e1ff48e033
--- /dev/null
+++ b/extern/Eigen3/Eigen/CholmodSupport
@@ -0,0 +1,40 @@
+#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H
+#define EIGEN_CHOLMODSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+extern "C" {
+ #include <cholmod.h>
+}
+
+/** \ingroup Support_modules
+ * \defgroup CholmodSupport_Module CholmodSupport module
+ *
+ * This module provides an interface to the Cholmod library which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
+ * It provides the following two main factorization classes:
+ * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
+ * - class CholmodDecomposition: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
+ *
+ * For the sake of completeness, this module also provides the following two classes:
+ * - class CholmodSimplicialLLT
+ * - class CholmodSimplicialLDLT
+ * Note that these classes do not bring any particular advantage compared to the built-in
+ * SimplicialLLT and SimplicialLDLT factorization classes.
+ *
+ * \code
+ * #include <Eigen/CholmodSupport>
+ * \endcode
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+#include "src/CholmodSupport/CholmodSupport.h"
+
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CHOLMODSUPPORT_MODULE_H
+
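A minimal usage sketch for the CholmodSupport module above (illustration only, not part of the patch; it assumes a working CHOLMOD/SuiteSparse installation is linked in):

#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;
  const int n = 100;
  std::vector<T> coeffs;
  for(int i = 0; i < n; ++i)
  {
    coeffs.push_back(T(i, i, 4.0));                    // diagonal
    if(i + 1 < n) coeffs.push_back(T(i + 1, i, -1.0)); // lower part only (UpLo defaults to Lower)
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());

  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
  Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);                                   // analyzePattern() + factorize()
  if(solver.info() != Eigen::Success) return 1;        // factorization failed
  Eigen::VectorXd x = solver.solve(b);
  return 0;
}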
diff --git a/extern/Eigen3/Eigen/Core b/extern/Eigen3/Eigen/Core
index 5d803490005..5e93d6b73f2 100644
--- a/extern/Eigen3/Eigen/Core
+++ b/extern/Eigen3/Eigen/Core
@@ -142,7 +142,7 @@
#endif
// MSVC for windows mobile does not have the errno.h file
-#if !(defined(_MSC_VER) && defined(_WIN32_WCE))
+#if !(defined(_MSC_VER) && defined(_WIN32_WCE)) && !defined(__ARMCC_VERSION)
#define EIGEN_HAS_ERRNO
#endif
diff --git a/extern/Eigen3/Eigen/IterativeLinearSolvers b/extern/Eigen3/Eigen/IterativeLinearSolvers
new file mode 100644
index 00000000000..315c2dd1ee7
--- /dev/null
+++ b/extern/Eigen3/Eigen/IterativeLinearSolvers
@@ -0,0 +1,40 @@
+#ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
+#define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
+
+#include "SparseCore"
+#include "OrderingMethods"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \ingroup Sparse_modules
+ * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module
+ *
+ * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a square matrix, usually very large and sparse.
+ * Those solvers are accessible via the following classes:
+ * - ConjugateGradient for selfadjoint (hermitian) matrices,
+ * - BiCGSTAB for general square matrices.
+ *
+ * These iterative solvers are associated with some preconditioners:
+ * - IdentityPreconditioner - not really useful
+ * - DiagonalPreconditioner - also called the Jacobi preconditioner; works very well on diagonally dominant matrices.
+ * - IncompleteLUT - incomplete LU factorization with dual thresholding
+ *
+ * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport.
+ *
+ * \code
+ * #include <Eigen/IterativeLinearSolvers>
+ * \endcode
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+#include "src/IterativeLinearSolvers/IterativeSolverBase.h"
+#include "src/IterativeLinearSolvers/BasicPreconditioners.h"
+#include "src/IterativeLinearSolvers/ConjugateGradient.h"
+#include "src/IterativeLinearSolvers/BiCGSTAB.h"
+#include "src/IterativeLinearSolvers/IncompleteLUT.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
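The solvers listed above follow the usual compute()/solve() pattern. A short sketch (illustration only), using a 1-D Laplacian as a symmetric positive-definite test matrix:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  // SPD test matrix: 1-D Laplacian with Dirichlet boundaries.
  const int n = 1000;
  Eigen::SparseMatrix<double> A(n, n);
  A.reserve(Eigen::VectorXi::Constant(n, 3));          // at most 3 entries per column
  for(int i = 0; i < n; ++i)
  {
    A.insert(i, i) = 2.0;
    if(i > 0)     A.insert(i, i - 1) = -1.0;
    if(i + 1 < n) A.insert(i, i + 1) = -1.0;
  }
  A.makeCompressed();

  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double> > cg; // DiagonalPreconditioner by default
  cg.setMaxIterations(500);
  cg.setTolerance(1e-8);
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "iterations: " << cg.iterations()
            << "  estimated error: " << cg.error() << std::endl;
  return 0;
}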
diff --git a/extern/Eigen3/Eigen/OrderingMethods b/extern/Eigen3/Eigen/OrderingMethods
new file mode 100644
index 00000000000..1e2d87452e5
--- /dev/null
+++ b/extern/Eigen3/Eigen/OrderingMethods
@@ -0,0 +1,23 @@
+#ifndef EIGEN_ORDERINGMETHODS_MODULE_H
+#define EIGEN_ORDERINGMETHODS_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \ingroup Sparse_modules
+ * \defgroup OrderingMethods_Module OrderingMethods module
+ *
+ * This module is currently for internal use only.
+ *
+ *
+ * \code
+ * #include <Eigen/OrderingMethods>
+ * \endcode
+ */
+
+#include "src/OrderingMethods/Amd.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_ORDERINGMETHODS_MODULE_H
diff --git a/extern/Eigen3/Eigen/PaStiXSupport b/extern/Eigen3/Eigen/PaStiXSupport
new file mode 100644
index 00000000000..1dbb0941b75
--- /dev/null
+++ b/extern/Eigen3/Eigen/PaStiXSupport
@@ -0,0 +1,34 @@
+#ifndef EIGEN_PASTIXSUPPORT_MODULE_H
+#define EIGEN_PASTIXSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include <complex.h>
+extern "C" {
+#include <pastix_nompi.h>
+#include <pastix.h>
+}
+
+#ifdef complex
+#undef complex
+#endif
+
+/** \ingroup Support_modules
+ * \defgroup PaStiXSupport_Module PaStiXSupport module
+ *
+ * \code
+ * #include <Eigen/PaStiXSupport>
+ * \endcode
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+#include "src/PaStiXSupport/PaStiXSupport.h"
+
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_PASTIXSUPPORT_MODULE_H
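The module documentation above is terse. As a hedged sketch of the expected usage — the class name PastixLU is an assumption, since the contents of src/PaStiXSupport/PaStiXSupport.h are not shown in this excerpt — the wrapper follows the same compute()/solve() pattern as the other sparse backends:

#include <Eigen/Sparse>
#include <Eigen/PaStiXSupport>   // requires a PaStiX installation

// A and b are assumed filled elsewhere; PastixLU is the assumed wrapper class name.
Eigen::VectorXd solveWithPastix(const Eigen::SparseMatrix<double>& A,
                                const Eigen::VectorXd& b)
{
  Eigen::PastixLU<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);
  return solver.solve(b);
}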
diff --git a/extern/Eigen3/Eigen/PardisoSupport b/extern/Eigen3/Eigen/PardisoSupport
new file mode 100644
index 00000000000..6585edb7410
--- /dev/null
+++ b/extern/Eigen3/Eigen/PardisoSupport
@@ -0,0 +1,26 @@
+#ifndef EIGEN_PARDISOSUPPORT_MODULE_H
+#define EIGEN_PARDISOSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include <mkl_pardiso.h>
+
+#include <unsupported/Eigen/SparseExtra>
+
+/** \ingroup Support_modules
+ * \defgroup PardisoSupport_Module PardisoSupport module
+ *
+ * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers
+ *
+ * \code
+ * #include <Eigen/PardisoSupport>
+ * \endcode
+ */
+
+#include "src/PardisoSupport/PardisoSupport.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_PARDISOSUPPORT_MODULE_H
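Similarly, a hedged usage sketch for the PARDISO wrappers (the class name PardisoLU is an assumption, as PardisoSupport.h is not shown in this excerpt; Intel MKL is required at build and link time):

#include <Eigen/Sparse>
#include <Eigen/PardisoSupport>  // requires an Intel MKL development environment

Eigen::VectorXd solveWithPardiso(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
{
  Eigen::PardisoLU<Eigen::SparseMatrix<double> > solver; // assumed class name
  solver.compute(A);
  return solver.solve(b);
}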
diff --git a/extern/Eigen3/Eigen/SparseCholesky b/extern/Eigen3/Eigen/SparseCholesky
new file mode 100644
index 00000000000..5f82742f7d8
--- /dev/null
+++ b/extern/Eigen3/Eigen/SparseCholesky
@@ -0,0 +1,30 @@
+#ifndef EIGEN_SPARSECHOLESKY_MODULE_H
+#define EIGEN_SPARSECHOLESKY_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \ingroup Sparse_modules
+ * \defgroup SparseCholesky_Module SparseCholesky module
+ *
+ * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices.
+ * Those decompositions are accessible via the following classes:
+ * - SimplicialLLt,
+ * - SimplicialLDLt
+ *
+ * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module.
+ *
+ * \code
+ * #include <Eigen/SparseCholesky>
+ * \endcode
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+#include "src/SparseCholesky/SimplicialCholesky.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SPARSECHOLESKY_MODULE_H
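A short sketch of the decompositions listed above (illustration only; the class name SimplicialLDLt is the one given in this module's documentation):

#include <Eigen/Sparse>

// Solve A x = b for a sparse selfadjoint positive-definite A
// (assumed filled, e.g. via setFromTriplets, before this call).
Eigen::VectorXd solveSPD(const Eigen::SparseMatrix<double>& A,
                         const Eigen::VectorXd& b)
{
  Eigen::SimplicialLDLt<Eigen::SparseMatrix<double> > ldlt(A); // analyzePattern + factorize
  return ldlt.solve(b);
}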
diff --git a/extern/Eigen3/Eigen/SparseCore b/extern/Eigen3/Eigen/SparseCore
new file mode 100644
index 00000000000..41d28c92824
--- /dev/null
+++ b/extern/Eigen3/Eigen/SparseCore
@@ -0,0 +1,66 @@
+#ifndef EIGEN_SPARSECORE_MODULE_H
+#define EIGEN_SPARSECORE_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include <vector>
+#include <map>
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
+
+/** \ingroup Sparse_modules
+ * \defgroup SparseCore_Module SparseCore module
+ *
+ * This module provides a sparse matrix representation, and basic associated matrix manipulations
+ * and operations.
+ *
+ * See the \ref TutorialSparse "Sparse tutorial"
+ *
+ * \code
+ * #include <Eigen/SparseCore>
+ * \endcode
+ *
+ * This module depends on: Core.
+ */
+
+namespace Eigen {
+
+/** The type used to identify a general sparse storage. */
+struct Sparse {};
+
+}
+
+#include "src/SparseCore/SparseUtil.h"
+#include "src/SparseCore/SparseMatrixBase.h"
+#include "src/SparseCore/CompressedStorage.h"
+#include "src/SparseCore/AmbiVector.h"
+#include "src/SparseCore/SparseMatrix.h"
+#include "src/SparseCore/MappedSparseMatrix.h"
+#include "src/SparseCore/SparseVector.h"
+#include "src/SparseCore/CoreIterators.h"
+#include "src/SparseCore/SparseBlock.h"
+#include "src/SparseCore/SparseTranspose.h"
+#include "src/SparseCore/SparseCwiseUnaryOp.h"
+#include "src/SparseCore/SparseCwiseBinaryOp.h"
+#include "src/SparseCore/SparseDot.h"
+#include "src/SparseCore/SparsePermutation.h"
+#include "src/SparseCore/SparseAssign.h"
+#include "src/SparseCore/SparseRedux.h"
+#include "src/SparseCore/SparseFuzzy.h"
+#include "src/SparseCore/ConservativeSparseSparseProduct.h"
+#include "src/SparseCore/SparseSparseProductWithPruning.h"
+#include "src/SparseCore/SparseProduct.h"
+#include "src/SparseCore/SparseDenseProduct.h"
+#include "src/SparseCore/SparseDiagonalProduct.h"
+#include "src/SparseCore/SparseTriangularView.h"
+#include "src/SparseCore/SparseSelfAdjointView.h"
+#include "src/SparseCore/TriangularSolver.h"
+#include "src/SparseCore/SparseView.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SPARSECORE_MODULE_H
+
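For reference, a small sketch (illustration only) of the typical way to fill a SparseMatrix from this module using the triplet API declared in SparseMatrix.h:

#include <Eigen/SparseCore>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.push_back(T(0, 0, 3.0));
  triplets.push_back(T(1, 2, -1.5));
  triplets.push_back(T(2, 1, 2.0));

  Eigen::SparseMatrix<double> A(3, 3);
  A.setFromTriplets(triplets.begin(), triplets.end()); // duplicate entries are summed
  A.makeCompressed();                                  // switch to compressed (CCS) storage
  return A.nonZeros() == 3 ? 0 : 1;
}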
diff --git a/extern/Eigen3/Eigen/SuperLUSupport b/extern/Eigen3/Eigen/SuperLUSupport
new file mode 100644
index 00000000000..09aa1b6cad0
--- /dev/null
+++ b/extern/Eigen3/Eigen/SuperLUSupport
@@ -0,0 +1,49 @@
+#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H
+#define EIGEN_SUPERLUSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#ifdef EMPTY
+#define EIGEN_EMPTY_WAS_ALREADY_DEFINED
+#endif
+
+typedef int int_t;
+#include <slu_Cnames.h>
+#include <supermatrix.h>
+#include <slu_util.h>
+
+// slu_util.h defines a preprocessor token named EMPTY which is really polluting,
+// so we remove it in favor of a SUPERLU_EMPTY token.
+// If EMPTY was already defined then we don't undef it.
+
+#if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED)
+# undef EIGEN_EMPTY_WAS_ALREADY_DEFINED
+#elif defined(EMPTY)
+# undef EMPTY
+#endif
+
+#define SUPERLU_EMPTY (-1)
+
+namespace Eigen { struct SluMatrix; }
+
+/** \ingroup Support_modules
+ * \defgroup SuperLUSupport_Module SuperLUSupport module
+ *
+ * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting.
+ *
+ * \code
+ * #include <Eigen/SuperLUSupport>
+ * \endcode
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+#include "src/SuperLUSupport/SuperLUSupport.h"
+
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SUPERLUSUPPORT_MODULE_H
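A hedged usage sketch for this backend (the wrapper class name SuperLU is an assumption, as SuperLUSupport.h itself is not shown in this excerpt; the SuperLU library is required):

#include <Eigen/Sparse>
#include <Eigen/SuperLUSupport>  // needs SuperLU headers and libraries at build time

Eigen::VectorXd solveWithSuperLU(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
{
  Eigen::SuperLU<Eigen::SparseMatrix<double> > lu(A);  // general (non-symmetric) sparse LU
  return lu.solve(b);
}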
diff --git a/extern/Eigen3/Eigen/UmfPackSupport b/extern/Eigen3/Eigen/UmfPackSupport
new file mode 100644
index 00000000000..aac361e3f17
--- /dev/null
+++ b/extern/Eigen3/Eigen/UmfPackSupport
@@ -0,0 +1,30 @@
+#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H
+#define EIGEN_UMFPACKSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+extern "C" {
+#include <umfpack.h>
+}
+
+/** \ingroup Support_modules
+ * \defgroup UmfPackSupport_Module UmfPackSupport module
+ *
+ *
+ *
+ *
+ * \code
+ * #include <Eigen/UmfPackSupport>
+ * \endcode
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+#include "src/UmfPackSupport/UmfPackSupport.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_UMFPACKSUPPORT_MODULE_H
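Likewise for UMFPACK (the class name UmfPackLU is an assumption, as UmfPackSupport.h is not shown in this excerpt; UMFPACK from SuiteSparse is required):

#include <Eigen/Sparse>
#include <Eigen/UmfPackSupport>  // needs UMFPACK from SuiteSparse

Eigen::VectorXd solveWithUmfPack(const Eigen::SparseMatrix<double>& A,
                                 const Eigen::VectorXd& b)
{
  Eigen::UmfPackLU<Eigen::SparseMatrix<double> > lu;   // assumed class name
  lu.compute(A);                                       // analyzePattern() + factorize()
  return lu.solve(b);
}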
diff --git a/extern/Eigen3/Eigen/src/CMakeLists.txt b/extern/Eigen3/Eigen/src/CMakeLists.txt
new file mode 100644
index 00000000000..c326f374d9e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/CMakeLists.txt
@@ -0,0 +1,7 @@
+file(GLOB Eigen_src_subdirectories "*")
+escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+foreach(f ${Eigen_src_subdirectories})
+ if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" )
+ add_subdirectory(${f})
+ endif()
+endforeach()
diff --git a/extern/Eigen3/Eigen/src/Cholesky/CMakeLists.txt b/extern/Eigen3/Eigen/src/Cholesky/CMakeLists.txt
new file mode 100644
index 00000000000..d01488b41a8
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Cholesky/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Cholesky_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Cholesky_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Cholesky COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h b/extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h
new file mode 100644
index 00000000000..64daa445cf7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h
@@ -0,0 +1,102 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * LLt decomposition based on LAPACKE_?potrf function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_LLT_MKL_H
+#define EIGEN_LLT_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+#include <iostream>
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Scalar> struct mkl_llt;
+
+#define EIGEN_MKL_LLT(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template<> struct mkl_llt<EIGTYPE> \
+{ \
+ template<typename MatrixType> \
+ static inline typename MatrixType::Index potrf(MatrixType& m, char uplo) \
+ { \
+ lapack_int matrix_order; \
+ lapack_int size, lda, info, StorageOrder; \
+ EIGTYPE* a; \
+ eigen_assert(m.rows()==m.cols()); \
+ /* Set up parameters for ?potrf */ \
+ size = m.rows(); \
+ StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \
+ matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+ a = &(m.coeffRef(0,0)); \
+ lda = m.outerStride(); \
+\
+ info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
+ info = (info==0) ? Success : NumericalIssue; \
+ return info; \
+ } \
+}; \
+template<> struct llt_inplace<EIGTYPE, Lower> \
+{ \
+ template<typename MatrixType> \
+ static typename MatrixType::Index blocked(MatrixType& m) \
+ { \
+ return mkl_llt<EIGTYPE>::potrf(m, 'L'); \
+ } \
+ template<typename MatrixType, typename VectorType> \
+ static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
+ { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \
+}; \
+template<> struct llt_inplace<EIGTYPE, Upper> \
+{ \
+ template<typename MatrixType> \
+ static typename MatrixType::Index blocked(MatrixType& m) \
+ { \
+ return mkl_llt<EIGTYPE>::potrf(m, 'U'); \
+ } \
+ template<typename MatrixType, typename VectorType> \
+ static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
+ { \
+ Transpose<MatrixType> matt(mat); \
+ return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \
+ } \
+};
+
+EIGEN_MKL_LLT(double, double, d)
+EIGEN_MKL_LLT(float, float, s)
+EIGEN_MKL_LLT(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_LLT(scomplex, MKL_Complex8, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_LLT_MKL_H
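What the macro above does: for each scalar type it specializes internal::llt_inplace so that the blocked Cholesky path calls LAPACKE_?potrf from MKL. User code does not change; enabling MKL is enough. A sketch (illustration only; EIGEN_USE_MKL_ALL is the switch provided by MKL_support.h, and an MKL build/link environment is assumed):

#define EIGEN_USE_MKL_ALL   // route supported dense kernels (including LLT) to MKL/LAPACKE
#include <Eigen/Dense>

int main()
{
  const int n = 512;
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd A = M * M.transpose()
                    + n * Eigen::MatrixXd::Identity(n, n); // SPD by construction
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::LLT<Eigen::MatrixXd> llt(A); // with MKL enabled, blocked() above calls LAPACKE_dpotrf
  Eigen::VectorXd x = llt.solve(b);
  return llt.info() == Eigen::Success ? 0 : 1;
}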
diff --git a/extern/Eigen3/Eigen/src/CholmodSupport/CMakeLists.txt b/extern/Eigen3/Eigen/src/CholmodSupport/CMakeLists.txt
new file mode 100644
index 00000000000..814dfa61313
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/CholmodSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_CholmodSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_CholmodSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/CholmodSupport COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h b/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h
new file mode 100644
index 00000000000..a06c429f054
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -0,0 +1,594 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CHOLMODSUPPORT_H
+#define EIGEN_CHOLMODSUPPORT_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Scalar, typename CholmodType>
+void cholmod_configure_matrix(CholmodType& mat)
+{
+ if (internal::is_same<Scalar,float>::value)
+ {
+ mat.xtype = CHOLMOD_REAL;
+ mat.dtype = CHOLMOD_SINGLE;
+ }
+ else if (internal::is_same<Scalar,double>::value)
+ {
+ mat.xtype = CHOLMOD_REAL;
+ mat.dtype = CHOLMOD_DOUBLE;
+ }
+ else if (internal::is_same<Scalar,std::complex<float> >::value)
+ {
+ mat.xtype = CHOLMOD_COMPLEX;
+ mat.dtype = CHOLMOD_SINGLE;
+ }
+ else if (internal::is_same<Scalar,std::complex<double> >::value)
+ {
+ mat.xtype = CHOLMOD_COMPLEX;
+ mat.dtype = CHOLMOD_DOUBLE;
+ }
+ else
+ {
+ eigen_assert(false && "Scalar type not supported by CHOLMOD");
+ }
+}
+
+} // namespace internal
+
+/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
+ * Note that the data are shared.
+ */
+template<typename _Scalar, int _Options, typename _Index>
+cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
+{
+ typedef SparseMatrix<_Scalar,_Options,_Index> MatrixType;
+ cholmod_sparse res;
+ res.nzmax = mat.nonZeros();
+  res.nrow   = mat.rows();
+ res.ncol = mat.cols();
+ res.p = mat.outerIndexPtr();
+ res.i = mat.innerIndexPtr();
+ res.x = mat.valuePtr();
+ res.sorted = 1;
+ if(mat.isCompressed())
+ {
+ res.packed = 1;
+ }
+ else
+ {
+ res.packed = 0;
+ res.nz = mat.innerNonZeroPtr();
+ }
+
+ res.dtype = 0;
+ res.stype = -1;
+
+ if (internal::is_same<_Index,int>::value)
+ {
+ res.itype = CHOLMOD_INT;
+ }
+ else
+ {
+ eigen_assert(false && "Index type different than int is not supported yet");
+ }
+
+ // setup res.xtype
+ internal::cholmod_configure_matrix<_Scalar>(res);
+
+ res.stype = 0;
+
+ return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat)
+{
+ cholmod_sparse res = viewAsCholmod(mat.const_cast_derived());
+ return res;
+}
+
+/** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix.
+ * The data are not copied but shared. */
+template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
+cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
+{
+ cholmod_sparse res = viewAsCholmod(mat.matrix().const_cast_derived());
+
+ if(UpLo==Upper) res.stype = 1;
+ if(UpLo==Lower) res.stype = -1;
+
+ return res;
+}
+
+/** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix.
+ * The data are not copied but shared. */
+template<typename Derived>
+cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
+{
+ EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ typedef typename Derived::Scalar Scalar;
+
+ cholmod_dense res;
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ res.nzmax = res.nrow * res.ncol;
+ res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
+ res.x = mat.derived().data();
+ res.z = 0;
+
+ internal::cholmod_configure_matrix<Scalar>(res);
+
+ return res;
+}
+
+/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
+ * The data are not copied but shared. */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm)
+{
+ return MappedSparseMatrix<Scalar,Flags,Index>
+ (cm.nrow, cm.ncol, reinterpret_cast<Index*>(cm.p)[cm.ncol],
+ reinterpret_cast<Index*>(cm.p), reinterpret_cast<Index*>(cm.i),reinterpret_cast<Scalar*>(cm.x) );
+}
+
+enum CholmodMode {
+ CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt
+};
+
+
+/** \ingroup CholmodSupport_Module
+ * \class CholmodBase
+ * \brief The base class for the direct Cholesky factorization of Cholmod
+ * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT
+ */
+template<typename _MatrixType, int _UpLo, typename Derived>
+class CholmodBase : internal::noncopyable
+{
+ public:
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef MatrixType CholMatrixType;
+ typedef typename MatrixType::Index Index;
+
+ public:
+
+ CholmodBase()
+ : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
+ {
+ cholmod_start(&m_cholmod);
+ }
+
+ CholmodBase(const MatrixType& matrix)
+ : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
+ {
+ cholmod_start(&m_cholmod);
+ compute(matrix);
+ }
+
+ ~CholmodBase()
+ {
+ if(m_cholmodFactor)
+ cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+ cholmod_finish(&m_cholmod);
+ }
+
+ inline Index cols() const { return m_cholmodFactor->n; }
+ inline Index rows() const { return m_cholmodFactor->n; }
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the matrix appears to be negative.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /** Computes the sparse Cholesky decomposition of \a matrix */
+ Derived& compute(const MatrixType& matrix)
+ {
+ analyzePattern(matrix);
+ factorize(matrix);
+ return derived();
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<CholmodBase, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<CholmodBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<CholmodBase, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<CholmodBase, Rhs>(*this, b.derived());
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving for several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ if(m_cholmodFactor)
+ {
+ cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+ m_cholmodFactor = 0;
+ }
+ cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
+ m_cholmodFactor = cholmod_analyze(&A, &m_cholmod);
+
+ this->m_isInitialized = true;
+ this->m_info = Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix)
+ {
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
+ cholmod_factorize(&A, m_cholmodFactor, &m_cholmod);
+
+ this->m_info = Success;
+ m_factorizationIsOk = true;
+ }
+
+ /** Returns a reference to Cholmod's configuration structure, giving full control over the performed operations.
+ * See the Cholmod user guide for details. */
+ cholmod_common& cholmod() { return m_cholmod; }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+ const Index size = m_cholmodFactor->n;
+ eigen_assert(size==b.rows());
+
+ // note: cd stands for Cholmod Dense
+ cholmod_dense b_cd = viewAsCholmod(b.const_cast_derived());
+ cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod);
+ if(!x_cd)
+ {
+ this->m_info = NumericalIssue;
+ }
+ // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
+ dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
+ cholmod_free_dense(&x_cd, &m_cholmod);
+ }
+
+ /** \internal */
+ template<typename RhsScalar, int RhsOptions, typename RhsIndex, typename DestScalar, int DestOptions, typename DestIndex>
+ void _solve(const SparseMatrix<RhsScalar,RhsOptions,RhsIndex> &b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+ const Index size = m_cholmodFactor->n;
+ eigen_assert(size==b.rows());
+
+ // note: cs stands for Cholmod Sparse
+ cholmod_sparse b_cs = viewAsCholmod(b);
+ cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod);
+ if(!x_cs)
+ {
+ this->m_info = NumericalIssue;
+ }
+ // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
+ dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs);
+ cholmod_free_sparse(&x_cs, &m_cholmod);
+ }
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+
+ template<typename Stream>
+ void dumpMemory(Stream& s)
+ {}
+
+ protected:
+ mutable cholmod_common m_cholmod;
+ cholmod_factor* m_cholmodFactor;
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ int m_factorizationIsOk;
+ int m_analysisIsOk;
+};
+
+/** \ingroup CholmodSupport_Module
+ * \class CholmodSimplicialLLT
+ * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod
+ *
+ * This class allows solving A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization
+ * using the Cholmod library.
+ * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest.
+ * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
+ * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
+ *
+ * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLLT
+ */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> >
+{
+ typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base;
+ using Base::m_cholmod;
+
+ public:
+
+ typedef _MatrixType MatrixType;
+
+ CholmodSimplicialLLT() : Base() { init(); }
+
+ CholmodSimplicialLLT(const MatrixType& matrix) : Base()
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~CholmodSimplicialLLT() {}
+ protected:
+ void init()
+ {
+ m_cholmod.final_asis = 0;
+ m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+ m_cholmod.final_ll = 1;
+ }
+};
+
+
+/** \ingroup CholmodSupport_Module
+ * \class CholmodSimplicialLDLT
+ * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod
+ *
+ * This class allows solving A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization
+ * using the Cholmod library.
+ * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest.
+ * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
+ * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
+ *
+ * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLDLT
+ */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> >
+{
+ typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base;
+ using Base::m_cholmod;
+
+ public:
+
+ typedef _MatrixType MatrixType;
+
+ CholmodSimplicialLDLT() : Base() { init(); }
+
+ CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~CholmodSimplicialLDLT() {}
+ protected:
+ void init()
+ {
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+ }
+};
+
+/** \ingroup CholmodSupport_Module
+ * \class CholmodSupernodalLLT
+ * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod
+ *
+ * This class allows solving A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization
+ * using the Cholmod library.
+ * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM.
+ * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
+ * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> >
+{
+ typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base;
+ using Base::m_cholmod;
+
+ public:
+
+ typedef _MatrixType MatrixType;
+
+ CholmodSupernodalLLT() : Base() { init(); }
+
+ CholmodSupernodalLLT(const MatrixType& matrix) : Base()
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~CholmodSupernodalLLT() {}
+ protected:
+ void init()
+ {
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
+ }
+};
+
+/** \ingroup CholmodSupport_Module
+ * \class CholmodDecomposition
+ * \brief A general Cholesky factorization and solver based on Cholmod
+ *
+ * This class allows solving A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization
+ * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+ * X and B can be either dense or sparse.
+ *
+ * This variant permits changing the underlying Cholesky method at runtime.
+ * On the other hand, it does not provide access to the result of the factorization.
+ * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
+ * This class supports all kinds of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> >
+{
+ typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base;
+ using Base::m_cholmod;
+
+ public:
+
+ typedef _MatrixType MatrixType;
+
+ CholmodDecomposition() : Base() { init(); }
+
+ CholmodDecomposition(const MatrixType& matrix) : Base()
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~CholmodDecomposition() {}
+
+ void setMode(CholmodMode mode)
+ {
+ switch(mode)
+ {
+ case CholmodAuto:
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_AUTO;
+ break;
+ case CholmodSimplicialLLt:
+ m_cholmod.final_asis = 0;
+ m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+ m_cholmod.final_ll = 1;
+ break;
+ case CholmodSupernodalLLt:
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
+ break;
+ case CholmodLDLt:
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+ break;
+ default:
+ break;
+ }
+ }
+ protected:
+ void init()
+ {
+ m_cholmod.final_asis = 1;
+ m_cholmod.supernodal = CHOLMOD_AUTO;
+ }
+};
+
+namespace internal {
+
+template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
+struct solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+ : solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+{
+ typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
+struct sparse_solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+ : sparse_solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+{
+ typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CHOLMODSUPPORT_H
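To illustrate the runtime mode selection implemented by CholmodDecomposition::setMode() above, a small sketch (illustration only, assuming a CHOLMOD installation):

#include <Eigen/Sparse>
#include <Eigen/CholmodSupport>

Eigen::VectorXd solveAdaptive(const Eigen::SparseMatrix<double>& A,
                              const Eigen::VectorXd& b)
{
  Eigen::CholmodDecomposition<Eigen::SparseMatrix<double> > solver;
  solver.setMode(Eigen::CholmodSupernodalLLt); // force supernodal; the default is CholmodAuto
  solver.compute(A);                           // analyzePattern() + factorize()
  return solver.solve(b);
}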
diff --git a/extern/Eigen3/Eigen/src/Core/Assign_MKL.h b/extern/Eigen3/Eigen/src/Core/Assign_MKL.h
new file mode 100644
index 00000000000..f96b0eb229a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Assign_MKL.h
@@ -0,0 +1,221 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin()
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_ASSIGN_VML_H
+#define EIGEN_ASSIGN_VML_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Op> struct vml_call
+{ enum { IsSupported = 0 }; };
+
+template<typename Dst, typename Src, typename UnaryOp>
+class vml_assign_traits
+{
+ private:
+ enum {
+ DstHasDirectAccess = Dst::Flags & DirectAccessBit,
+ SrcHasDirectAccess = Src::Flags & DirectAccessBit,
+
+ StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)),
+ InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime)
+ : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime)
+ : int(Dst::RowsAtCompileTime),
+ InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
+ : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)
+ : int(Dst::MaxRowsAtCompileTime),
+ MaxSizeAtCompileTime = Dst::SizeAtCompileTime,
+
+ MightEnableVml = vml_call<UnaryOp>::IsSupported && StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess
+ && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1,
+ MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit),
+ VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize,
+ LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD,
+ MayEnableVml = MightEnableVml && LargeEnough,
+ MayLinearize = MayEnableVml && MightLinearize
+ };
+ public:
+ enum {
+ Traversal = MayLinearize ? LinearVectorizedTraversal
+ : MayEnableVml ? InnerVectorizedTraversal
+ : DefaultTraversal
+ };
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling,
+ int VmlTraversal = vml_assign_traits<Derived1, Derived2, UnaryOp>::Traversal >
+struct vml_assign_impl
+ : assign_impl<Derived1, Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>
+{
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling>
+struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, InnerVectorizedTraversal>
+{
+ typedef typename Derived1::Scalar Scalar;
+ typedef typename Derived1::Index Index;
+ static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
+ {
+ // in case we want to (or have to) skip VML at runtime we can call:
+ // assign_impl<Derived1,Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>::run(dst,src);
+ const Index innerSize = dst.innerSize();
+ const Index outerSize = dst.outerSize();
+ for(Index outer = 0; outer < outerSize; ++outer) {
+ const Scalar *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer,0)) :
+ &(src.nestedExpression().coeffRef(0, outer));
+ Scalar *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer));
+ vml_call<UnaryOp>::run(src.functor(), innerSize, src_ptr, dst_ptr );
+ }
+ }
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling>
+struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, LinearVectorizedTraversal>
+{
+ static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
+ {
+ // in case we want to (or have to) skip VML at runtime we can call:
+ // assign_impl<Derived1,Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>::run(dst,src);
+ vml_call<UnaryOp>::run(src.functor(), dst.size(), src.nestedExpression().data(), dst.data() );
+ }
+};
+
+// Macros
+
+#define EIGEN_MKL_VML_SPECIALIZE_ASSIGN(TRAVERSAL,UNROLLING) \
+ template<typename Derived1, typename Derived2, typename UnaryOp> \
+ struct assign_impl<Derived1, Eigen::CwiseUnaryOp<UnaryOp, Derived2>, TRAVERSAL, UNROLLING, Specialized> { \
+ static inline void run(Derived1 &dst, const Eigen::CwiseUnaryOp<UnaryOp, Derived2> &src) { \
+ vml_assign_impl<Derived1,Derived2,UnaryOp,TRAVERSAL,UNROLLING>::run(dst, src); \
+ } \
+ };
+
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,InnerUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,InnerUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(SliceVectorizedTraversal,NoUnrolling)
+
+
+#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
+#define EIGEN_MKL_VML_MODE VML_HA
+#else
+#define EIGEN_MKL_VML_MODE VML_LA
+#endif
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \
+ template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > { \
+ enum { IsSupported = 1 }; \
+ static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& /*func*/, \
+ int size, const EIGENTYPE* src, EIGENTYPE* dst) { \
+ VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst); \
+ } \
+ };
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \
+ template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > { \
+ enum { IsSupported = 1 }; \
+ static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& /*func*/, \
+ int size, const EIGENTYPE* src, EIGENTYPE* dst) { \
+ MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE; \
+ VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst, vmlMode); \
+ } \
+ };
+
+#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \
+ template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > { \
+ enum { IsSupported = 1 }; \
+ static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& func, \
+ int size, const EIGENTYPE* src, EIGENTYPE* dst) { \
+ EIGENTYPE exponent = func.m_exponent; \
+ MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE; \
+ VMLOP(&size, (const VMLTYPE*)src, (const VMLTYPE*)&exponent, \
+ (VMLTYPE*)dst, &vmlMode); \
+ } \
+ };
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vs##VMLOP, float, float) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vd##VMLOP, double, double)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vc##VMLOP, scomplex, MKL_Complex8) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vz##VMLOP, dcomplex, MKL_Complex16)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP)
+
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vms##VMLOP, float, float) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmd##VMLOP, double, double)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmc##VMLOP, scomplex, MKL_Complex8) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmz##VMLOP, dcomplex, MKL_Complex16)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(EIGENOP, VMLOP) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP) \
+ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP)
+
+
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sin, Sin)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(asin, Asin)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(cos, Cos)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(acos, Acos)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(tan, Tan)
+//EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs, Abs)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(exp, Exp)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(log, Ln)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sqrt, Sqrt)
+
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr)
+
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmspowx_, float, float)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdpowx_, double, double)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcpowx_, scomplex, MKL_Complex8)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzpowx_, dcomplex, MKL_Complex16)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ASSIGN_VML_H
diff --git a/extern/Eigen3/Eigen/src/Core/CMakeLists.txt b/extern/Eigen3/Eigen/src/Core/CMakeLists.txt
new file mode 100644
index 00000000000..2346fc2bbca
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/CMakeLists.txt
@@ -0,0 +1,10 @@
+FILE(GLOB Eigen_Core_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Core_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core COMPONENT Devel
+ )
+
+ADD_SUBDIRECTORY(products)
+ADD_SUBDIRECTORY(util)
+ADD_SUBDIRECTORY(arch)
diff --git a/extern/Eigen3/Eigen/src/Core/GeneralProduct.h b/extern/Eigen3/Eigen/src/Core/GeneralProduct.h
new file mode 100644
index 00000000000..4fbe1f14bbe
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/GeneralProduct.h
@@ -0,0 +1,628 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GENERAL_PRODUCT_H
+#define EIGEN_GENERAL_PRODUCT_H
+
+namespace Eigen {
+
+/** \class GeneralProduct
+ * \ingroup Core_Module
+ *
+ * \brief Expression of the product of two general matrices or vectors
+ *
+ * \param LhsNested the type used to store the left-hand side
+ * \param RhsNested the type used to store the right-hand side
+ * \param ProductMode the type of the product
+ *
+ * This class represents an expression of the product of two general matrices.
+ * A general matrix is a dense matrix with full storage: this excludes, for
+ * instance, triangular, selfadjoint, and sparse matrices.
+ * It is the return type of the operator* between general matrices. Its template
+ * arguments are determined automatically by ProductReturnType. Therefore,
+ * GeneralProduct should never be used directly. To determine the result type of a
+ * function which involves a matrix product, use ProductReturnType::Type.
+ *
+ * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
+ */
+template<typename Lhs, typename Rhs, int ProductType = internal::product_type<Lhs,Rhs>::value>
+class GeneralProduct;
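+
+// For illustration, the result type of a product expression is spelled via
+// ProductReturnType (defined below) rather than GeneralProduct directly:
+//   template<typename A, typename B>
+//   typename ProductReturnType<A,B>::Type prod(const A& a, const B& b)
+//   { return a * b; }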
+
+enum {
+ Large = 2,
+ Small = 3
+};
+
+namespace internal {
+
+template<int Rows, int Cols, int Depth> struct product_type_selector;
+
+template<int Size, int MaxSize> struct product_size_category
+{
+ enum { is_large = MaxSize == Dynamic ||
+ Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD,
+ value = is_large ? Large
+ : Size == 1 ? 1
+ : Small
+ };
+};
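+
+// For example, with the default EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD of 8, a
+// fixed size of 4 is classified as Small, a fixed size of 16 or a Dynamic max
+// size as Large, and a size of exactly 1 keeps the value 1.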
+
+template<typename Lhs, typename Rhs> struct product_type
+{
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ enum {
+ MaxRows = _Lhs::MaxRowsAtCompileTime,
+ Rows = _Lhs::RowsAtCompileTime,
+ MaxCols = _Rhs::MaxColsAtCompileTime,
+ Cols = _Rhs::ColsAtCompileTime,
+ MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime,
+ _Rhs::MaxRowsAtCompileTime),
+ Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime,
+ _Rhs::RowsAtCompileTime),
+ LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+ };
+
+ // the splitting into different lines of code here, introducing the _select enums and the typedef below,
+ // is to work around an internal compiler error with gcc 4.1 and 4.2.
+private:
+ enum {
+ rows_select = product_size_category<Rows,MaxRows>::value,
+ cols_select = product_size_category<Cols,MaxCols>::value,
+ depth_select = product_size_category<Depth,MaxDepth>::value
+ };
+ typedef product_type_selector<rows_select, cols_select, depth_select> selector;
+
+public:
+ enum {
+ value = selector::ret
+ };
+#ifdef EIGEN_DEBUG_PRODUCT
+ static void debug()
+ {
+ EIGEN_DEBUG_VAR(Rows);
+ EIGEN_DEBUG_VAR(Cols);
+ EIGEN_DEBUG_VAR(Depth);
+ EIGEN_DEBUG_VAR(rows_select);
+ EIGEN_DEBUG_VAR(cols_select);
+ EIGEN_DEBUG_VAR(depth_select);
+ EIGEN_DEBUG_VAR(value);
+ }
+#endif
+};
+
+
+/* The following allows the kind of product to be selected at compile time
+ * based on the three dimensions of the product.
+ * This is a compile-time mapping from {1,Small,Large}^3 -> {product types} */
+// FIXME I'm not sure the current mapping is the ideal one.
+template<int M, int N> struct product_type_selector<M,N,1> { enum { ret = OuterProduct }; };
+template<int Depth> struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; };
+template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; };
+template<> struct product_type_selector<Small,1, Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Small,Small,Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Small, Small, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<> struct product_type_selector<Small, Large, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<> struct product_type_selector<Large, Small, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; };
+template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Large,1, Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Large,1, Large> { enum { ret = GemvProduct }; };
+template<> struct product_type_selector<Small,1, Large> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Small,Small,Large> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Large,Small,Large> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Small,Large,Large> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Large,Large,Large> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Large,Small,Small> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Small,Large,Small> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Large,Large,Small> { enum { ret = GemmProduct }; };
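+
+// For example, a fixed-size 4x4 * 4x4 product maps to <Small,Small,Small>, i.e.
+// CoeffBasedProductMode, whereas a dynamic MatrixXf * MatrixXf product maps to
+// <Large,Large,Large>, i.e. GemmProduct.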
+
+} // end namespace internal
+
+/** \class ProductReturnType
+ * \ingroup Core_Module
+ *
+ * \brief Helper class to get the correct and optimized returned type of operator*
+ *
+ * \param Lhs the type of the left-hand side
+ * \param Rhs the type of the right-hand side
+ * \param ProductMode the type of the product (determined automatically by internal::product_mode)
+ *
+ * This class defines the typename Type representing the optimized product expression
+ * between two matrix expressions. In practice, using ProductReturnType<Lhs,Rhs>::Type
+ * is the recommended way to define the result type of a function returning an expression
+ * which involves a matrix product. The class GeneralProduct should never be
+ * used directly.
+ *
+ * \sa class GeneralProduct, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
+ */
+template<typename Lhs, typename Rhs, int ProductType>
+struct ProductReturnType
+{
+  // TODO use the nested type to reduce instantiations?
+// typedef typename internal::nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
+// typedef typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
+
+ typedef GeneralProduct<Lhs/*Nested*/, Rhs/*Nested*/, ProductType> Type;
+};
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,CoeffBasedProductMode>
+{
+ typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
+ typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
+ typedef CoeffBasedProduct<LhsNested, RhsNested, EvalBeforeAssigningBit | EvalBeforeNestingBit> Type;
+};
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
+{
+ typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
+ typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
+ typedef CoeffBasedProduct<LhsNested, RhsNested, NestByRefBit> Type;
+};
+
+// this is a workaround for sun CC
+template<typename Lhs, typename Rhs>
+struct LazyProductReturnType : public ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
+{};
+
+/***********************************************************************
+* Implementation of Inner Vector Vector Product
+***********************************************************************/
+
+// FIXME : maybe the "inner product" could return a Scalar
+// instead of a 1x1 matrix ??
+// Pro: more natural for the user
+// Con: this could be a problem if in a meta unrolled algorithm a matrix-matrix
+// product ends up as a row-vector times col-vector product... To tackle this use
+// case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);
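+
+// For illustration, with the current design the 1x1 result converts implicitly
+// to its scalar through the conversion operator defined below:
+//   Vector3f u, v;
+//   float d = u.transpose() * v;   // inner product, yields a float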
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,InnerProduct> >
+ : traits<Matrix<typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> >
+{};
+
+}
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, InnerProduct>
+ : internal::no_assignment_operator,
+ public Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1>
+{
+ typedef Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> Base;
+ public:
+ GeneralProduct(const Lhs& lhs, const Rhs& rhs)
+ {
+ EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
+ }
+
+    /** Conversion to scalar */
+ operator const typename Base::Scalar() const {
+ return Base::coeff(0,0);
+ }
+};
+
+/***********************************************************************
+* Implementation of Outer Vector Vector Product
+***********************************************************************/
+
+namespace internal {
+template<int StorageOrder> struct outer_product_selector;
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,OuterProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs> >
+{};
+
+}
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, OuterProduct>
+ : public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+ GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {
+ EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ }
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, alpha);
+ }
+};
+
+namespace internal {
+
+template<> struct outer_product_selector<ColMajor> {
+ template<typename ProductType, typename Dest>
+ static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+ typedef typename Dest::Index Index;
+ // FIXME make sure lhs is sequentially stored
+ // FIXME not very good if rhs is real and lhs complex while alpha is real too
+ const Index cols = dest.cols();
+ for (Index j=0; j<cols; ++j)
+ dest.col(j) += (alpha * prod.rhs().coeff(j)) * prod.lhs();
+ }
+};
+
+template<> struct outer_product_selector<RowMajor> {
+ template<typename ProductType, typename Dest>
+ static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+ typedef typename Dest::Index Index;
+ // FIXME make sure rhs is sequentially stored
+ // FIXME not very good if lhs is real and rhs complex while alpha is real too
+ const Index rows = dest.rows();
+ for (Index i=0; i<rows; ++i)
+ dest.row(i) += (alpha * prod.lhs().coeff(i)) * prod.rhs();
+ }
+};
+
+} // end namespace internal
+
+/***********************************************************************
+* Implementation of General Matrix Vector Product
+***********************************************************************/
+
+/* According to the shape/flags of the matrix we have to distinguish 3 different cases:
+ * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
+ * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
+ * 3 - all other cases are handled using a simple loop along the outer-storage direction.
+ * Therefore we need a lower level meta selector.
+ * Furthermore, if the matrix is the rhs, then the product has to be transposed.
+ */
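+
+// For instance, a col-major MatrixXf times a VectorXf falls into case 1 and is
+// dispatched to gemv_selector<OnTheRight,ColMajor,true>, whereas a left-hand
+// side without directly accessible storage (BlasCompatible==false) is handled
+// by the simple loop of case 3.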
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,GemvProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs> >
+{};
+
+template<int Side, int StorageOrder, bool BlasCompatible>
+struct gemv_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, GemvProduct>
+ : public ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+ typedef typename Lhs::Scalar LhsScalar;
+ typedef typename Rhs::Scalar RhsScalar;
+
+ GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {
+// EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::Scalar, typename Rhs::Scalar>::value),
+// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ }
+
+ enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };
+ typedef typename internal::conditional<int(Side)==OnTheRight,_LhsNested,_RhsNested>::type MatrixType;
+
+ template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ {
+ eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols());
+ internal::gemv_selector<Side,(int(MatrixType::Flags)&RowMajorBit) ? RowMajor : ColMajor,
+ bool(internal::blas_traits<MatrixType>::HasUsableDirectAccess)>::run(*this, dst, alpha);
+ }
+};
+
+namespace internal {
+
+// The vector is on the left => transposition
+template<int StorageOrder, bool BlasCompatible>
+struct gemv_selector<OnTheLeft,StorageOrder,BlasCompatible>
+{
+ template<typename ProductType, typename Dest>
+ static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ Transpose<Dest> destT(dest);
+ enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
+ gemv_selector<OnTheRight,OtherStorageOrder,BlasCompatible>
+ ::run(GeneralProduct<Transpose<const typename ProductType::_RhsNested>,Transpose<const typename ProductType::_LhsNested>, GemvProduct>
+ (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha);
+ }
+};
+
+template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vector_if;
+
+template<typename Scalar,int Size,int MaxSize>
+struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
+{
+ EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
+};
+
+template<typename Scalar,int Size>
+struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
+{
+ EIGEN_STRONG_INLINE Scalar* data() { return 0; }
+};
+
+template<typename Scalar,int Size,int MaxSize>
+struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
+{
+ #if EIGEN_ALIGN_STATICALLY
+ internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize),0> m_data;
+ EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
+ #else
+  // Some architectures cannot align on the stack,
+  // so we manually enforce alignment by allocating extra data and returning
+  // the address of the first aligned element.
+ enum {
+ ForceAlignment = internal::packet_traits<Scalar>::Vectorizable,
+ PacketSize = internal::packet_traits<Scalar>::size
+ };
+ internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize)+(ForceAlignment?PacketSize:0),0> m_data;
+ EIGEN_STRONG_INLINE Scalar* data() {
+ return ForceAlignment
+ ? reinterpret_cast<Scalar*>((reinterpret_cast<size_t>(m_data.array) & ~(size_t(15))) + 16)
+ : m_data.array;
+ }
+ #endif
+};
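+
+// In the fallback branch above, (addr & ~15) + 16 rounds the buffer start up to
+// the next 16-byte boundary; the PacketSize extra elements guarantee that the
+// shifted pointer still lies inside the allocated array.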
+
+template<> struct gemv_selector<OnTheRight,ColMajor,true>
+{
+ template<typename ProductType, typename Dest>
+ static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ typedef typename ProductType::Index Index;
+ typedef typename ProductType::LhsScalar LhsScalar;
+ typedef typename ProductType::RhsScalar RhsScalar;
+ typedef typename ProductType::Scalar ResScalar;
+ typedef typename ProductType::RealScalar RealScalar;
+ typedef typename ProductType::ActualLhsType ActualLhsType;
+ typedef typename ProductType::ActualRhsType ActualRhsType;
+ typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+ typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+ typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
+
+ ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs());
+ ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+ * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+ enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
+      // on the other hand, it is good for the cache to pack the vector anyway...
+ EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
+ ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
+ MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
+ };
+
+ gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
+
+ bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
+ bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
+
+ RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
+
+ ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
+ evalToDest ? dest.data() : static_dest.data());
+
+ if(!evalToDest)
+ {
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ int size = dest.size();
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #endif
+ if(!alphaIsCompatible)
+ {
+ MappedDest(actualDestPtr, dest.size()).setZero();
+ compatibleAlpha = RhsScalar(1);
+ }
+ else
+ MappedDest(actualDestPtr, dest.size()) = dest;
+ }
+
+ general_matrix_vector_product
+ <Index,LhsScalar,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+ actualLhs.rows(), actualLhs.cols(),
+ actualLhs.data(), actualLhs.outerStride(),
+ actualRhs.data(), actualRhs.innerStride(),
+ actualDestPtr, 1,
+ compatibleAlpha);
+
+ if (!evalToDest)
+ {
+ if(!alphaIsCompatible)
+ dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
+ else
+ dest = MappedDest(actualDestPtr, dest.size());
+ }
+ }
+};
+
+template<> struct gemv_selector<OnTheRight,RowMajor,true>
+{
+ template<typename ProductType, typename Dest>
+ static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ typedef typename ProductType::LhsScalar LhsScalar;
+ typedef typename ProductType::RhsScalar RhsScalar;
+ typedef typename ProductType::Scalar ResScalar;
+ typedef typename ProductType::Index Index;
+ typedef typename ProductType::ActualLhsType ActualLhsType;
+ typedef typename ProductType::ActualRhsType ActualRhsType;
+ typedef typename ProductType::_ActualRhsType _ActualRhsType;
+ typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+ typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+
+ typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+ typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+ * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+ enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
+      // on the other hand, it is good for the cache to pack the vector anyway...
+ DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1
+ };
+
+ gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
+
+ ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
+ DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
+
+ if(!DirectlyUseRhs)
+ {
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ int size = actualRhs.size();
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #endif
+ Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+ }
+
+ general_matrix_vector_product
+ <Index,LhsScalar,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+ actualLhs.rows(), actualLhs.cols(),
+ actualLhs.data(), actualLhs.outerStride(),
+ actualRhsPtr, 1,
+ dest.data(), dest.innerStride(),
+ actualAlpha);
+ }
+};
+
+template<> struct gemv_selector<OnTheRight,ColMajor,false>
+{
+ template<typename ProductType, typename Dest>
+ static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ typedef typename Dest::Index Index;
+    // TODO make sure dest is sequentially stored in memory, otherwise use a temp
+ const Index size = prod.rhs().rows();
+ for(Index k=0; k<size; ++k)
+ dest += (alpha*prod.rhs().coeff(k)) * prod.lhs().col(k);
+ }
+};
+
+template<> struct gemv_selector<OnTheRight,RowMajor,false>
+{
+ template<typename ProductType, typename Dest>
+ static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ typedef typename Dest::Index Index;
+    // TODO make sure rhs is sequentially stored in memory, otherwise use a temp
+ const Index rows = prod.rows();
+ for(Index i=0; i<rows; ++i)
+ dest.coeffRef(i) += alpha * (prod.lhs().row(i).cwiseProduct(prod.rhs().transpose())).sum();
+ }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** \returns the matrix product of \c *this and \a other.
+ *
+ * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
+ *
+ * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename ProductReturnType<Derived, OtherDerived>::Type
+MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ // A note regarding the function declaration: In MSVC, this function will sometimes
+ // not be inlined since DenseStorage is an unwindable object for dynamic
+  // matrices, and product types hold a member to store the result.
+  // Thus, tagging this function with EIGEN_STRONG_INLINE does not help.
+ enum {
+ ProductIsValid = Derived::ColsAtCompileTime==Dynamic
+ || OtherDerived::RowsAtCompileTime==Dynamic
+ || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+ AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwiseProduct(v2)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+#ifdef EIGEN_DEBUG_PRODUCT
+ internal::product_type<Derived,OtherDerived>::debug();
+#endif
+ return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
+ *
+ * The returned product will behave like any other expression: the coefficients of the product will be
+ * computed one at a time, as requested. This might be useful in some extremely rare cases when only
+ * a small and incoherent fraction of the result's coefficients has to be computed.
+ *
+ * \warning This version of the matrix product can be much slower, so use it only if you know
+ * what you are doing and have measured a true speed improvement.
+ *
+ * \sa operator*(const MatrixBase&)
+ */
+template<typename Derived>
+template<typename OtherDerived>
+const typename LazyProductReturnType<Derived,OtherDerived>::Type
+MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
+{
+ enum {
+ ProductIsValid = Derived::ColsAtCompileTime==Dynamic
+ || OtherDerived::RowsAtCompileTime==Dynamic
+ || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+ AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwiseProduct(v2)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+
+ return typename LazyProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
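+
+// Illustrative contrast between the two entry points above:
+//   MatrixXf A(64,64), B(64,64), C(64,64);
+//   C.noalias() = A * B;             // blocked, cache-friendly GEMM evaluation
+//   C.noalias() = A.lazyProduct(B);  // coefficients computed one at a time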
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_PRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h b/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
index baab5653619..f9c43273232 100644
--- a/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
+++ b/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
@@ -49,7 +49,7 @@ EIGEN_ALWAYS_INLINE void check_rows_cols_for_overflow(Index rows, Index cols)
throw_std_bad_alloc();
}
-template <typename Derived, typename OtherDerived = Derived, bool IsVector = static_cast<bool>(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
+template <typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;
diff --git a/extern/Eigen3/Eigen/src/Core/arch/AltiVec/CMakeLists.txt b/extern/Eigen3/Eigen/src/Core/arch/AltiVec/CMakeLists.txt
new file mode 100644
index 00000000000..9f8d2e9c467
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/AltiVec/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_arch_AltiVec_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Core_arch_AltiVec_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/arch/AltiVec COMPONENT Devel
+)
diff --git a/extern/Eigen3/Eigen/src/Core/arch/CMakeLists.txt b/extern/Eigen3/Eigen/src/Core/arch/CMakeLists.txt
new file mode 100644
index 00000000000..8456dec1581
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/CMakeLists.txt
@@ -0,0 +1,4 @@
+ADD_SUBDIRECTORY(SSE)
+ADD_SUBDIRECTORY(AltiVec)
+ADD_SUBDIRECTORY(NEON)
+ADD_SUBDIRECTORY(Default)
diff --git a/extern/Eigen3/Eigen/src/Core/arch/Default/CMakeLists.txt b/extern/Eigen3/Eigen/src/Core/arch/Default/CMakeLists.txt
new file mode 100644
index 00000000000..339c091d156
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/Default/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_arch_Default_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Core_arch_Default_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/arch/Default COMPONENT Devel
+)
diff --git a/extern/Eigen3/Eigen/src/Core/arch/NEON/CMakeLists.txt b/extern/Eigen3/Eigen/src/Core/arch/NEON/CMakeLists.txt
new file mode 100644
index 00000000000..fd4d4af50cb
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/NEON/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_arch_NEON_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Core_arch_NEON_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/arch/NEON COMPONENT Devel
+)
diff --git a/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h b/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
index 7868539ba36..1eb082a5b06 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -160,7 +160,8 @@ template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, co
}
// for some weird reason, it has to be overloaded for packets of integers
-template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vmlaq_f32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
diff --git a/extern/Eigen3/Eigen/src/Core/arch/SSE/CMakeLists.txt b/extern/Eigen3/Eigen/src/Core/arch/SSE/CMakeLists.txt
new file mode 100644
index 00000000000..46ea7cc623c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/SSE/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_arch_SSE_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Core_arch_SSE_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/arch/SSE COMPONENT Devel
+)
diff --git a/extern/Eigen3/Eigen/src/Core/products/CMakeLists.txt b/extern/Eigen3/Eigen/src/Core/products/CMakeLists.txt
new file mode 100644
index 00000000000..21fc94ae38a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_Product_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Core_Product_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/products COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h
new file mode 100644
index 00000000000..3deed068e39
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h
@@ -0,0 +1,146 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Level 3 BLAS SYRK/HERK implementation.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H
+#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename Index, typename Scalar, int AStorageOrder, bool ConjugateA, int ResStorageOrder, int UpLo>
+struct general_matrix_matrix_rankupdate :
+ general_matrix_matrix_triangular_product<
+ Index,Scalar,AStorageOrder,ConjugateA,Scalar,AStorageOrder,ConjugateA,ResStorageOrder,UpLo,BuiltIn> {};
+
+
+// try to dispatch to the BLAS specialization
+#define EIGEN_MKL_RANKUPDATE_SPECIALIZE(Scalar) \
+template <typename Index, int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs, int UpLo> \
+struct general_matrix_matrix_triangular_product<Index,Scalar,LhsStorageOrder,ConjugateLhs, \
+ Scalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Specialized> { \
+ static EIGEN_STRONG_INLINE void run(Index size, Index depth,const Scalar* lhs, Index lhsStride, \
+ const Scalar* rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha) \
+ { \
+ if (lhs==rhs) { \
+ general_matrix_matrix_rankupdate<Index,Scalar,LhsStorageOrder,ConjugateLhs,ColMajor,UpLo> \
+ ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha); \
+ } else { \
+ general_matrix_matrix_triangular_product<Index, \
+ Scalar, LhsStorageOrder, ConjugateLhs, \
+ Scalar, RhsStorageOrder, ConjugateRhs, \
+ ColMajor, UpLo, BuiltIn> \
+ ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha); \
+ } \
+ } \
+};
+
+EIGEN_MKL_RANKUPDATE_SPECIALIZE(double)
+//EIGEN_MKL_RANKUPDATE_SPECIALIZE(dcomplex)
+EIGEN_MKL_RANKUPDATE_SPECIALIZE(float)
+//EIGEN_MKL_RANKUPDATE_SPECIALIZE(scomplex)
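+
+// The lhs==rhs pointer test above detects a genuine rank update (both operands
+// alias the same data, as produced by SelfAdjointView::rankUpdate()), which
+// maps to BLAS ?syrk; any other operand pair falls back to the built-in kernel.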
+
+// SYRK for float/double
+#define EIGEN_MKL_RANKUPDATE_R(EIGTYPE, MKLTYPE, MKLFUNC) \
+template <typename Index, int AStorageOrder, bool ConjugateA, int UpLo> \
+struct general_matrix_matrix_rankupdate<Index,EIGTYPE,AStorageOrder,ConjugateA,ColMajor,UpLo> { \
+ enum { \
+ IsLower = (UpLo&Lower) == Lower, \
+ LowUp = IsLower ? Lower : Upper, \
+ conjA = ((AStorageOrder==ColMajor) && ConjugateA) ? 1 : 0 \
+ }; \
+ static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \
+ const EIGTYPE* rhs, Index rhsStride, EIGTYPE* res, Index resStride, EIGTYPE alpha) \
+ { \
+ /* typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs;*/ \
+\
+ MKL_INT lda=lhsStride, ldc=resStride, n=size, k=depth; \
+ char uplo=(IsLower) ? 'L' : 'U', trans=(AStorageOrder==RowMajor) ? 'T':'N'; \
+ MKLTYPE alpha_, beta_; \
+\
+/* Set alpha_ & beta_ */ \
+ assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+ assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(beta_, EIGTYPE(1)); \
+ MKLFUNC(&uplo, &trans, &n, &k, &alpha_, lhs, &lda, &beta_, res, &ldc); \
+ } \
+};
+
+// HERK for complex data
+#define EIGEN_MKL_RANKUPDATE_C(EIGTYPE, MKLTYPE, RTYPE, MKLFUNC) \
+template <typename Index, int AStorageOrder, bool ConjugateA, int UpLo> \
+struct general_matrix_matrix_rankupdate<Index,EIGTYPE,AStorageOrder,ConjugateA,ColMajor,UpLo> { \
+ enum { \
+ IsLower = (UpLo&Lower) == Lower, \
+ LowUp = IsLower ? Lower : Upper, \
+ conjA = (((AStorageOrder==ColMajor) && ConjugateA) || ((AStorageOrder==RowMajor) && !ConjugateA)) ? 1 : 0 \
+ }; \
+ static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \
+ const EIGTYPE* rhs, Index rhsStride, EIGTYPE* res, Index resStride, EIGTYPE alpha) \
+ { \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, AStorageOrder> MatrixType; \
+\
+ MKL_INT lda=lhsStride, ldc=resStride, n=size, k=depth; \
+ char uplo=(IsLower) ? 'L' : 'U', trans=(AStorageOrder==RowMajor) ? 'C':'N'; \
+ RTYPE alpha_, beta_; \
+ const EIGTYPE* a_ptr; \
+\
+/* Set alpha_ & beta_ */ \
+/* assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); */\
+/* assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(beta_, EIGTYPE(1));*/ \
+ alpha_ = alpha.real(); \
+ beta_ = 1.0; \
+/* Copy with conjugation in some cases*/ \
+ MatrixType a; \
+ if (conjA) { \
+ Map<const MatrixType, 0, OuterStride<> > mapA(lhs,n,k,OuterStride<>(lhsStride)); \
+ a = mapA.conjugate(); \
+ lda = a.outerStride(); \
+ a_ptr = a.data(); \
+ } else a_ptr=lhs; \
+ MKLFUNC(&uplo, &trans, &n, &k, &alpha_, (MKLTYPE*)a_ptr, &lda, &beta_, (MKLTYPE*)res, &ldc); \
+ } \
+};
+
+
+EIGEN_MKL_RANKUPDATE_R(double, double, dsyrk)
+EIGEN_MKL_RANKUPDATE_R(float, float, ssyrk)
+
+//EIGEN_MKL_RANKUPDATE_C(dcomplex, MKL_Complex16, double, zherk)
+//EIGEN_MKL_RANKUPDATE_C(scomplex, MKL_Complex8, double, cherk)
+
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
new file mode 100644
index 00000000000..060af328ebe
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
@@ -0,0 +1,118 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * General matrix-matrix product functionality based on ?GEMM.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_GENERAL_MATRIX_MATRIX_MKL_H
+#define EIGEN_GENERAL_MATRIX_MATRIX_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+/**********************************************************************
+* This file implements general matrix-matrix multiplication using the BLAS
+* gemm function, via partial specialization of the
+* general_matrix_matrix_product::run(..) method for float, double,
+* std::complex<float> and std::complex<double> types
+**********************************************************************/
+
+// gemm specialization
+
+#define GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, MKLTYPE, MKLPREFIX) \
+template< \
+ typename Index, \
+ int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs> \
+struct general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor> \
+{ \
+static void run(Index rows, Index cols, Index depth, \
+ const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsStride, \
+ EIGTYPE* res, Index resStride, \
+ EIGTYPE alpha, \
+ level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/, \
+ GemmParallelInfo<Index>* /*info = 0*/) \
+{ \
+ using std::conj; \
+\
+ char transa, transb; \
+ MKL_INT m, n, k, lda, ldb, ldc; \
+ const EIGTYPE *a, *b; \
+ MKLTYPE alpha_, beta_; \
+ MatrixX##EIGPREFIX a_tmp, b_tmp; \
+ EIGTYPE myone(1);\
+\
+/* Set transpose options */ \
+ transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \
+ transb = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \
+\
+/* Set m, n, k */ \
+ m = (MKL_INT)rows; \
+ n = (MKL_INT)cols; \
+ k = (MKL_INT)depth; \
+\
+/* Set alpha_ & beta_ */ \
+ assign_scalar_eig2mkl(alpha_, alpha); \
+ assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+ lda = (MKL_INT)lhsStride; \
+ ldb = (MKL_INT)rhsStride; \
+ ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+ if ((LhsStorageOrder==ColMajor) && (ConjugateLhs)) { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,m,k,OuterStride<>(lhsStride)); \
+ a_tmp = lhs.conjugate(); \
+ a = a_tmp.data(); \
+ lda = a_tmp.outerStride(); \
+ } else a = _lhs; \
+\
+ if ((RhsStorageOrder==ColMajor) && (ConjugateRhs)) { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,k,n,OuterStride<>(rhsStride)); \
+ b_tmp = rhs.conjugate(); \
+ b = b_tmp.data(); \
+ ldb = b_tmp.outerStride(); \
+ } else b = _rhs; \
+\
+ MKLPREFIX##gemm(&transa, &transb, &m, &n, &k, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+}};
+
+GEMM_SPECIALIZATION(double, d, double, d)
+GEMM_SPECIALIZATION(float, f, float, s)
+GEMM_SPECIALIZATION(dcomplex, cd, MKL_Complex16, z)
+GEMM_SPECIALIZATION(scomplex, cf, MKL_Complex8, c)
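+
+// For instance, the double instantiation above generates a specialization whose
+// run(..) forwards to dgemm(&transa, &transb, &m, &n, &k, &alpha_, a, &lda, b,
+// &ldb, &beta_, res, &ldc), making a conjugated copy beforehand whenever a
+// col-major operand must be conjugated without transposition.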
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_MATRIX_MKL_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
new file mode 100644
index 00000000000..e9de6af3ed1
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
@@ -0,0 +1,131 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * General matrix-vector product functionality based on ?GEMV.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_GENERAL_MATRIX_VECTOR_MKL_H
+#define EIGEN_GENERAL_MATRIX_VECTOR_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+/**********************************************************************
+* This file implements general matrix-vector multiplication using the BLAS
+* gemv function, via partial specialization of the
+* general_matrix_vector_product::run(..) method for float, double,
+* std::complex<float> and std::complex<double> types
+**********************************************************************/
+
+// gemv specialization
+
+template<typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
+struct general_matrix_vector_product_gemv :
+ general_matrix_vector_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,ConjugateRhs,BuiltIn> {};
+
+#define EIGEN_MKL_GEMV_SPECIALIZE(Scalar) \
+template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
+struct general_matrix_vector_product<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \
+static EIGEN_DONT_INLINE void run( \
+ Index rows, Index cols, \
+ const Scalar* lhs, Index lhsStride, \
+ const Scalar* rhs, Index rhsIncr, \
+ Scalar* res, Index resIncr, Scalar alpha) \
+{ \
+ if (ConjugateLhs) { \
+ general_matrix_vector_product<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs,BuiltIn>::run( \
+ rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \
+ } else { \
+ general_matrix_vector_product_gemv<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
+ rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \
+ } \
+} \
+}; \
+template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
+struct general_matrix_vector_product<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \
+static EIGEN_DONT_INLINE void run( \
+ Index rows, Index cols, \
+ const Scalar* lhs, Index lhsStride, \
+ const Scalar* rhs, Index rhsIncr, \
+ Scalar* res, Index resIncr, Scalar alpha) \
+{ \
+ general_matrix_vector_product_gemv<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
+ rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \
+} \
+}; \
+
+EIGEN_MKL_GEMV_SPECIALIZE(double)
+EIGEN_MKL_GEMV_SPECIALIZE(float)
+EIGEN_MKL_GEMV_SPECIALIZE(dcomplex)
+EIGEN_MKL_GEMV_SPECIALIZE(scomplex)
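+
+// Note that in the ColMajor specialization above, a conjugated lhs stays on the
+// built-in kernel: BLAS ?gemv only offers the 'N', 'T' and 'C' options, so a
+// conjugate without transpose cannot be expressed directly.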
+
+#define EIGEN_MKL_GEMV_SPECIALIZATION(EIGTYPE,MKLTYPE,MKLPREFIX) \
+template<typename Index, int LhsStorageOrder, bool ConjugateLhs, bool ConjugateRhs> \
+struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,ConjugateRhs> \
+{ \
+typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> GEMVVector;\
+\
+static EIGEN_DONT_INLINE void run( \
+ Index rows, Index cols, \
+ const EIGTYPE* lhs, Index lhsStride, \
+ const EIGTYPE* rhs, Index rhsIncr, \
+ EIGTYPE* res, Index resIncr, EIGTYPE alpha) \
+{ \
+ MKL_INT m=rows, n=cols, lda=lhsStride, incx=rhsIncr, incy=resIncr; \
+ MKLTYPE alpha_, beta_; \
+ const EIGTYPE *x_ptr, myone(1); \
+ char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 'C' : 'T'; \
+ if (LhsStorageOrder==RowMajor) { \
+ m=cols; \
+ n=rows; \
+ }\
+ assign_scalar_eig2mkl(alpha_, alpha); \
+ assign_scalar_eig2mkl(beta_, myone); \
+ GEMVVector x_tmp; \
+ if (ConjugateRhs) { \
+ Map<const GEMVVector, 0, InnerStride<> > map_x(rhs,cols,1,InnerStride<>(incx)); \
+ x_tmp=map_x.conjugate(); \
+ x_ptr=x_tmp.data(); \
+ incx=1; \
+ } else x_ptr=rhs; \
+ MKLPREFIX##gemv(&trans, &m, &n, &alpha_, (const MKLTYPE*)lhs, &lda, (const MKLTYPE*)x_ptr, &incx, &beta_, (MKLTYPE*)res, &incy); \
+}\
+};
+
+EIGEN_MKL_GEMV_SPECIALIZATION(double, double, d)
+EIGEN_MKL_GEMV_SPECIALIZATION(float, float, s)
+EIGEN_MKL_GEMV_SPECIALIZATION(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_GEMV_SPECIALIZATION(scomplex, MKL_Complex8, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_VECTOR_MKL_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
new file mode 100644
index 00000000000..4e5c4125c01
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
@@ -0,0 +1,295 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Self adjoint matrix * matrix product functionality based on ?SYMM/?HEMM.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H
+#define EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+
+/* Optimized selfadjoint matrix * matrix (?SYMM/?HEMM) product */
+
+#define EIGEN_MKL_SYMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, \
+ int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
+{\
+\
+ static EIGEN_DONT_INLINE void run( \
+ Index rows, Index cols, \
+ const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsStride, \
+ EIGTYPE* res, Index resStride, \
+ EIGTYPE alpha) \
+ { \
+ char side='L', uplo='L'; \
+ MKL_INT m, n, lda, ldb, ldc; \
+ const EIGTYPE *a, *b; \
+ MKLTYPE alpha_, beta_; \
+ MatrixX##EIGPREFIX b_tmp; \
+ EIGTYPE myone(1);\
+\
+/* Set transpose options */ \
+/* Set m, n, k */ \
+ m = (MKL_INT)rows; \
+ n = (MKL_INT)cols; \
+\
+/* Set alpha_ & beta_ */ \
+ assign_scalar_eig2mkl(alpha_, alpha); \
+ assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+ lda = (MKL_INT)lhsStride; \
+ ldb = (MKL_INT)rhsStride; \
+ ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+ if (LhsStorageOrder==RowMajor) uplo='U'; \
+ a = _lhs; \
+\
+ if (RhsStorageOrder==RowMajor) { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
+ b_tmp = rhs.adjoint(); \
+ b = b_tmp.data(); \
+ ldb = b_tmp.outerStride(); \
+ } else b = _rhs; \
+\
+ MKLPREFIX##symm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+\
+ } \
+};
+
+
+#define EIGEN_MKL_HEMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, \
+ int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
+{\
+ static EIGEN_DONT_INLINE void run( \
+ Index rows, Index cols, \
+ const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsStride, \
+ EIGTYPE* res, Index resStride, \
+ EIGTYPE alpha) \
+ { \
+ char side='L', uplo='L'; \
+ MKL_INT m, n, lda, ldb, ldc; \
+ const EIGTYPE *a, *b; \
+ MKLTYPE alpha_, beta_; \
+ MatrixX##EIGPREFIX b_tmp; \
+ Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> a_tmp; \
+ EIGTYPE myone(1); \
+\
+/* Set transpose options */ \
+/* Set m, n, k */ \
+ m = (MKL_INT)rows; \
+ n = (MKL_INT)cols; \
+\
+/* Set alpha_ & beta_ */ \
+ assign_scalar_eig2mkl(alpha_, alpha); \
+ assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+ lda = (MKL_INT)lhsStride; \
+ ldb = (MKL_INT)rhsStride; \
+ ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+ if (((LhsStorageOrder==ColMajor) && ConjugateLhs) || ((LhsStorageOrder==RowMajor) && (!ConjugateLhs))) { \
+ Map<const Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder>, 0, OuterStride<> > lhs(_lhs,m,m,OuterStride<>(lhsStride)); \
+ a_tmp = lhs.conjugate(); \
+ a = a_tmp.data(); \
+ lda = a_tmp.outerStride(); \
+ } else a = _lhs; \
+ if (LhsStorageOrder==RowMajor) uplo='U'; \
+\
+ if (RhsStorageOrder==ColMajor && (!ConjugateRhs)) { \
+ b = _rhs; } \
+ else { \
+ if (RhsStorageOrder==ColMajor && ConjugateRhs) { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,m,n,OuterStride<>(rhsStride)); \
+ b_tmp = rhs.conjugate(); \
+ } else \
+ if (ConjugateRhs) { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
+ b_tmp = rhs.adjoint(); \
+ } else { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
+ b_tmp = rhs.transpose(); \
+ } \
+ b = b_tmp.data(); \
+ ldb = b_tmp.outerStride(); \
+ } \
+\
+ MKLPREFIX##hemm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+\
+ } \
+};
+
+EIGEN_MKL_SYMM_L(double, double, d, d)
+EIGEN_MKL_SYMM_L(float, float, f, s)
+EIGEN_MKL_HEMM_L(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_HEMM_L(scomplex, MKL_Complex8, cf, c)
+
+
+/* Optimized matrix * selfadjoint matrix (?SYMM/?HEMM) product */
+
+#define EIGEN_MKL_SYMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, \
+ int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
+{\
+\
+ static EIGEN_DONT_INLINE void run( \
+ Index rows, Index cols, \
+ const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsStride, \
+ EIGTYPE* res, Index resStride, \
+ EIGTYPE alpha) \
+ { \
+ char side='R', uplo='L'; \
+ MKL_INT m, n, lda, ldb, ldc; \
+ const EIGTYPE *a, *b; \
+ MKLTYPE alpha_, beta_; \
+ MatrixX##EIGPREFIX b_tmp; \
+ EIGTYPE myone(1);\
+\
+/* Set m, n, k */ \
+ m = (MKL_INT)rows; \
+ n = (MKL_INT)cols; \
+\
+/* Set alpha_ & beta_ */ \
+ assign_scalar_eig2mkl(alpha_, alpha); \
+ assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+ lda = (MKL_INT)rhsStride; \
+ ldb = (MKL_INT)lhsStride; \
+ ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+ if (RhsStorageOrder==RowMajor) uplo='U'; \
+ a = _rhs; \
+\
+ if (LhsStorageOrder==RowMajor) { \
+      Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
+ b_tmp = lhs.adjoint(); \
+ b = b_tmp.data(); \
+ ldb = b_tmp.outerStride(); \
+ } else b = _lhs; \
+\
+ MKLPREFIX##symm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+\
+ } \
+};
+
+
+#define EIGEN_MKL_HEMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, \
+ int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
+{\
+ static EIGEN_DONT_INLINE void run( \
+ Index rows, Index cols, \
+ const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsStride, \
+ EIGTYPE* res, Index resStride, \
+ EIGTYPE alpha) \
+ { \
+ char side='R', uplo='L'; \
+ MKL_INT m, n, lda, ldb, ldc; \
+ const EIGTYPE *a, *b; \
+ MKLTYPE alpha_, beta_; \
+ MatrixX##EIGPREFIX b_tmp; \
+ Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> a_tmp; \
+ EIGTYPE myone(1); \
+\
+/* Set m, n, k */ \
+ m = (MKL_INT)rows; \
+ n = (MKL_INT)cols; \
+\
+/* Set alpha_ & beta_ */ \
+ assign_scalar_eig2mkl(alpha_, alpha); \
+ assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+ lda = (MKL_INT)rhsStride; \
+ ldb = (MKL_INT)lhsStride; \
+ ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+ if (((RhsStorageOrder==ColMajor) && ConjugateRhs) || ((RhsStorageOrder==RowMajor) && (!ConjugateRhs))) { \
+ Map<const Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder>, 0, OuterStride<> > rhs(_rhs,n,n,OuterStride<>(rhsStride)); \
+ a_tmp = rhs.conjugate(); \
+ a = a_tmp.data(); \
+ lda = a_tmp.outerStride(); \
+ } else a = _rhs; \
+ if (RhsStorageOrder==RowMajor) uplo='U'; \
+\
+ if (LhsStorageOrder==ColMajor && (!ConjugateLhs)) { \
+ b = _lhs; } \
+ else { \
+ if (LhsStorageOrder==ColMajor && ConjugateLhs) { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,m,n,OuterStride<>(lhsStride)); \
+ b_tmp = lhs.conjugate(); \
+ } else \
+ if (ConjugateLhs) { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
+ b_tmp = lhs.adjoint(); \
+ } else { \
+ Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
+ b_tmp = lhs.transpose(); \
+ } \
+ b = b_tmp.data(); \
+ ldb = b_tmp.outerStride(); \
+ } \
+\
+ MKLPREFIX##hemm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+ } \
+};
+
+EIGEN_MKL_SYMM_R(double, double, d, d)
+EIGEN_MKL_SYMM_R(float, float, f, s)
+EIGEN_MKL_HEMM_R(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_HEMM_R(scomplex, MKL_Complex8, cf, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H
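A minimal usage sketch of the ?SYMM/?HEMM path defined above — assuming EIGEN_USE_MKL_ALL is defined before the first Eigen include (the opt-in macro introduced in MKL_support.h later in this patch); matrix sizes are illustrative:

  // Sketch: a selfadjoint * general product. With MKL enabled, the
  // double specialization above is expected to route this through dsymm.
  #define EIGEN_USE_MKL_ALL
  #include <Eigen/Dense>

  int main()
  {
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
    Eigen::MatrixXd B = Eigen::MatrixXd::Random(512, 512);
    Eigen::MatrixXd C = Eigen::MatrixXd::Zero(512, 512);
    // Only the lower triangle of A is referenced (side='L', uplo='L').
    C.noalias() += A.selfadjointView<Eigen::Lower>() * B;
    return 0;
  }
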
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
new file mode 100644
index 00000000000..f88d483b653
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
@@ -0,0 +1,114 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Selfadjoint matrix-vector product functionality based on ?SYMV/HEMV.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H
+#define EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+/**********************************************************************
+* This file implements selfadjoint matrix-vector multiplication using BLAS
+**********************************************************************/
+
+// symv/hemv specialization
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>
+struct selfadjoint_matrix_vector_product_symv :
+ selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn> {};
+
+#define EIGEN_MKL_SYMV_SPECIALIZE(Scalar) \
+template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \
+struct selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Specialized> { \
+static EIGEN_DONT_INLINE void run( \
+ Index size, const Scalar* lhs, Index lhsStride, \
+ const Scalar* _rhs, Index rhsIncr, Scalar* res, Scalar alpha) { \
+ enum {\
+ IsColMajor = StorageOrder==ColMajor \
+ }; \
+ if (IsColMajor == ConjugateLhs) {\
+ selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn>::run( \
+ size, lhs, lhsStride, _rhs, rhsIncr, res, alpha); \
+ } else {\
+ selfadjoint_matrix_vector_product_symv<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs>::run( \
+ size, lhs, lhsStride, _rhs, rhsIncr, res, alpha); \
+ }\
+ } \
+}; \
+
+EIGEN_MKL_SYMV_SPECIALIZE(double)
+EIGEN_MKL_SYMV_SPECIALIZE(float)
+EIGEN_MKL_SYMV_SPECIALIZE(dcomplex)
+EIGEN_MKL_SYMV_SPECIALIZE(scomplex)
+
+#define EIGEN_MKL_SYMV_SPECIALIZATION(EIGTYPE,MKLTYPE,MKLFUNC) \
+template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \
+struct selfadjoint_matrix_vector_product_symv<EIGTYPE,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs> \
+{ \
+typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> SYMVVector;\
+\
+static EIGEN_DONT_INLINE void run( \
+Index size, const EIGTYPE* lhs, Index lhsStride, \
+const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* res, EIGTYPE alpha) \
+{ \
+ enum {\
+ IsRowMajor = StorageOrder==RowMajor ? 1 : 0, \
+ IsLower = UpLo == Lower ? 1 : 0 \
+ }; \
+ MKL_INT n=size, lda=lhsStride, incx=rhsIncr, incy=1; \
+ MKLTYPE alpha_, beta_; \
+ const EIGTYPE *x_ptr, myone(1); \
+ char uplo=(IsRowMajor) ? (IsLower ? 'U' : 'L') : (IsLower ? 'L' : 'U'); \
+ assign_scalar_eig2mkl(alpha_, alpha); \
+ assign_scalar_eig2mkl(beta_, myone); \
+ SYMVVector x_tmp; \
+ if (ConjugateRhs) { \
+ Map<const SYMVVector, 0, InnerStride<> > map_x(_rhs,size,1,InnerStride<>(incx)); \
+ x_tmp=map_x.conjugate(); \
+ x_ptr=x_tmp.data(); \
+ incx=1; \
+ } else x_ptr=_rhs; \
+ MKLFUNC(&uplo, &n, &alpha_, (const MKLTYPE*)lhs, &lda, (const MKLTYPE*)x_ptr, &incx, &beta_, (MKLTYPE*)res, &incy); \
+}\
+};
+
+EIGEN_MKL_SYMV_SPECIALIZATION(double, double, dsymv)
+EIGEN_MKL_SYMV_SPECIALIZATION(float, float, ssymv)
+EIGEN_MKL_SYMV_SPECIALIZATION(dcomplex, MKL_Complex16, zhemv)
+EIGEN_MKL_SYMV_SPECIALIZATION(scomplex, MKL_Complex8, chemv)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H
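The matrix-vector analogue is reached the same way; a hedged sketch (sizes illustrative, dispatch to dsymv assumes the specialization above is selected):

  // Sketch: selfadjoint matrix * vector; expected to hit ?symv/?hemv.
  #define EIGEN_USE_MKL_ALL
  #include <Eigen/Dense>

  int main()
  {
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(256, 256);
    Eigen::VectorXd x = Eigen::VectorXd::Random(256);
    Eigen::VectorXd y = Eigen::VectorXd::Zero(256);
    y.noalias() += A.selfadjointView<Eigen::Upper>() * x; // uplo='U'
    return 0;
  }
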
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
new file mode 100644
index 00000000000..8173da5bb6d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
@@ -0,0 +1,309 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Triangular matrix * matrix product functionality based on ?TRMM.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H
+#define EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+
+template <typename Scalar, typename Index,
+ int Mode, bool LhsIsTriangular,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResStorageOrder>
+struct product_triangular_matrix_matrix_trmm :
+ product_triangular_matrix_matrix<Scalar,Index,Mode,
+ LhsIsTriangular,LhsStorageOrder,ConjugateLhs,
+ RhsStorageOrder, ConjugateRhs, ResStorageOrder, BuiltIn> {};
+
+
+// try to go to BLAS specialization
+#define EIGEN_MKL_TRMM_SPECIALIZE(Scalar, LhsIsTriangular) \
+template <typename Index, int Mode, \
+ int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs> \
+struct product_triangular_matrix_matrix<Scalar,Index, Mode, LhsIsTriangular, \
+ LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,Specialized> { \
+ static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\
+ const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha) { \
+ product_triangular_matrix_matrix_trmm<Scalar,Index,Mode, \
+ LhsIsTriangular,LhsStorageOrder,ConjugateLhs, \
+ RhsStorageOrder, ConjugateRhs, ColMajor>::run( \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+ } \
+};
+
+EIGEN_MKL_TRMM_SPECIALIZE(double, true)
+EIGEN_MKL_TRMM_SPECIALIZE(double, false)
+EIGEN_MKL_TRMM_SPECIALIZE(dcomplex, true)
+EIGEN_MKL_TRMM_SPECIALIZE(dcomplex, false)
+EIGEN_MKL_TRMM_SPECIALIZE(float, true)
+EIGEN_MKL_TRMM_SPECIALIZE(float, false)
+EIGEN_MKL_TRMM_SPECIALIZE(scomplex, true)
+EIGEN_MKL_TRMM_SPECIALIZE(scomplex, false)
+
+// implements col-major += alpha * op(triangular) * op(general)
+#define EIGEN_MKL_TRMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, int Mode, \
+ int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs> \
+struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
+ LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \
+{ \
+ enum { \
+ IsLower = (Mode&Lower) == Lower, \
+ SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
+ IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
+ IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
+ LowUp = IsLower ? Lower : Upper, \
+ conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \
+ }; \
+\
+ static EIGEN_DONT_INLINE void run( \
+ Index _rows, Index _cols, Index _depth, \
+ const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsStride, \
+ EIGTYPE* res, Index resStride, \
+ EIGTYPE alpha) \
+ { \
+ Index diagSize = (std::min)(_rows,_depth); \
+ Index rows = IsLower ? _rows : diagSize; \
+ Index depth = IsLower ? diagSize : _depth; \
+ Index cols = _cols; \
+\
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \
+\
+/* Non-square case - doesn't fit MKL ?TRMM; fall back to the default triangular product or call MKL ?GEMM */ \
+ if (rows != depth) { \
+\
+ int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+\
+ if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
+      /* Most likely no benefit in calling TRMM or GEMM from MKL */ \
+ product_triangular_matrix_matrix<EIGTYPE,Index,Mode,true, \
+ LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+ /*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \
+ } else { \
+      /* Makes sense to call GEMM */ \
+ Map<const MatrixLhs, 0, OuterStride<> > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \
+ MatrixLhs aa_tmp=lhsMap.template triangularView<Mode>(); \
+ MKL_INT aStride = aa_tmp.outerStride(); \
+ gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> blocking(_rows,_cols,_depth); \
+ general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
+ rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, blocking, 0); \
+\
+ /*std::cout << "TRMM_L: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \
+ } \
+ return; \
+ } \
+ char side = 'L', transa, uplo, diag = 'N'; \
+ EIGTYPE *b; \
+ const EIGTYPE *a; \
+ MKL_INT m, n, lda, ldb; \
+ MKLTYPE alpha_; \
+\
+/* Set alpha_*/ \
+ assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+\
+/* Set m, n */ \
+ m = (MKL_INT)diagSize; \
+ n = (MKL_INT)cols; \
+\
+/* Set trans */ \
+ transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \
+\
+/* Set b, ldb */ \
+ Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols,OuterStride<>(rhsStride)); \
+ MatrixX##EIGPREFIX b_tmp; \
+\
+ if (ConjugateRhs) b_tmp = rhs.conjugate(); else b_tmp = rhs; \
+ b = b_tmp.data(); \
+ ldb = b_tmp.outerStride(); \
+\
+/* Set uplo */ \
+ uplo = IsLower ? 'L' : 'U'; \
+ if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
+/* Set a, lda */ \
+ Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \
+ MatrixLhs a_tmp; \
+\
+ if ((conjA!=0) || (SetDiag==0)) { \
+ if (conjA) a_tmp = lhs.conjugate(); else a_tmp = lhs; \
+ if (IsZeroDiag) \
+ a_tmp.diagonal().setZero(); \
+ else if (IsUnitDiag) \
+ a_tmp.diagonal().setOnes();\
+ a = a_tmp.data(); \
+ lda = a_tmp.outerStride(); \
+ } else { \
+ a = _lhs; \
+ lda = lhsStride; \
+ } \
+ /*std::cout << "TRMM_L: A is square! Go to MKL TRMM implementation! \n";*/ \
+/* call ?trmm*/ \
+ MKLPREFIX##trmm(&side, &uplo, &transa, &diag, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (MKLTYPE*)b, &ldb); \
+\
+/* Add op(a_triangular)*b into res*/ \
+ Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \
+ res_tmp=res_tmp+b_tmp; \
+ } \
+};
+
+EIGEN_MKL_TRMM_L(double, double, d, d)
+EIGEN_MKL_TRMM_L(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_TRMM_L(float, float, f, s)
+EIGEN_MKL_TRMM_L(scomplex, MKL_Complex8, cf, c)
+
+// implements col-major += alpha * op(general) * op(triangular)
+#define EIGEN_MKL_TRMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, int Mode, \
+ int LhsStorageOrder, bool ConjugateLhs, \
+ int RhsStorageOrder, bool ConjugateRhs> \
+struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
+ LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \
+{ \
+ enum { \
+ IsLower = (Mode&Lower) == Lower, \
+ SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
+ IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
+ IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
+ LowUp = IsLower ? Lower : Upper, \
+ conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \
+ }; \
+\
+ static EIGEN_DONT_INLINE void run( \
+ Index _rows, Index _cols, Index _depth, \
+ const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsStride, \
+ EIGTYPE* res, Index resStride, \
+ EIGTYPE alpha) \
+ { \
+ Index diagSize = (std::min)(_cols,_depth); \
+ Index rows = _rows; \
+ Index depth = IsLower ? _depth : diagSize; \
+ Index cols = IsLower ? diagSize : _cols; \
+\
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \
+\
+/* Non-square case - doesn't fit MKL ?TRMM; fall back to the default triangular product or call MKL ?GEMM */ \
+ if (cols != depth) { \
+\
+ int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+\
+ if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
+      /* Most likely no benefit in calling TRMM or GEMM from MKL */ \
+ product_triangular_matrix_matrix<EIGTYPE,Index,Mode,false, \
+ LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+ /*std::cout << "TRMM_R: A is not square! Go to Eigen TRMM implementation!\n";*/ \
+ } else { \
+      /* Makes sense to call GEMM */ \
+ Map<const MatrixRhs, 0, OuterStride<> > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \
+ MatrixRhs aa_tmp=rhsMap.template triangularView<Mode>(); \
+ MKL_INT aStride = aa_tmp.outerStride(); \
+ gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> blocking(_rows,_cols,_depth); \
+ general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
+ rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, blocking, 0); \
+\
+ /*std::cout << "TRMM_R: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \
+ } \
+ return; \
+ } \
+ char side = 'R', transa, uplo, diag = 'N'; \
+ EIGTYPE *b; \
+ const EIGTYPE *a; \
+ MKL_INT m, n, lda, ldb; \
+ MKLTYPE alpha_; \
+\
+/* Set alpha_*/ \
+ assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+\
+/* Set m, n */ \
+ m = (MKL_INT)rows; \
+ n = (MKL_INT)diagSize; \
+\
+/* Set trans */ \
+ transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \
+\
+/* Set b, ldb */ \
+ Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \
+ MatrixX##EIGPREFIX b_tmp; \
+\
+ if (ConjugateLhs) b_tmp = lhs.conjugate(); else b_tmp = lhs; \
+ b = b_tmp.data(); \
+ ldb = b_tmp.outerStride(); \
+\
+/* Set uplo */ \
+ uplo = IsLower ? 'L' : 'U'; \
+ if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
+/* Set a, lda */ \
+ Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols, OuterStride<>(rhsStride)); \
+ MatrixRhs a_tmp; \
+\
+ if ((conjA!=0) || (SetDiag==0)) { \
+ if (conjA) a_tmp = rhs.conjugate(); else a_tmp = rhs; \
+ if (IsZeroDiag) \
+ a_tmp.diagonal().setZero(); \
+ else if (IsUnitDiag) \
+ a_tmp.diagonal().setOnes();\
+ a = a_tmp.data(); \
+ lda = a_tmp.outerStride(); \
+ } else { \
+ a = _rhs; \
+ lda = rhsStride; \
+ } \
+ /*std::cout << "TRMM_R: A is square! Go to MKL TRMM implementation! \n";*/ \
+/* call ?trmm*/ \
+ MKLPREFIX##trmm(&side, &uplo, &transa, &diag, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (MKLTYPE*)b, &ldb); \
+\
+/* Add op(a_triangular)*b into res*/ \
+ Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \
+ res_tmp=res_tmp+b_tmp; \
+ } \
+};
+
+EIGEN_MKL_TRMM_R(double, double, d, d)
+EIGEN_MKL_TRMM_R(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_TRMM_R(float, float, f, s)
+EIGEN_MKL_TRMM_R(scomplex, MKL_Complex8, cf, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H
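For illustration, how user code reaches the ?TRMM kernels above — a sketch assuming EIGEN_USE_MKL_ALL is defined; note the macros fall back to Eigen's built-in kernel or to ?GEMM when the triangular factor is not square:

  // Sketch: triangular * general product; the triangular factor is
  // square here, so this should take the dtrmm branch, not a fallback.
  #define EIGEN_USE_MKL_ALL
  #include <Eigen/Dense>

  int main()
  {
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(300, 300);
    Eigen::MatrixXd B = Eigen::MatrixXd::Random(300, 100);
    Eigen::MatrixXd C = Eigen::MatrixXd::Zero(300, 100);
    C += A.triangularView<Eigen::Lower>() * B;
    return 0;
  }
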
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
new file mode 100644
index 00000000000..3c2c3049aa3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
@@ -0,0 +1,247 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Triangular matrix-vector product functionality based on ?TRMV.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_TRIANGULAR_MATRIX_VECTOR_MKL_H
+#define EIGEN_TRIANGULAR_MATRIX_VECTOR_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+/**********************************************************************
+* This file implements triangular matrix-vector multiplication using BLAS
+**********************************************************************/
+
+// trmv/hemv specialization
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder>
+struct triangular_matrix_vector_product_trmv :
+ triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,StorageOrder,BuiltIn> {};
+
+#define EIGEN_MKL_TRMV_SPECIALIZE(Scalar) \
+template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
+struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor,Specialized> { \
+ static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
+ const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
+ triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor>::run( \
+ _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
+ } \
+}; \
+template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
+struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor,Specialized> { \
+ static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
+ const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
+ triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor>::run( \
+ _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
+ } \
+};
+
+EIGEN_MKL_TRMV_SPECIALIZE(double)
+EIGEN_MKL_TRMV_SPECIALIZE(float)
+EIGEN_MKL_TRMV_SPECIALIZE(dcomplex)
+EIGEN_MKL_TRMV_SPECIALIZE(scomplex)
+
+// implements col-major: res += alpha * op(triangular) * vector
+#define EIGEN_MKL_TRMV_CM(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
+struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor> { \
+ enum { \
+ IsLower = (Mode&Lower) == Lower, \
+ SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
+ IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
+ IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
+ LowUp = IsLower ? Lower : Upper \
+ }; \
+ static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
+ { \
+ if (ConjLhs || IsZeroDiag) { \
+ triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor,BuiltIn>::run( \
+ _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
+ return; \
+ }\
+ Index size = (std::min)(_rows,_cols); \
+ Index rows = IsLower ? _rows : size; \
+ Index cols = IsLower ? size : _cols; \
+\
+ typedef VectorX##EIGPREFIX VectorRhs; \
+ EIGTYPE *x, *y;\
+\
+/* Set x*/ \
+ Map<const VectorRhs, 0, InnerStride<> > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \
+ VectorRhs x_tmp; \
+ if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
+ x = x_tmp.data(); \
+\
+/* Square part handling */\
+\
+ char trans, uplo, diag; \
+ MKL_INT m, n, lda, incx, incy; \
+ EIGTYPE const *a; \
+ MKLTYPE alpha_, beta_; \
+ assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+ assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(beta_, EIGTYPE(1)); \
+\
+/* Set m, n */ \
+ n = (MKL_INT)size; \
+ lda = lhsStride; \
+ incx = 1; \
+ incy = resIncr; \
+\
+/* Set uplo, trans and diag*/ \
+ trans = 'N'; \
+ uplo = IsLower ? 'L' : 'U'; \
+ diag = IsUnitDiag ? 'U' : 'N'; \
+\
+/* call ?TRMV*/ \
+ MKLPREFIX##trmv(&uplo, &trans, &diag, &n, (const MKLTYPE*)_lhs, &lda, (MKLTYPE*)x, &incx); \
+\
+/* Add op(a_tr) * rhs into res */ \
+ MKLPREFIX##axpy(&n, &alpha_,(const MKLTYPE*)x, &incx, (MKLTYPE*)_res, &incy); \
+/* Non-square case - the remainder doesn't fit MKL ?TRMV; handle the rectangular part with ?GEMV */ \
+ if (size<(std::max)(rows,cols)) { \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic> MatrixLhs; \
+ if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
+ x = x_tmp.data(); \
+ if (size<rows) { \
+ y = _res + size*resIncr; \
+ a = _lhs + size; \
+ m = rows-size; \
+ n = size; \
+ } \
+ else { \
+ x += size; \
+ y = _res; \
+ a = _lhs + size*lda; \
+ m = size; \
+ n = cols-size; \
+ } \
+ MKLPREFIX##gemv(&trans, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)x, &incx, &beta_, (MKLTYPE*)y, &incy); \
+ } \
+ } \
+};
+
+EIGEN_MKL_TRMV_CM(double, double, d, d)
+EIGEN_MKL_TRMV_CM(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_TRMV_CM(float, float, f, s)
+EIGEN_MKL_TRMV_CM(scomplex, MKL_Complex8, cf, c)
+
+// implements row-major: res += alpha * op(triangular) * vector
+#define EIGEN_MKL_TRMV_RM(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
+struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor> { \
+ enum { \
+ IsLower = (Mode&Lower) == Lower, \
+ SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
+ IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
+ IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
+ LowUp = IsLower ? Lower : Upper \
+ }; \
+ static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
+ { \
+ if (IsZeroDiag) { \
+ triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor,BuiltIn>::run( \
+ _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
+ return; \
+ }\
+ Index size = (std::min)(_rows,_cols); \
+ Index rows = IsLower ? _rows : size; \
+ Index cols = IsLower ? size : _cols; \
+\
+ typedef VectorX##EIGPREFIX VectorRhs; \
+ EIGTYPE *x, *y;\
+\
+/* Set x*/ \
+ Map<const VectorRhs, 0, InnerStride<> > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \
+ VectorRhs x_tmp; \
+ if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
+ x = x_tmp.data(); \
+\
+/* Square part handling */\
+\
+ char trans, uplo, diag; \
+ MKL_INT m, n, lda, incx, incy; \
+ EIGTYPE const *a; \
+ MKLTYPE alpha_, beta_; \
+ assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+ assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(beta_, EIGTYPE(1)); \
+\
+/* Set m, n */ \
+ n = (MKL_INT)size; \
+ lda = lhsStride; \
+ incx = 1; \
+ incy = resIncr; \
+\
+/* Set uplo, trans and diag*/ \
+ trans = ConjLhs ? 'C' : 'T'; \
+ uplo = IsLower ? 'U' : 'L'; \
+ diag = IsUnitDiag ? 'U' : 'N'; \
+\
+/* call ?TRMV*/ \
+ MKLPREFIX##trmv(&uplo, &trans, &diag, &n, (const MKLTYPE*)_lhs, &lda, (MKLTYPE*)x, &incx); \
+\
+/* Add op(a_tr) * rhs into res */ \
+ MKLPREFIX##axpy(&n, &alpha_,(const MKLTYPE*)x, &incx, (MKLTYPE*)_res, &incy); \
+/* Non-square case - the remainder doesn't fit MKL ?TRMV; handle the rectangular part with ?GEMV */ \
+ if (size<(std::max)(rows,cols)) { \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic> MatrixLhs; \
+ if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
+ x = x_tmp.data(); \
+ if (size<rows) { \
+ y = _res + size*resIncr; \
+ a = _lhs + size*lda; \
+ m = rows-size; \
+ n = size; \
+ } \
+ else { \
+ x += size; \
+ y = _res; \
+ a = _lhs + size; \
+ m = size; \
+ n = cols-size; \
+ } \
+ MKLPREFIX##gemv(&trans, &n, &m, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)x, &incx, &beta_, (MKLTYPE*)y, &incy); \
+ } \
+ } \
+};
+
+EIGEN_MKL_TRMV_RM(double, double, d, d)
+EIGEN_MKL_TRMV_RM(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_TRMV_RM(float, float, f, s)
+EIGEN_MKL_TRMV_RM(scomplex, MKL_Complex8, cf, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_MATRIX_VECTOR_MKL_H
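A matching sketch for the ?TRMV bindings above (assuming EIGEN_USE_MKL_ALL; for non-square operands the macros split the work between ?TRMV and ?GEMV as the code shows):

  // Sketch: triangular matrix * vector; the square case goes through
  // ?trmv followed by ?axpy to accumulate into the result vector.
  #define EIGEN_USE_MKL_ALL
  #include <Eigen/Dense>

  int main()
  {
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(200, 200);
    Eigen::VectorXd x = Eigen::VectorXd::Random(200);
    Eigen::VectorXd y = Eigen::VectorXd::Zero(200);
    y += A.triangularView<Eigen::Lower>() * x;
    return 0;
  }
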
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
new file mode 100644
index 00000000000..47699f2100f
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
@@ -0,0 +1,155 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Triangular matrix * matrix solve functionality based on ?TRSM.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_MKL_H
+#define EIGEN_TRIANGULAR_SOLVER_MATRIX_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+// implements LeftSide op(triangular)^-1 * general
+#define EIGEN_MKL_TRSM_L(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
+struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor> \
+{ \
+ enum { \
+ IsLower = (Mode&Lower) == Lower, \
+ IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
+ IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
+ conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
+ }; \
+ static EIGEN_DONT_INLINE void run( \
+ Index size, Index otherSize, \
+ const EIGTYPE* _tri, Index triStride, \
+ EIGTYPE* _other, Index otherStride) \
+ { \
+ MKL_INT m = size, n = otherSize, lda, ldb; \
+ char side = 'L', uplo, diag='N', transa; \
+ /* Set alpha_ */ \
+ MKLTYPE alpha; \
+ EIGTYPE myone(1); \
+ assign_scalar_eig2mkl(alpha, myone); \
+ ldb = otherStride;\
+\
+ const EIGTYPE *a; \
+/* Set trans */ \
+ transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \
+/* Set uplo */ \
+ uplo = IsLower ? 'L' : 'U'; \
+ if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
+/* Set a, lda */ \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, TriStorageOrder> MatrixTri; \
+ Map<const MatrixTri, 0, OuterStride<> > tri(_tri,size,size,OuterStride<>(triStride)); \
+ MatrixTri a_tmp; \
+\
+ if (conjA) { \
+ a_tmp = tri.conjugate(); \
+ a = a_tmp.data(); \
+ lda = a_tmp.outerStride(); \
+ } else { \
+ a = _tri; \
+ lda = triStride; \
+ } \
+ if (IsUnitDiag) diag='U'; \
+/* call ?trsm*/ \
+ MKLPREFIX##trsm(&side, &uplo, &transa, &diag, &m, &n, &alpha, (const MKLTYPE*)a, &lda, (MKLTYPE*)_other, &ldb); \
+ } \
+};
+
+EIGEN_MKL_TRSM_L(double, double, d)
+EIGEN_MKL_TRSM_L(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_TRSM_L(float, float, s)
+EIGEN_MKL_TRSM_L(scomplex, MKL_Complex8, c)
+
+
+// implements RightSide general * op(triangular)^-1
+#define EIGEN_MKL_TRSM_R(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
+struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor> \
+{ \
+ enum { \
+ IsLower = (Mode&Lower) == Lower, \
+ IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \
+ IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
+ conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
+ }; \
+ static EIGEN_DONT_INLINE void run( \
+ Index size, Index otherSize, \
+ const EIGTYPE* _tri, Index triStride, \
+ EIGTYPE* _other, Index otherStride) \
+ { \
+ MKL_INT m = otherSize, n = size, lda, ldb; \
+ char side = 'R', uplo, diag='N', transa; \
+ /* Set alpha_ */ \
+ MKLTYPE alpha; \
+ EIGTYPE myone(1); \
+ assign_scalar_eig2mkl(alpha, myone); \
+ ldb = otherStride;\
+\
+ const EIGTYPE *a; \
+/* Set trans */ \
+ transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \
+/* Set uplo */ \
+ uplo = IsLower ? 'L' : 'U'; \
+ if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
+/* Set a, lda */ \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, TriStorageOrder> MatrixTri; \
+ Map<const MatrixTri, 0, OuterStride<> > tri(_tri,size,size,OuterStride<>(triStride)); \
+ MatrixTri a_tmp; \
+\
+ if (conjA) { \
+ a_tmp = tri.conjugate(); \
+ a = a_tmp.data(); \
+ lda = a_tmp.outerStride(); \
+ } else { \
+ a = _tri; \
+ lda = triStride; \
+ } \
+ if (IsUnitDiag) diag='U'; \
+/* call ?trsm*/ \
+ MKLPREFIX##trsm(&side, &uplo, &transa, &diag, &m, &n, &alpha, (const MKLTYPE*)a, &lda, (MKLTYPE*)_other, &ldb); \
+   /*std::cout << "TRSM_R specialization!\n";*/ \
+ } \
+};
+
+EIGEN_MKL_TRSM_R(double, double, d)
+EIGEN_MKL_TRSM_R(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_TRSM_R(float, float, s)
+EIGEN_MKL_TRSM_R(scomplex, MKL_Complex8, c)
+
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_MKL_H
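A usage sketch for the ?TRSM bindings above (EIGEN_USE_MKL_ALL assumed; the diagonal shift only keeps the example well conditioned):

  // Sketch: in-place triangular solve with multiple right-hand sides;
  // expected to call dtrsm with side='L'.
  #define EIGEN_USE_MKL_ALL
  #include <Eigen/Dense>

  int main()
  {
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(128, 128);
    Eigen::MatrixXd B = Eigen::MatrixXd::Random(128, 32);
    A.diagonal().array() += 128.0;                     // well conditioned
    A.triangularView<Eigen::Lower>().solveInPlace(B);  // B <- L^-1 * B
    return 0;
  }
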
diff --git a/extern/Eigen3/Eigen/src/Core/util/CMakeLists.txt b/extern/Eigen3/Eigen/src/Core/util/CMakeLists.txt
new file mode 100644
index 00000000000..a1e2e521fc1
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_util_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Core_util_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/util COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/Core/util/MKL_support.h b/extern/Eigen3/Eigen/src/Core/util/MKL_support.h
new file mode 100644
index 00000000000..1e6e355d626
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/MKL_support.h
@@ -0,0 +1,109 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Include file with common MKL declarations
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_MKL_SUPPORT_H
+#define EIGEN_MKL_SUPPORT_H
+
+#ifdef EIGEN_USE_MKL_ALL
+ #ifndef EIGEN_USE_BLAS
+ #define EIGEN_USE_BLAS
+ #endif
+ #ifndef EIGEN_USE_LAPACKE
+ #define EIGEN_USE_LAPACKE
+ #endif
+ #ifndef EIGEN_USE_MKL_VML
+ #define EIGEN_USE_MKL_VML
+ #endif
+#endif
+
+#ifdef EIGEN_USE_LAPACKE_STRICT
+ #define EIGEN_USE_LAPACKE
+#endif
+
+#if defined(EIGEN_USE_BLAS) || defined(EIGEN_USE_LAPACKE) || defined(EIGEN_USE_MKL_VML)
+ #define EIGEN_USE_MKL
+#endif
+
+#if defined EIGEN_USE_MKL
+
+#include <mkl.h>
+#include <mkl_lapacke.h>
+#define EIGEN_MKL_VML_THRESHOLD 128
+
+namespace Eigen {
+
+typedef std::complex<double> dcomplex;
+typedef std::complex<float> scomplex;
+
+namespace internal {
+
+template<typename MKLType, typename EigenType>
+static inline void assign_scalar_eig2mkl(MKLType& mklScalar, const EigenType& eigenScalar) {
+ mklScalar=eigenScalar;
+}
+
+template<typename MKLType, typename EigenType>
+static inline void assign_conj_scalar_eig2mkl(MKLType& mklScalar, const EigenType& eigenScalar) {
+ mklScalar=eigenScalar;
+}
+
+template <>
+inline void assign_scalar_eig2mkl<MKL_Complex16,dcomplex>(MKL_Complex16& mklScalar, const dcomplex& eigenScalar) {
+ mklScalar.real=eigenScalar.real();
+ mklScalar.imag=eigenScalar.imag();
+}
+
+template <>
+inline void assign_scalar_eig2mkl<MKL_Complex8,scomplex>(MKL_Complex8& mklScalar, const scomplex& eigenScalar) {
+ mklScalar.real=eigenScalar.real();
+ mklScalar.imag=eigenScalar.imag();
+}
+
+template <>
+inline void assign_conj_scalar_eig2mkl<MKL_Complex16,dcomplex>(MKL_Complex16& mklScalar, const dcomplex& eigenScalar) {
+ mklScalar.real=eigenScalar.real();
+ mklScalar.imag=-eigenScalar.imag();
+}
+
+template <>
+inline void assign_conj_scalar_eig2mkl<MKL_Complex8,scomplex>(MKL_Complex8& mklScalar, const scomplex& eigenScalar) {
+ mklScalar.real=eigenScalar.real();
+ mklScalar.imag=-eigenScalar.imag();
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif
+
+#endif // EIGEN_MKL_SUPPORT_H
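The opt-in cascade implemented above can be summarized as a sketch (the macro names are exactly the ones this header defines):

  // EIGEN_USE_MKL_ALL => EIGEN_USE_BLAS + EIGEN_USE_LAPACKE + EIGEN_USE_MKL_VML
  // any of those      => EIGEN_USE_MKL  => <mkl.h> and <mkl_lapacke.h> included
  #define EIGEN_USE_MKL_ALL  // single switch enabling every MKL-backed path
  #include <Eigen/Dense>

Note also that assign_conj_scalar_eig2mkl stores the conjugate (imaginary part negated), which lets the product kernels fold a conjugation into the scalar factor passed to BLAS.
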
diff --git a/extern/Eigen3/Eigen/src/Core/util/Macros.h b/extern/Eigen3/Eigen/src/Core/util/Macros.h
index cbea97c310d..8f9cf0dbf53 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Macros.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Macros.h
@@ -28,7 +28,7 @@
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 0
-#define EIGEN_MINOR_VERSION 92
+#define EIGEN_MINOR_VERSION 93
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@@ -268,7 +268,7 @@
* If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
* vectorized and non-vectorized code.
*/
-#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__)
+#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__) || (defined __ARMCC_VERSION)
#define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
#elif (defined _MSC_VER)
#define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
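For illustration, the effect of the added __ARMCC_VERSION branch (a sketch; EIGEN_ALIGN_TO_BOUNDARY is an internal macro):

  // On GCC, PGI, IBM XL and now ARM Compiler (armcc):
  //   EIGEN_ALIGN_TO_BOUNDARY(16) float packet[4];
  // expands to
  //   __attribute__((aligned(16))) float packet[4];
  // whereas MSVC builds get __declspec(align(16)) instead.
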
diff --git a/extern/Eigen3/Eigen/src/Core/util/Meta.h b/extern/Eigen3/Eigen/src/Core/util/Meta.h
index 7ece430acaa..82c93f922ae 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Meta.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Meta.h
@@ -103,6 +103,21 @@ template<bool Condition, typename T> struct enable_if;
template<typename T> struct enable_if<true,T>
{ typedef T type; };
+
+
+/** \internal
+ * A base class to disable the default copy ctor and copy assignment operator.
+ */
+class noncopyable
+{
+ noncopyable(const noncopyable&);
+ const noncopyable& operator=(const noncopyable&);
+protected:
+ noncopyable() {}
+ ~noncopyable() {}
+};
+
+
/** \internal
* Convenient struct to get the result type of a unary or binary functor.
*
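A hypothetical usage sketch of the internal::noncopyable helper added above (the derived class name is illustrative; this is internal API):

  // Private inheritance makes the copy ctor and copy assignment
  // inaccessible at compile time (C++03-era idiom, pre "= delete").
  class ScopedResource : private Eigen::internal::noncopyable
  {
  public:
    ScopedResource() {}
  };
  // ScopedResource a; ScopedResource b(a); // error: copy ctor is private
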
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/CMakeLists.txt b/extern/Eigen3/Eigen/src/Eigen2Support/CMakeLists.txt
new file mode 100644
index 00000000000..7ae41b3cbe9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/CMakeLists.txt
@@ -0,0 +1,8 @@
+FILE(GLOB Eigen_Eigen2Support_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Eigen2Support_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Eigen2Support COMPONENT Devel
+ )
+
+ADD_SUBDIRECTORY(Geometry)
\ No newline at end of file
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt
new file mode 100644
index 00000000000..c347a8f2622
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Eigen2Support_Geometry_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Eigen2Support_Geometry_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Eigen2Support/Geometry
+ )
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/CMakeLists.txt b/extern/Eigen3/Eigen/src/Eigenvalues/CMakeLists.txt
new file mode 100644
index 00000000000..193e0268599
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_EIGENVALUES_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_EIGENVALUES_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Eigenvalues COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
new file mode 100644
index 00000000000..e21a8d8360a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
@@ -0,0 +1,94 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Complex Schur decomposition needed for complex unsymmetric eigenvalues/eigenvectors.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_COMPLEX_SCHUR_MKL_H
+#define EIGEN_COMPLEX_SCHUR_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_SCHUR_COMPLEX(EIGTYPE, MKLTYPE, MKLPREFIX, MKLPREFIX_U, EIGCOLROW, MKLCOLROW) \
+template<> \
+ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
+ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
+{ \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
+ typedef MatrixType::Scalar Scalar; \
+ typedef MatrixType::RealScalar RealScalar; \
+ typedef std::complex<RealScalar> ComplexScalar; \
+\
+ assert(matrix.cols() == matrix.rows()); \
+\
+ m_matUisUptodate = false; \
+ if(matrix.cols() == 1) \
+ { \
+ m_matT = matrix.cast<ComplexScalar>(); \
+ if(computeU) m_matU = ComplexMatrixType::Identity(1,1); \
+ m_info = Success; \
+ m_isInitialized = true; \
+ m_matUisUptodate = computeU; \
+ return *this; \
+ } \
+ lapack_int n = matrix.cols(), sdim, info; \
+ lapack_int lda = matrix.outerStride(); \
+ lapack_int matrix_order = MKLCOLROW; \
+ char jobvs, sort='N'; \
+ LAPACK_##MKLPREFIX_U##_SELECT1 select = 0; \
+ jobvs = (computeU) ? 'V' : 'N'; \
+ m_matU.resize(n, n); \
+ lapack_int ldvs = m_matU.outerStride(); \
+ m_matT = matrix; \
+ Matrix<EIGTYPE, Dynamic, Dynamic> w; \
+ w.resize(n, 1);\
+ info = LAPACKE_##MKLPREFIX##gees( matrix_order, jobvs, sort, select, n, (MKLTYPE*)m_matT.data(), lda, &sdim, (MKLTYPE*)w.data(), (MKLTYPE*)m_matU.data(), ldvs ); \
+ if(info == 0) \
+ m_info = Success; \
+ else \
+ m_info = NoConvergence; \
+\
+ m_isInitialized = true; \
+ m_matUisUptodate = computeU; \
+ return *this; \
+\
+}
+
+EIGEN_MKL_SCHUR_COMPLEX(dcomplex, MKL_Complex16, z, Z, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SCHUR_COMPLEX(scomplex, MKL_Complex8, c, C, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SCHUR_COMPLEX(dcomplex, MKL_Complex16, z, Z, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SCHUR_COMPLEX(scomplex, MKL_Complex8, c, C, RowMajor, LAPACK_ROW_MAJOR)
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_SCHUR_MKL_H
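A usage sketch for the LAPACKE-backed ComplexSchur specialization above (EIGEN_USE_MKL_ALL assumed; size illustrative):

  // Sketch: with MKL enabled, compute() routes to LAPACKE_zgees.
  #define EIGEN_USE_MKL_ALL
  #include <Eigen/Dense>

  int main()
  {
    Eigen::MatrixXcd M = Eigen::MatrixXcd::Random(64, 64);
    Eigen::ComplexSchur<Eigen::MatrixXcd> schur(M);  // computes T and U
    return schur.info() == Eigen::Success ? 0 : 1;
  }
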
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h
new file mode 100644
index 00000000000..c9689520b0d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h
@@ -0,0 +1,83 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Real Schur decomposition needed for real unsymmetric eigenvalues/eigenvectors.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_REAL_SCHUR_MKL_H
+#define EIGEN_REAL_SCHUR_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_SCHUR_REAL(EIGTYPE, MKLTYPE, MKLPREFIX, MKLPREFIX_U, EIGCOLROW, MKLCOLROW) \
+template<> \
+RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
+RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
+{ \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
+ typedef MatrixType::Scalar Scalar; \
+ typedef MatrixType::RealScalar RealScalar; \
+\
+ assert(matrix.cols() == matrix.rows()); \
+\
+ lapack_int n = matrix.cols(), sdim, info; \
+ lapack_int lda = matrix.outerStride(); \
+ lapack_int matrix_order = MKLCOLROW; \
+ char jobvs, sort='N'; \
+ LAPACK_##MKLPREFIX_U##_SELECT2 select = 0; \
+ jobvs = (computeU) ? 'V' : 'N'; \
+ m_matU.resize(n, n); \
+ lapack_int ldvs = m_matU.outerStride(); \
+ m_matT = matrix; \
+ Matrix<EIGTYPE, Dynamic, Dynamic> wr, wi; \
+ wr.resize(n, 1); wi.resize(n, 1); \
+ info = LAPACKE_##MKLPREFIX##gees( matrix_order, jobvs, sort, select, n, (MKLTYPE*)m_matT.data(), lda, &sdim, (MKLTYPE*)wr.data(), (MKLTYPE*)wi.data(), (MKLTYPE*)m_matU.data(), ldvs ); \
+ if(info == 0) \
+ m_info = Success; \
+ else \
+ m_info = NoConvergence; \
+\
+ m_isInitialized = true; \
+ m_matUisUptodate = computeU; \
+ return *this; \
+\
+}
+
+EIGEN_MKL_SCHUR_REAL(double, double, d, D, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SCHUR_REAL(float, float, s, S, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SCHUR_REAL(double, double, d, D, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SCHUR_REAL(float, float, s, S, RowMajor, LAPACK_ROW_MAJOR)
+
+} // end namespace Eigen
+
+#endif // EIGEN_REAL_SCHUR_MKL_H
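And the real counterpart, dispatching to LAPACKE_dgees under the same assumptions:

  // Sketch: real Schur decomposition via the specialization above.
  #define EIGEN_USE_MKL_ALL
  #include <Eigen/Dense>

  int main()
  {
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(64, 64);
    Eigen::RealSchur<Eigen::MatrixXd> schur(M);
    return schur.info() == Eigen::Success ? 0 : 1;
  }
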
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
new file mode 100644
index 00000000000..5ebcd08e1cf
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
@@ -0,0 +1,92 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Self-adjoint eigenvalues/eigenvectors.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_SAEIGENSOLVER_MKL_H
+#define EIGEN_SAEIGENSOLVER_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_EIG_SELFADJ(EIGTYPE, MKLTYPE, MKLRTYPE, MKLNAME, EIGCOLROW, MKLCOLROW ) \
+template<> \
+SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
+SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, int options) \
+{ \
+ eigen_assert(matrix.cols() == matrix.rows()); \
+ eigen_assert((options&~(EigVecMask|GenEigMask))==0 \
+ && (options&EigVecMask)!=EigVecMask \
+ && "invalid option parameter"); \
+ bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \
+ lapack_int n = matrix.cols(), lda, matrix_order, info; \
+ m_eivalues.resize(n,1); \
+ m_subdiag.resize(n-1); \
+ m_eivec = matrix; \
+\
+ if(n==1) \
+ { \
+ m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0)); \
+ if(computeEigenvectors) m_eivec.setOnes(n,n); \
+ m_info = Success; \
+ m_isInitialized = true; \
+ m_eigenvectorsOk = computeEigenvectors; \
+ return *this; \
+ } \
+\
+ lda = matrix.outerStride(); \
+ matrix_order=MKLCOLROW; \
+ char jobz, uplo='L'/*, range='A'*/; \
+ jobz = computeEigenvectors ? 'V' : 'N'; \
+\
+ info = LAPACKE_##MKLNAME( matrix_order, jobz, uplo, n, (MKLTYPE*)m_eivec.data(), lda, (MKLRTYPE*)m_eivalues.data() ); \
+ m_info = (info==0) ? Success : NoConvergence; \
+ m_isInitialized = true; \
+ m_eigenvectorsOk = computeEigenvectors; \
+ return *this; \
+}
+
+
+EIGEN_MKL_EIG_SELFADJ(double, double, double, dsyev, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(float, float, float, ssyev, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(dcomplex, MKL_Complex16, double, zheev, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(scomplex, MKL_Complex8, float, cheev, ColMajor, LAPACK_COL_MAJOR)
+
+EIGEN_MKL_EIG_SELFADJ(double, double, double, dsyev, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(float, float, float, ssyev, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(dcomplex, MKL_Complex16, double, zheev, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(scomplex, MKL_Complex8, float, cheev, RowMajor, LAPACK_ROW_MAJOR)
+
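+// [Editor's note] A minimal usage sketch, assuming Eigen was configured with
+// MKL support (e.g. EIGEN_USE_MKL_ALL) so that the specializations above are
+// selected; the calling code is identical to the built-in solver:
+//
+//   #include <Eigen/Dense>
+//   using namespace Eigen;
+//   MatrixXd A = MatrixXd::Random(100,100);
+//   A = (A + A.transpose()).eval();            // make A selfadjoint
+//   SelfAdjointEigenSolver<MatrixXd> es;
+//   es.compute(A, ComputeEigenvectors);        // dispatches to LAPACKE_dsyev here
+//   if(es.info() == Success) {
+//     VectorXd lambda = es.eigenvalues();      // sorted in increasing order
+//     MatrixXd V      = es.eigenvectors();
+//   }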
+} // end namespace Eigen
+
+#endif // EIGEN_SAEIGENSOLVER_MKL_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/CMakeLists.txt b/extern/Eigen3/Eigen/src/Geometry/CMakeLists.txt
new file mode 100644
index 00000000000..f8f728b84fa
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/CMakeLists.txt
@@ -0,0 +1,8 @@
+FILE(GLOB Eigen_Geometry_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Geometry_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Geometry COMPONENT Devel
+ )
+
+ADD_SUBDIRECTORY(arch)
diff --git a/extern/Eigen3/Eigen/src/Geometry/arch/CMakeLists.txt b/extern/Eigen3/Eigen/src/Geometry/arch/CMakeLists.txt
new file mode 100644
index 00000000000..1267a79c787
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/arch/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Geometry_arch_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Geometry_arch_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Geometry/arch COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/Householder/CMakeLists.txt b/extern/Eigen3/Eigen/src/Householder/CMakeLists.txt
new file mode 100644
index 00000000000..ce4937db061
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Householder/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Householder_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Householder_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Householder COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
new file mode 100644
index 00000000000..c9fe9c61849
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
@@ -0,0 +1,163 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BASIC_PRECONDITIONERS_H
+#define EIGEN_BASIC_PRECONDITIONERS_H
+
+namespace Eigen {
+
+/** \ingroup IterativeLinearSolvers_Module
+ * \brief A preconditioner based on the diagonal entries
+ *
+ * This class allows one to approximately solve A.x = b problems by assuming A is a diagonal matrix.
+ * In other words, this preconditioner neglects all off-diagonal entries and, in Eigen's language, solves for:
+ * \code
+ * A.diagonal().asDiagonal() . x = b
+ * \endcode
+ *
+ * \tparam _Scalar the type of the scalar.
+ *
+ * This preconditioner is suitable for both selfadjoint and general problems.
+ * The diagonal entries are pre-inverted and stored into a dense vector.
+ *
+ * \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
+ *
+ */
+template <typename _Scalar>
+class DiagonalPreconditioner
+{
+ typedef _Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef typename Vector::Index Index;
+
+ public:
+ typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+ DiagonalPreconditioner() : m_isInitialized(false) {}
+
+ template<typename MatrixType>
+ DiagonalPreconditioner(const MatrixType& mat) : m_invdiag(mat.cols())
+ {
+ compute(mat);
+ }
+
+ Index rows() const { return m_invdiag.size(); }
+ Index cols() const { return m_invdiag.size(); }
+
+ template<typename MatrixType>
+ DiagonalPreconditioner& analyzePattern(const MatrixType& )
+ {
+ return *this;
+ }
+
+ template<typename MatrixType>
+ DiagonalPreconditioner& factorize(const MatrixType& mat)
+ {
+ m_invdiag.resize(mat.cols());
+ for(int j=0; j<mat.outerSize(); ++j)
+ {
+ typename MatrixType::InnerIterator it(mat,j);
+ while(it && it.index()!=j) ++it;
+ if(it && it.index()==j)
+ m_invdiag(j) = Scalar(1)/it.value();
+ else
+ m_invdiag(j) = 0;
+ }
+ m_isInitialized = true;
+ return *this;
+ }
+
+ template<typename MatrixType>
+ DiagonalPreconditioner& compute(const MatrixType& mat)
+ {
+ return factorize(mat);
+ }
+
+ template<typename Rhs, typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x = m_invdiag.array() * b.array() ;
+ }
+
+ template<typename Rhs> inline const internal::solve_retval<DiagonalPreconditioner, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
+ eigen_assert(m_invdiag.size()==b.rows()
+ && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<DiagonalPreconditioner, Rhs>(*this, b.derived());
+ }
+
+ protected:
+ Vector m_invdiag;
+ bool m_isInitialized;
+};
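+// [Editor's note] A minimal sketch of using the preconditioner on its own
+// (illustrative names; in practice it is usually passed as the
+// _Preconditioner template argument of an iterative solver):
+//
+//   int n = 100;
+//   SparseMatrix<double> A(n,n);               // assumed filled elsewhere
+//   VectorXd b = VectorXd::Ones(n), x;
+//   DiagonalPreconditioner<double> precond(A); // pre-inverts diag(A)
+//   x = precond.solve(b);                      // x = diag(A)^{-1} * b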
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<DiagonalPreconditioner<_MatrixType>, Rhs>
+ : solve_retval_base<DiagonalPreconditioner<_MatrixType>, Rhs>
+{
+ typedef DiagonalPreconditioner<_MatrixType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+ * \brief A naive preconditioner which approximates any matrix as the identity matrix
+ *
+ * \sa class DiagonalPreconditioner
+ */
+class IdentityPreconditioner
+{
+ public:
+
+ IdentityPreconditioner() {}
+
+ template<typename MatrixType>
+ IdentityPreconditioner(const MatrixType& ) {}
+
+ template<typename MatrixType>
+ IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }
+
+ template<typename MatrixType>
+ IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }
+
+ template<typename MatrixType>
+ IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
+
+ template<typename Rhs>
+ inline const Rhs& solve(const Rhs& b) const { return b; }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_BASIC_PRECONDITIONERS_H
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
new file mode 100644
index 00000000000..5f23968bc0a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -0,0 +1,269 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BICGSTAB_H
+#define EIGEN_BICGSTAB_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal Low-level biconjugate gradient stabilized algorithm
+  * \param mat The matrix A
+  * \param rhs The right hand side vector b
+  * \param x On input an initial solution, on output the computed solution.
+  * \param precond A preconditioner able to efficiently solve for an
+  *                approximation of Ax=b (regardless of b)
+  * \param iters On input the max number of iterations, on output the number of performed iterations.
+  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+  * \return false in the case of a numerical issue, for example a breakdown of BiCGSTAB.
+  */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
+ const Preconditioner& precond, int& iters,
+ typename Dest::RealScalar& tol_error)
+{
+ using std::sqrt;
+ using std::abs;
+ typedef typename Dest::RealScalar RealScalar;
+ typedef typename Dest::Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ RealScalar tol = tol_error;
+ int maxIters = iters;
+
+ int n = mat.cols();
+ VectorType r = rhs - mat * x;
+ VectorType r0 = r;
+
+ RealScalar r0_sqnorm = r0.squaredNorm();
+ Scalar rho = 1;
+ Scalar alpha = 1;
+ Scalar w = 1;
+
+ VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
+ VectorType y(n), z(n);
+ VectorType kt(n), ks(n);
+
+ VectorType s(n), t(n);
+
+ RealScalar tol2 = tol*tol;
+ int i = 0;
+
+ while ( r.squaredNorm()/r0_sqnorm > tol2 && i<maxIters )
+ {
+ Scalar rho_old = rho;
+
+ rho = r0.dot(r);
+ if (rho == Scalar(0)) return false; /* New search directions cannot be found */
+ Scalar beta = (rho/rho_old) * (alpha / w);
+ p = r + beta * (p - w * v);
+
+ y = precond.solve(p);
+
+ v.noalias() = mat * y;
+
+ alpha = rho / r0.dot(v);
+ s = r - alpha * v;
+
+ z = precond.solve(s);
+ t.noalias() = mat * z;
+
+ w = t.dot(s) / t.squaredNorm();
+ x += alpha * y + w * z;
+ r = s - w * t;
+ ++i;
+ }
+ tol_error = sqrt(r.squaredNorm()/r0_sqnorm);
+ iters = i;
+ return true;
+}
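+// [Editor's note] A sketch of driving this low-level kernel directly, without
+// the BiCGSTAB wrapper class defined below (names are illustrative):
+//
+//   VectorXd x = VectorXd::Zero(n);            // initial guess
+//   int iters = 2*n;                           // max iterations in, count out
+//   double tol_error = 1e-10;                  // tolerance in, error out
+//   IdentityPreconditioner precond;
+//   bool ok = internal::bicgstab(A, b, x, precond, iters, tol_error);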
+
+}
+
+template< typename _MatrixType,
+ typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class BiCGSTAB;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
+{
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+ * \brief A biconjugate gradient stabilized solver for sparse square problems
+ *
+ * This class allows one to solve sparse linear problems of the form A.x = b using a biconjugate
+ * gradient stabilized algorithm. The vectors x and b can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ *
+ * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+ * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+ * and NumTraits<Scalar>::epsilon() for the tolerance.
+ *
+ * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+ * \code
+ * int n = 10000;
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double> A(n,n);
+ * // fill A and b
+ * BiCGSTAB<SparseMatrix<double> > solver;
+ * solver.compute(A);
+ * x = solver.solve(b);
+ * std::cout << "#iterations: " << solver.iterations() << std::endl;
+ * std::cout << "estimated error: " << solver.error() << std::endl;
+ * // update b, and solve again
+ * x = solver.solve(b);
+ * \endcode
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method. Here is a step by
+ * step execution example starting with a random guess and printing the evolution
+ * of the estimated error:
+ * \code
+ * x = VectorXd::Random(n);
+ * solver.setMaxIterations(1);
+ * int i = 0;
+ * do {
+ * x = solver.solveWithGuess(b,x);
+ * std::cout << i << " : " << solver.error() << std::endl;
+ * ++i;
+ * } while (solver.info()!=Success && i<100);
+ * \endcode
+ * Note that such a step by step execution is slightly slower.
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+template< typename _MatrixType, typename _Preconditioner>
+class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
+{
+ typedef IterativeSolverBase<BiCGSTAB> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+
+public:
+
+ /** Default constructor. */
+ BiCGSTAB() : Base() {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ BiCGSTAB(const MatrixType& A) : Base(A) {}
+
+ ~BiCGSTAB() {}
+
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+    * and \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<BiCGSTAB, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "BiCGSTAB is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <BiCGSTAB, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ bool failed = false;
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ if(!internal::bicgstab(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
+ failed = true;
+ }
+ m_info = failed ? NumericalIssue
+ : m_error <= Base::m_tolerance ? Success
+ : NoConvergence;
+ m_isInitialized = true;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x.setZero();
+ _solveWithGuess(b,x);
+ }
+
+protected:
+
+};
+
+
+namespace internal {
+
+ template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
+ : solve_retval_base<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
+{
+ typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BICGSTAB_H
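+// [Editor's note] A sketch of pairing BiCGSTAB with a non-default
+// preconditioner through the second template argument (names illustrative;
+// IncompleteLUT is defined in this same module):
+//
+//   BiCGSTAB<SparseMatrix<double>, IncompleteLUT<double> > solver;
+//   solver.preconditioner().setDroptol(1e-5);  // tune ILUT before compute()
+//   solver.compute(A);
+//   VectorXd x = solver.solve(b);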
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/CMakeLists.txt b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
new file mode 100644
index 00000000000..59ccc0072cc
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_IterativeLinearSolvers_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_IterativeLinearSolvers_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/IterativeLinearSolvers COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
new file mode 100644
index 00000000000..edab2299e63
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -0,0 +1,266 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CONJUGATE_GRADIENT_H
+#define EIGEN_CONJUGATE_GRADIENT_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal Low-level conjugate gradient algorithm
+  * \param mat The matrix A
+  * \param rhs The right hand side vector b
+  * \param x On input an initial solution, on output the computed solution.
+  * \param precond A preconditioner able to efficiently solve for an
+  *                approximation of Ax=b (regardless of b)
+  * \param iters On input the max number of iterations, on output the number of performed iterations.
+  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+  */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+EIGEN_DONT_INLINE
+void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
+ const Preconditioner& precond, int& iters,
+ typename Dest::RealScalar& tol_error)
+{
+ using std::sqrt;
+ using std::abs;
+ typedef typename Dest::RealScalar RealScalar;
+ typedef typename Dest::Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+ RealScalar tol = tol_error;
+ int maxIters = iters;
+
+ int n = mat.cols();
+
+ VectorType residual = rhs - mat * x; //initial residual
+ VectorType p(n);
+
+ p = precond.solve(residual); //initial search direction
+
+ VectorType z(n), tmp(n);
+ RealScalar absNew = internal::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
+ RealScalar rhsNorm2 = rhs.squaredNorm();
+ RealScalar residualNorm2 = 0;
+ RealScalar threshold = tol*tol*rhsNorm2;
+ int i = 0;
+ while(i < maxIters)
+ {
+ tmp.noalias() = mat * p; // the bottleneck of the algorithm
+
+ Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir
+ x += alpha * p; // update solution
+ residual -= alpha * tmp; // update residue
+
+ residualNorm2 = residual.squaredNorm();
+ if(residualNorm2 < threshold)
+ break;
+
+ z = precond.solve(residual); // approximately solve for "A z = residual"
+
+ RealScalar absOld = absNew;
+ absNew = internal::real(residual.dot(z)); // update the absolute value of r
+ RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
+ p = z + beta * p; // update search direction
+ i++;
+ }
+ tol_error = sqrt(residualNorm2 / rhsNorm2);
+ iters = i;
+}
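+// [Editor's note] Analogous to the BiCGSTAB kernel, this routine can be called
+// directly (sketch; names assumed). There is no return value: convergence is
+// judged from the output values of iters and tol_error:
+//
+//   int iters = n;
+//   double tol_error = 1e-10;
+//   VectorXd x = VectorXd::Zero(n);
+//   DiagonalPreconditioner<double> precond(A);
+//   internal::conjugate_gradient(A.selfadjointView<Lower>(), b, x,
+//                                precond, iters, tol_error);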
+
+}
+
+template< typename _MatrixType, int _UpLo=Lower,
+ typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class ConjugateGradient;
+
+namespace internal {
+
+template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
+{
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+ * \brief A conjugate gradient solver for sparse self-adjoint problems
+ *
+ * This class allows one to solve sparse linear problems of the form A.x = b using a conjugate gradient algorithm.
+ * The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ *               or Upper. Default is Lower.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ *
+ * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+ * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+ * and NumTraits<Scalar>::epsilon() for the tolerance.
+ *
+ * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+ * \code
+ * int n = 10000;
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double> A(n,n);
+ * // fill A and b
+ * ConjugateGradient<SparseMatrix<double> > cg;
+ * cg.compute(A);
+ * x = cg.solve(b);
+ * std::cout << "#iterations: " << cg.iterations() << std::endl;
+ * std::cout << "estimated error: " << cg.error() << std::endl;
+ * // update b, and solve again
+ * x = cg.solve(b);
+ * \endcode
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method. Here is a step by
+ * step execution example starting with a random guess and printing the evolution
+ * of the estimated error:
+ * \code
+ * x = VectorXd::Random(n);
+ * cg.setMaxIterations(1);
+ * int i = 0;
+ * do {
+ * x = cg.solveWithGuess(b,x);
+ * std::cout << i << " : " << cg.error() << std::endl;
+ * ++i;
+ * } while (cg.info()!=Success && i<100);
+ * \endcode
+ * Note that such a step by step execution is slightly slower.
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
+{
+ typedef IterativeSolverBase<ConjugateGradient> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+
+ enum {
+ UpLo = _UpLo
+ };
+
+public:
+
+ /** Default constructor. */
+ ConjugateGradient() : Base() {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ ConjugateGradient(const MatrixType& A) : Base(A) {}
+
+ ~ConjugateGradient() {}
+
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+    * and \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<ConjugateGradient, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "ConjugateGradient::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <ConjugateGradient, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
+ Base::m_preconditioner, m_iterations, m_error);
+ }
+
+ m_isInitialized = true;
+ m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+    x.setZero();  // start from x=0, matching the documented default and the BiCGSTAB implementation
+ _solveWithGuess(b,x);
+ }
+
+protected:
+
+};
+
+
+namespace internal {
+
+template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
+struct solve_retval<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+ : solve_retval_base<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+{
+ typedef ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONJUGATE_GRADIENT_H
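+// [Editor's note] A sketch of selecting the stored triangular part through the
+// _UpLo template argument when only one half of the selfadjoint matrix is
+// populated (illustrative names):
+//
+//   SparseMatrix<double> A(n,n);               // only the upper part is filled
+//   ConjugateGradient<SparseMatrix<double>, Upper> cg;
+//   cg.compute(A);
+//   VectorXd x = cg.solve(b);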
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
new file mode 100644
index 00000000000..32f152634f9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -0,0 +1,476 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_INCOMPLETE_LUT_H
+#define EIGEN_INCOMPLETE_LUT_H
+
+namespace Eigen {
+
+/**
+ * \brief Incomplete LU factorization with dual-threshold strategy
+ *
+ * During the numerical factorization, two dropping rules are used:
+ *  1) any element whose magnitude is less than some tolerance is dropped.
+ *    This tolerance is obtained by multiplying the input tolerance @p droptol
+ *    by the average magnitude of all the original elements in the current row.
+ *  2) After the elimination of the row, only the @p fill largest elements in
+ *    the L part and the @p fill largest elements in the U part are kept
+ *    (in addition to the diagonal element). Note that @p fill is computed from
+ *    the input parameter @p fillfactor, which is used as a ratio to control the fill-in
+ *    relative to the initial number of nonzero elements.
+ *
+ * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
+ * and when @p fill=n/2 with @p droptol different from zero.
+ *
+ * References: Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
+ * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
+ *
+ * NOTE: The following implementation is derived from the ILUT implementation
+ * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
+ * released under the terms of the GNU LGPL;
+ * see http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README for more details.
+ */
+template <typename _Scalar>
+class IncompleteLUT : internal::noncopyable
+{
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef SparseMatrix<Scalar,RowMajor> FactorType;
+ typedef SparseMatrix<Scalar,ColMajor> PermutType;
+ typedef typename FactorType::Index Index;
+
+ public:
+ typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+ IncompleteLUT()
+ : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
+ m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false)
+ {}
+
+ template<typename MatrixType>
+ IncompleteLUT(const MatrixType& mat, RealScalar droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
+ : m_droptol(droptol),m_fillfactor(fillfactor),
+ m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
+ {
+ eigen_assert(fillfactor != 0);
+ compute(mat);
+ }
+
+ Index rows() const { return m_lu.rows(); }
+
+ Index cols() const { return m_lu.cols(); }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
+ return m_info;
+ }
+
+ template<typename MatrixType>
+ void analyzePattern(const MatrixType& amat);
+
+ template<typename MatrixType>
+ void factorize(const MatrixType& amat);
+
+    /**
+     * Computes an incomplete LU factorization with dual threshold on the matrix \a mat.
+     * No pivoting is done in this version.
+     *
+     **/
+ template<typename MatrixType>
+ IncompleteLUT<Scalar>& compute(const MatrixType& amat)
+ {
+ analyzePattern(amat);
+ factorize(amat);
+ eigen_assert(m_factorizationIsOk == true);
+ m_isInitialized = true;
+ return *this;
+ }
+
+ void setDroptol(RealScalar droptol);
+ void setFillfactor(int fillfactor);
+
+ template<typename Rhs, typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x = m_Pinv * b;
+ x = m_lu.template triangularView<UnitLower>().solve(x);
+ x = m_lu.template triangularView<Upper>().solve(x);
+ x = m_P * x;
+ }
+
+ template<typename Rhs> inline const internal::solve_retval<IncompleteLUT, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
+ eigen_assert(cols()==b.rows()
+ && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<IncompleteLUT, Rhs>(*this, b.derived());
+ }
+
+protected:
+
+ template <typename VectorV, typename VectorI>
+ int QuickSplit(VectorV &row, VectorI &ind, int ncut);
+
+
+ /** keeps off-diagonal entries; drops diagonal entries */
+ struct keep_diag {
+ inline bool operator() (const Index& row, const Index& col, const Scalar&) const
+ {
+ return row!=col;
+ }
+ };
+
+protected:
+
+ FactorType m_lu;
+ RealScalar m_droptol;
+ int m_fillfactor;
+ bool m_analysisIsOk;
+ bool m_factorizationIsOk;
+ bool m_isInitialized;
+ ComputationInfo m_info;
+ PermutationMatrix<Dynamic,Dynamic,Index> m_P; // Fill-reducing permutation
+ PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv; // Inverse permutation
+};
+
+/**
+ * Set control parameter droptol
+ * \param droptol Drop any element whose magnitude is less than this tolerance
+ **/
+template<typename Scalar>
+void IncompleteLUT<Scalar>::setDroptol(RealScalar droptol)
+{
+ this->m_droptol = droptol;
+}
+
+/**
+ * Set control parameter fillfactor
+ * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row.
+ **/
+template<typename Scalar>
+void IncompleteLUT<Scalar>::setFillfactor(int fillfactor)
+{
+ this->m_fillfactor = fillfactor;
+}
+
+
+/**
+ * Compute a quick-sort split of a vector
+ * On output, the vector row is permuted such that its elements satisfy
+ * abs(row(i)) >= abs(row(ncut)) if i<ncut
+ * abs(row(i)) <= abs(row(ncut)) if i>ncut
+ * \param row The vector of values
+ * \param ind The array of index for the elements in @p row
+ * \param ncut The number of largest elements to keep
+ **/
+template <typename Scalar>
+template <typename VectorV, typename VectorI>
+int IncompleteLUT<Scalar>::QuickSplit(VectorV &row, VectorI &ind, int ncut)
+{
+ using std::swap;
+ int mid;
+ int n = row.size(); /* length of the vector */
+ int first, last ;
+
+ ncut--; /* to fit the zero-based indices */
+ first = 0;
+ last = n-1;
+ if (ncut < first || ncut > last ) return 0;
+
+ do {
+ mid = first;
+ RealScalar abskey = std::abs(row(mid));
+ for (int j = first + 1; j <= last; j++) {
+ if ( std::abs(row(j)) > abskey) {
+ ++mid;
+ swap(row(mid), row(j));
+ swap(ind(mid), ind(j));
+ }
+ }
+ /* Interchange for the pivot element */
+ swap(row(mid), row(first));
+ swap(ind(mid), ind(first));
+
+ if (mid > ncut) last = mid - 1;
+ else if (mid < ncut ) first = mid + 1;
+ } while (mid != ncut );
+
+ return 0; /* mid is equal to ncut */
+}
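+// [Editor's note] A worked illustration with assumed values: given
+// row = [0.1, -3.0, 0.5, 2.0], matching indices in ind, and ncut = 2,
+//
+//   QuickSplit(row, ind, 2);
+//
+// permutes row (and ind alongside it) so that the two entries of largest
+// magnitude, -3.0 and 2.0, end up in the first two positions (in some order);
+// neither half gets fully sorted, which is what keeps the split cheap.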
+
+template <typename Scalar>
+template<typename _MatrixType>
+void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
+{
+ // Compute the Fill-reducing permutation
+ SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
+ SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
+ // Symmetrize the pattern
+ // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
+ // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...
+ SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
+ AtA.prune(keep_diag());
+ internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P); // Then compute the AMD ordering...
+
+ m_Pinv = m_P.inverse(); // ... and the inverse permutation
+
+ m_analysisIsOk = true;
+}
+
+template <typename Scalar>
+template<typename _MatrixType>
+void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
+{
+ using std::sqrt;
+ using std::swap;
+ using std::abs;
+
+ eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
+ int n = amat.cols(); // Size of the matrix
+ m_lu.resize(n,n);
+ // Declare Working vectors and variables
+ Vector u(n) ; // real values of the row -- maximum size is n --
+ VectorXi ju(n); // column position of the values in u -- maximum size is n
+ VectorXi jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
+
+ // Apply the fill-reducing permutation
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ SparseMatrix<Scalar,RowMajor, Index> mat;
+ mat = amat.twistedBy(m_Pinv);
+
+ // Initialization
+ jr.fill(-1);
+ ju.fill(0);
+ u.fill(0);
+
+ // number of largest elements to keep in each row:
+ int fill_in = static_cast<int> (amat.nonZeros()*m_fillfactor)/n+1;
+ if (fill_in > n) fill_in = n;
+
+ // number of largest nonzero elements to keep in the L and the U part of the current row:
+ int nnzL = fill_in/2;
+ int nnzU = nnzL;
+ m_lu.reserve(n * (nnzL + nnzU + 1));
+
+ // global loop over the rows of the sparse matrix
+ for (int ii = 0; ii < n; ii++)
+ {
+ // 1 - copy the lower and the upper part of the row i of mat in the working vector u
+
+ int sizeu = 1; // number of nonzero elements in the upper part of the current row
+ int sizel = 0; // number of nonzero elements in the lower part of the current row
+ ju(ii) = ii;
+ u(ii) = 0;
+ jr(ii) = ii;
+ RealScalar rownorm = 0;
+
+ typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
+ for (; j_it; ++j_it)
+ {
+ int k = j_it.index();
+ if (k < ii)
+ {
+ // copy the lower part
+ ju(sizel) = k;
+ u(sizel) = j_it.value();
+ jr(k) = sizel;
+ ++sizel;
+ }
+ else if (k == ii)
+ {
+ u(ii) = j_it.value();
+ }
+ else
+ {
+ // copy the upper part
+ int jpos = ii + sizeu;
+ ju(jpos) = k;
+ u(jpos) = j_it.value();
+ jr(k) = jpos;
+ ++sizeu;
+ }
+ rownorm += internal::abs2(j_it.value());
+ }
+
+ // 2 - detect possible zero row
+ if(rownorm==0)
+ {
+ m_info = NumericalIssue;
+ return;
+ }
+ // Take the 2-norm of the current row as a relative tolerance
+ rownorm = sqrt(rownorm);
+
+ // 3 - eliminate the previous nonzero rows
+ int jj = 0;
+ int len = 0;
+ while (jj < sizel)
+ {
+ // In order to eliminate in the correct order,
+ // we must select first the smallest column index among ju(jj:sizel)
+ int k;
+ int minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
+ k += jj;
+ if (minrow != ju(jj))
+ {
+ // swap the two locations
+ int j = ju(jj);
+ swap(ju(jj), ju(k));
+ jr(minrow) = jj; jr(j) = k;
+ swap(u(jj), u(k));
+ }
+ // Reset this location
+ jr(minrow) = -1;
+
+ // Start elimination
+ typename FactorType::InnerIterator ki_it(m_lu, minrow);
+ while (ki_it && ki_it.index() < minrow) ++ki_it;
+ eigen_internal_assert(ki_it && ki_it.col()==minrow);
+ Scalar fact = u(jj) / ki_it.value();
+
+ // drop too small elements
+ if(abs(fact) <= m_droptol)
+ {
+ jj++;
+ continue;
+ }
+
+ // linear combination of the current row ii and the row minrow
+ ++ki_it;
+ for (; ki_it; ++ki_it)
+ {
+ Scalar prod = fact * ki_it.value();
+ int j = ki_it.index();
+ int jpos = jr(j);
+ if (jpos == -1) // fill-in element
+ {
+ int newpos;
+ if (j >= ii) // dealing with the upper part
+ {
+ newpos = ii + sizeu;
+ sizeu++;
+ eigen_internal_assert(sizeu<=n);
+ }
+ else // dealing with the lower part
+ {
+ newpos = sizel;
+ sizel++;
+ eigen_internal_assert(sizel<=ii);
+ }
+ ju(newpos) = j;
+ u(newpos) = -prod;
+ jr(j) = newpos;
+ }
+ else
+ u(jpos) -= prod;
+ }
+ // store the pivot element
+ u(len) = fact;
+ ju(len) = minrow;
+ ++len;
+
+ jj++;
+ } // end of the elimination on the row ii
+
+ // reset the upper part of the pointer jr to zero
+ for(int k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
+
+ // 4 - partially sort and insert the elements in the m_lu matrix
+
+ // sort the L-part of the row
+ sizel = len;
+ len = (std::min)(sizel, nnzL);
+ typename Vector::SegmentReturnType ul(u.segment(0, sizel));
+ typename VectorXi::SegmentReturnType jul(ju.segment(0, sizel));
+ QuickSplit(ul, jul, len);
+
+ // store the largest m_fill elements of the L part
+ m_lu.startVec(ii);
+ for(int k = 0; k < len; k++)
+ m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
+
+ // store the diagonal element
+ // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
+ if (u(ii) == Scalar(0))
+ u(ii) = sqrt(m_droptol) * rownorm;
+ m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
+
+ // sort the U-part of the row
+ // apply the dropping rule first
+ len = 0;
+ for(int k = 1; k < sizeu; k++)
+ {
+ if(abs(u(ii+k)) > m_droptol * rownorm )
+ {
+ ++len;
+ u(ii + len) = u(ii + k);
+ ju(ii + len) = ju(ii + k);
+ }
+ }
+ sizeu = len + 1; // +1 to take into account the diagonal element
+ len = (std::min)(sizeu, nnzU);
+ typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
+ typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
+ QuickSplit(uu, juu, len);
+
+ // store the largest elements of the U part
+ for(int k = ii + 1; k < ii + len; k++)
+ m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
+ }
+
+ m_lu.finalize();
+ m_lu.makeCompressed();
+
+ m_factorizationIsOk = true;
+ m_info = Success;
+}
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<IncompleteLUT<_MatrixType>, Rhs>
+ : solve_retval_base<IncompleteLUT<_MatrixType>, Rhs>
+{
+ typedef IncompleteLUT<_MatrixType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_INCOMPLETE_LUT_H
+
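+// [Editor's note] A standalone sketch of computing and applying the incomplete
+// factors (illustrative names; IncompleteLUT is usually plugged into BiCGSTAB
+// or another iterative solver as its preconditioner instead):
+//
+//   IncompleteLUT<double> ilut;
+//   ilut.setDroptol(1e-4);                     // drop smaller entries
+//   ilut.setFillfactor(20);                    // allow more fill-in per row
+//   ilut.compute(A);
+//   VectorXd x;
+//   if(ilut.info() == Success)
+//     x = ilut.solve(b);                       // x approximates A^{-1} * b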
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
new file mode 100644
index 00000000000..b27ad82ec49
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -0,0 +1,269 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H
+#define EIGEN_ITERATIVE_SOLVER_BASE_H
+
+namespace Eigen {
+
+/** \ingroup IterativeLinearSolvers_Module
+ * \brief Base class for linear iterative solvers
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+template< typename Derived>
+class IterativeSolverBase : internal::noncopyable
+{
+public:
+ typedef typename internal::traits<Derived>::MatrixType MatrixType;
+ typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+
+public:
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ /** Default constructor. */
+ IterativeSolverBase()
+ : mp_matrix(0)
+ {
+ init();
+ }
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ IterativeSolverBase(const MatrixType& A)
+ {
+ init();
+ compute(A);
+ }
+
+ ~IterativeSolverBase() {}
+
+  /** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems.
+    *
+    * Currently, this function mostly calls analyzePattern on the preconditioner. In the future
+    * we might, for instance, implement column reordering for faster matrix-vector products.
+    */
+ Derived& analyzePattern(const MatrixType& A)
+ {
+ m_preconditioner.analyzePattern(A);
+ m_isInitialized = true;
+ m_analysisIsOk = true;
+ m_info = Success;
+ return derived();
+ }
+
+  /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems.
+    *
+    * Currently, this function mostly calls factorize on the preconditioner.
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ Derived& factorize(const MatrixType& A)
+ {
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ mp_matrix = &A;
+ m_preconditioner.factorize(A);
+ m_factorizationIsOk = true;
+ m_info = Success;
+ return derived();
+ }
+
+  /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems.
+    *
+    * Currently, this function mostly initializes/computes the preconditioner. In the future
+    * we might, for instance, implement column reordering for faster matrix-vector products.
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ Derived& compute(const MatrixType& A)
+ {
+ mp_matrix = &A;
+ m_preconditioner.compute(A);
+ m_isInitialized = true;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = true;
+ m_info = Success;
+ return derived();
+ }
+
+ /** \internal */
+ Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; }
+ /** \internal */
+ Index cols() const { return mp_matrix ? mp_matrix->cols() : 0; }
+
+ /** \returns the tolerance threshold used by the stopping criteria */
+ RealScalar tolerance() const { return m_tolerance; }
+
+ /** Sets the tolerance threshold used by the stopping criteria */
+ Derived& setTolerance(RealScalar tolerance)
+ {
+ m_tolerance = tolerance;
+ return derived();
+ }
+
+ /** \returns a read-write reference to the preconditioner for custom configuration. */
+ Preconditioner& preconditioner() { return m_preconditioner; }
+
+ /** \returns a read-only reference to the preconditioner. */
+ const Preconditioner& preconditioner() const { return m_preconditioner; }
+
+ /** \returns the max number of iterations */
+ int maxIterations() const
+ {
+ return (mp_matrix && m_maxIterations<0) ? mp_matrix->cols() : m_maxIterations;
+ }
+
+ /** Sets the max number of iterations */
+ Derived& setMaxIterations(int maxIters)
+ {
+ m_maxIterations = maxIters;
+ return derived();
+ }
+
+ /** \returns the number of iterations performed during the last solve */
+ int iterations() const
+ {
+ eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+ return m_iterations;
+ }
+
+ /** \returns the tolerance error reached during the last solve */
+ RealScalar error() const
+ {
+ eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+ return m_error;
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs> inline const internal::solve_retval<Derived, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<Derived, Rhs>(derived(), b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<IterativeSolverBase, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<IterativeSolverBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns Success if the iterations converged, and NoConvergence otherwise. */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+ return m_info;
+ }
+
+ /** \internal */
+ template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
+ void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+ {
+ eigen_assert(rows()==b.rows());
+
+ int rhsCols = b.cols();
+ int size = b.rows();
+ Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
+ Eigen::Matrix<DestScalar,Dynamic,1> tx(size);
+ for(int k=0; k<rhsCols; ++k)
+ {
+ tb = b.col(k);
+ tx = derived().solve(tb);
+ dest.col(k) = tx.sparseView(0);
+ }
+ }
+
+protected:
+ void init()
+ {
+ m_isInitialized = false;
+ m_analysisIsOk = false;
+ m_factorizationIsOk = false;
+ m_maxIterations = -1;
+ m_tolerance = NumTraits<Scalar>::epsilon();
+ }
+ const MatrixType* mp_matrix;
+ Preconditioner m_preconditioner;
+
+ int m_maxIterations;
+ RealScalar m_tolerance;
+
+ mutable RealScalar m_error;
+ mutable int m_iterations;
+ mutable ComputationInfo m_info;
+ mutable bool m_isInitialized, m_analysisIsOk, m_factorizationIsOk;
+};
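+// [Editor's note] A sketch of the configuration surface this base class gives
+// every iterative solver, shown here on ConjugateGradient (names illustrative;
+// the setters return Derived& so they can be chained):
+//
+//   ConjugateGradient<SparseMatrix<double> > cg;
+//   cg.setTolerance(1e-8).setMaxIterations(500);
+//   cg.compute(A);
+//   VectorXd x = cg.solve(b);
+//   std::cout << cg.iterations() << " iterations, error " << cg.error() << "\n";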
+
+namespace internal {
+
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<IterativeSolverBase<Derived>, Rhs>
+ : sparse_solve_retval_base<IterativeSolverBase<Derived>, Rhs>
+{
+ typedef IterativeSolverBase<Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve_sparse(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ITERATIVE_SOLVER_BASE_H
diff --git a/extern/Eigen3/Eigen/src/Jacobi/CMakeLists.txt b/extern/Eigen3/Eigen/src/Jacobi/CMakeLists.txt
new file mode 100644
index 00000000000..490dac62643
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Jacobi/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Jacobi_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Jacobi_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Jacobi COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/LU/CMakeLists.txt b/extern/Eigen3/Eigen/src/LU/CMakeLists.txt
new file mode 100644
index 00000000000..e0d8d78c172
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/LU/CMakeLists.txt
@@ -0,0 +1,8 @@
+FILE(GLOB Eigen_LU_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_LU_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/LU COMPONENT Devel
+ )
+
+ADD_SUBDIRECTORY(arch)
diff --git a/extern/Eigen3/Eigen/src/LU/PartialPivLU_MKL.h b/extern/Eigen3/Eigen/src/LU/PartialPivLU_MKL.h
new file mode 100644
index 00000000000..9035953c82f
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/LU/PartialPivLU_MKL.h
@@ -0,0 +1,85 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * LU decomposition with partial pivoting based on LAPACKE_?getrf function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_PARTIALLU_LAPACK_H
+#define EIGEN_PARTIALLU_LAPACK_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_LU_PARTPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template<int StorageOrder> \
+struct partial_lu_impl<EIGTYPE, StorageOrder, lapack_int> \
+{ \
+ /* \internal performs the LU decomposition in-place of the matrix represented */ \
+ static lapack_int blocked_lu(lapack_int rows, lapack_int cols, EIGTYPE* lu_data, lapack_int luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \
+ { \
+ EIGEN_UNUSED_VARIABLE(maxBlockSize);\
+ lapack_int matrix_order, first_zero_pivot; \
+ lapack_int m, n, lda, *ipiv, info; \
+ EIGTYPE* a; \
+/* Set up parameters for ?getrf */ \
+ matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+ lda = luStride; \
+ a = lu_data; \
+ ipiv = row_transpositions; \
+ m = rows; \
+ n = cols; \
+ nb_transpositions = 0; \
+\
+ info = LAPACKE_##MKLPREFIX##getrf( matrix_order, m, n, (MKLTYPE*)a, lda, ipiv ); \
+\
+ for(int i=0;i<m;i++) { ipiv[i]--; if (ipiv[i]!=i) nb_transpositions++; } \
+\
+ eigen_assert(info >= 0); \
+/* something should be done with nb_transpositions */ \
+\
+ first_zero_pivot = info; \
+ return first_zero_pivot; \
+ } \
+};
+
+EIGEN_MKL_LU_PARTPIV(double, double, d)
+EIGEN_MKL_LU_PARTPIV(float, float, s)
+EIGEN_MKL_LU_PARTPIV(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_LU_PARTPIV(scomplex, MKL_Complex8, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARTIALLU_LAPACK_H
diff --git a/extern/Eigen3/Eigen/src/LU/arch/CMakeLists.txt b/extern/Eigen3/Eigen/src/LU/arch/CMakeLists.txt
new file mode 100644
index 00000000000..f6b7ed9ecc3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/LU/arch/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_LU_arch_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_LU_arch_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/LU/arch COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h b/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h
new file mode 100644
index 00000000000..df4e01d6cb0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h
@@ -0,0 +1,451 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+
+NOTE: this routine has been adapted from the CSparse library:
+
+Copyright (c) 2006, Timothy A. Davis.
+http://www.cise.ufl.edu/research/sparse/CSparse
+
+CSparse is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+CSparse is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this Module; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+*/
+
+#ifndef EIGEN_SPARSE_AMD_H
+#define EIGEN_SPARSE_AMD_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename T> inline T amd_flip(const T& i) { return -i-2; }
+template<typename T> inline T amd_unflip(const T& i) { return i<0 ? amd_flip(i) : i; }
+template<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1& j) { return w[j]<0; }
+template<typename T0, typename T1> inline void amd_mark(T0* w, const T1& j) { w[j] = amd_flip(w[j]); }
+
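A note on the flip encoding used throughout this routine: amd_flip(i) = -i-2 is an involution (-(-i-2)-2 == i) that keeps the sentinel value -1 fixed, so live indices (i >= 0) and their flipped "dead/absorbed" forms (-i-2 <= -2) never collide, and amd_unflip recovers the index from either form:

    amd_flip(0)  == -2     amd_unflip(-2) == 0
    amd_flip(3)  == -5     amd_unflip(-5) == 3
    amd_flip(-1) == -1     // -1 still reads as "empty" after flipping
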
+/* clear w */
+template<typename Index>
+static int cs_wclear (Index mark, Index lemax, Index *w, Index n)
+{
+ Index k;
+ if(mark < 2 || (mark + lemax < 0))
+ {
+ for(k = 0; k < n; k++)
+ if(w[k] != 0)
+ w[k] = 1;
+ mark = 2;
+ }
+ return (mark); /* at this point, w[0..n-1] < mark holds */
+}
+
+/* depth-first search and postorder of a tree rooted at node j */
+template<typename Index>
+Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack)
+{
+ int i, p, top = 0;
+ if(!head || !next || !post || !stack) return (-1); /* check inputs */
+ stack[0] = j; /* place j on the stack */
+ while (top >= 0) /* while (stack is not empty) */
+ {
+ p = stack[top]; /* p = top of stack */
+ i = head[p]; /* i = youngest child of p */
+ if(i == -1)
+ {
+ top--; /* p has no unordered children left */
+ post[k++] = p; /* node p is the kth postordered node */
+ }
+ else
+ {
+ head[p] = next[i]; /* remove i from children of p */
+ stack[++top] = i; /* start dfs on child node i */
+ }
+ }
+ return k;
+}
+
+
+/** \internal
+ * Approximate minimum degree ordering algorithm.
+ * \returns the permutation P reducing the fill-in of the input matrix \a C
+ * The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional.
+ * On exit the values of C are destroyed */
+template<typename Scalar, typename Index>
+void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic,Dynamic,Index>& perm)
+{
+ typedef SparseMatrix<Scalar,ColMajor,Index> CCS;
+
+ int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
+ k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
+ ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t;
+ unsigned int h;
+
+ Index n = C.cols();
+ dense = std::max<Index> (16, 10 * sqrt ((double) n)); /* find dense threshold */
+ dense = std::min<Index> (n-2, dense);
+
+ Index cnz = C.nonZeros();
+ perm.resize(n+1);
+ t = cnz + cnz/5 + 2*n; /* add elbow room to C */
+ C.resizeNonZeros(t);
+
+ Index* W = new Index[8*(n+1)]; /* get workspace */
+ Index* len = W;
+ Index* nv = W + (n+1);
+ Index* next = W + 2*(n+1);
+ Index* head = W + 3*(n+1);
+ Index* elen = W + 4*(n+1);
+ Index* degree = W + 5*(n+1);
+ Index* w = W + 6*(n+1);
+ Index* hhead = W + 7*(n+1);
+ Index* last = perm.indices().data(); /* use P as workspace for last */
+
+ /* --- Initialize quotient graph ---------------------------------------- */
+ Index* Cp = C.outerIndexPtr();
+ Index* Ci = C.innerIndexPtr();
+ for(k = 0; k < n; k++)
+ len[k] = Cp[k+1] - Cp[k];
+ len[n] = 0;
+ nzmax = t;
+
+ for(i = 0; i <= n; i++)
+ {
+ head[i] = -1; // degree list i is empty
+ last[i] = -1;
+ next[i] = -1;
+ hhead[i] = -1; // hash list i is empty
+ nv[i] = 1; // node i is just one node
+ w[i] = 1; // node i is alive
+ elen[i] = 0; // Ek of node i is empty
+ degree[i] = len[i]; // degree of node i
+ }
+ mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
+ elen[n] = -2; /* n is a dead element */
+ Cp[n] = -1; /* n is a root of assembly tree */
+ w[n] = 0; /* n is a dead element */
+
+ /* --- Initialize degree lists ------------------------------------------ */
+ for(i = 0; i < n; i++)
+ {
+ d = degree[i];
+ if(d == 0) /* node i is empty */
+ {
+ elen[i] = -2; /* element i is dead */
+ nel++;
+ Cp[i] = -1; /* i is a root of assembly tree */
+ w[i] = 0;
+ }
+ else if(d > dense) /* node i is dense */
+ {
+ nv[i] = 0; /* absorb i into element n */
+ elen[i] = -1; /* node i is dead */
+ nel++;
+ Cp[i] = amd_flip (n);
+ nv[n]++;
+ }
+ else
+ {
+ if(head[d] != -1) last[head[d]] = i;
+ next[i] = head[d]; /* put node i in degree list d */
+ head[d] = i;
+ }
+ }
+
+ while (nel < n) /* while (selecting pivots) do */
+ {
+ /* --- Select node of minimum approximate degree -------------------- */
+ for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}
+ if(next[k] != -1) last[next[k]] = -1;
+ head[mindeg] = next[k]; /* remove k from degree list */
+ elenk = elen[k]; /* elenk = |Ek| */
+ nvk = nv[k]; /* # of nodes k represents */
+ nel += nvk; /* nv[k] nodes of A eliminated */
+
+ /* --- Garbage collection ------------------------------------------- */
+ if(elenk > 0 && cnz + mindeg >= nzmax)
+ {
+ for(j = 0; j < n; j++)
+ {
+ if((p = Cp[j]) >= 0) /* j is a live node or element */
+ {
+ Cp[j] = Ci[p]; /* save first entry of object */
+ Ci[p] = amd_flip (j); /* first entry is now amd_flip(j) */
+ }
+ }
+ for(q = 0, p = 0; p < cnz; ) /* scan all of memory */
+ {
+ if((j = amd_flip (Ci[p++])) >= 0) /* found object j */
+ {
+ Ci[q] = Cp[j]; /* restore first entry of object */
+ Cp[j] = q++; /* new pointer to object j */
+ for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];
+ }
+ }
+ cnz = q; /* Ci[cnz...nzmax-1] now free */
+ }
+
+ /* --- Construct new element ---------------------------------------- */
+ dk = 0;
+ nv[k] = -nvk; /* flag k as in Lk */
+ p = Cp[k];
+ pk1 = (elenk == 0) ? p : cnz; /* do in place if elen[k] == 0 */
+ pk2 = pk1;
+ for(k1 = 1; k1 <= elenk + 1; k1++)
+ {
+ if(k1 > elenk)
+ {
+ e = k; /* search the nodes in k */
+ pj = p; /* list of nodes starts at Ci[pj]*/
+ ln = len[k] - elenk; /* length of list of nodes in k */
+ }
+ else
+ {
+ e = Ci[p++]; /* search the nodes in e */
+ pj = Cp[e];
+ ln = len[e]; /* length of list of nodes in e */
+ }
+ for(k2 = 1; k2 <= ln; k2++)
+ {
+ i = Ci[pj++];
+ if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */
+ dk += nvi; /* degree[Lk] += size of node i */
+ nv[i] = -nvi; /* negate nv[i] to denote i in Lk*/
+ Ci[pk2++] = i; /* place i in Lk */
+ if(next[i] != -1) last[next[i]] = last[i];
+ if(last[i] != -1) /* remove i from degree list */
+ {
+ next[last[i]] = next[i];
+ }
+ else
+ {
+ head[degree[i]] = next[i];
+ }
+ }
+ if(e != k)
+ {
+ Cp[e] = amd_flip (k); /* absorb e into k */
+ w[e] = 0; /* e is now a dead element */
+ }
+ }
+ if(elenk != 0) cnz = pk2; /* Ci[cnz...nzmax] is free */
+ degree[k] = dk; /* external degree of k - |Lk\i| */
+ Cp[k] = pk1; /* element k is in Ci[pk1..pk2-1] */
+ len[k] = pk2 - pk1;
+ elen[k] = -2; /* k is now an element */
+
+ /* --- Find set differences ----------------------------------------- */
+ mark = internal::cs_wclear<Index>(mark, lemax, w, n); /* clear w if necessary */
+ for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */
+ {
+ i = Ci[pk];
+ if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */
+ nvi = -nv[i]; /* nv[i] was negated */
+ wnvi = mark - nvi;
+ for(p = Cp[i]; p <= Cp[i] + eln - 1; p++) /* scan Ei */
+ {
+ e = Ci[p];
+ if(w[e] >= mark)
+ {
+ w[e] -= nvi; /* decrement |Le\Lk| */
+ }
+ else if(w[e] != 0) /* ensure e is a live element */
+ {
+ w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */
+ }
+ }
+ }
+
+ /* --- Degree update ------------------------------------------------ */
+ for(pk = pk1; pk < pk2; pk++) /* scan2: degree update */
+ {
+ i = Ci[pk]; /* consider node i in Lk */
+ p1 = Cp[i];
+ p2 = p1 + elen[i] - 1;
+ pn = p1;
+ for(h = 0, d = 0, p = p1; p <= p2; p++) /* scan Ei */
+ {
+ e = Ci[p];
+ if(w[e] != 0) /* e is an unabsorbed element */
+ {
+ dext = w[e] - mark; /* dext = |Le\Lk| */
+ if(dext > 0)
+ {
+ d += dext; /* sum up the set differences */
+ Ci[pn++] = e; /* keep e in Ei */
+ h += e; /* compute the hash of node i */
+ }
+ else
+ {
+ Cp[e] = amd_flip (k); /* aggressive absorb. e->k */
+ w[e] = 0; /* e is a dead element */
+ }
+ }
+ }
+ elen[i] = pn - p1 + 1; /* elen[i] = |Ei| */
+ p3 = pn;
+ p4 = p1 + len[i];
+ for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */
+ {
+ j = Ci[p];
+ if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */
+ d += nvj; /* degree(i) += |j| */
+ Ci[pn++] = j; /* place j in node list of i */
+ h += j; /* compute hash for node i */
+ }
+ if(d == 0) /* check for mass elimination */
+ {
+ Cp[i] = amd_flip (k); /* absorb i into k */
+ nvi = -nv[i];
+ dk -= nvi; /* |Lk| -= |i| */
+ nvk += nvi; /* |k| += nv[i] */
+ nel += nvi;
+ nv[i] = 0;
+ elen[i] = -1; /* node i is dead */
+ }
+ else
+ {
+ degree[i] = std::min<Index> (degree[i], d); /* update degree(i) */
+ Ci[pn] = Ci[p3]; /* move first node to end */
+ Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */
+ Ci[p1] = k; /* add k as 1st element in of Ei */
+ len[i] = pn - p1 + 1; /* new len of adj. list of node i */
+ h %= n; /* finalize hash of i */
+ next[i] = hhead[h]; /* place i in hash bucket */
+ hhead[h] = i;
+ last[i] = h; /* save hash of i in last[i] */
+ }
+ } /* scan2 is done */
+ degree[k] = dk; /* finalize |Lk| */
+ lemax = std::max<Index>(lemax, dk);
+ mark = internal::cs_wclear<Index>(mark+lemax, lemax, w, n); /* clear w */
+
+ /* --- Supernode detection ------------------------------------------ */
+ for(pk = pk1; pk < pk2; pk++)
+ {
+ i = Ci[pk];
+ if(nv[i] >= 0) continue; /* skip if i is dead */
+ h = last[i]; /* scan hash bucket of node i */
+ i = hhead[h];
+ hhead[h] = -1; /* hash bucket will be empty */
+ for(; i != -1 && next[i] != -1; i = next[i], mark++)
+ {
+ ln = len[i];
+ eln = elen[i];
+ for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;
+ jlast = i;
+ for(j = next[i]; j != -1; ) /* compare i with all j */
+ {
+ ok = (len[j] == ln) && (elen[j] == eln);
+ for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)
+ {
+ if(w[Ci[p]] != mark) ok = 0; /* compare i and j*/
+ }
+ if(ok) /* i and j are identical */
+ {
+ Cp[j] = amd_flip (i); /* absorb j into i */
+ nv[i] += nv[j];
+ nv[j] = 0;
+ elen[j] = -1; /* node j is dead */
+ j = next[j]; /* delete j from hash bucket */
+ next[jlast] = j;
+ }
+ else
+ {
+ jlast = j; /* j and i are different */
+ j = next[j];
+ }
+ }
+ }
+ }
+
+ /* --- Finalize new element------------------------------------------ */
+ for(p = pk1, pk = pk1; pk < pk2; pk++) /* finalize Lk */
+ {
+ i = Ci[pk];
+ if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
+ nv[i] = nvi; /* restore nv[i] */
+ d = degree[i] + dk - nvi; /* compute external degree(i) */
+ d = std::min<Index> (d, n - nel - nvi);
+ if(head[d] != -1) last[head[d]] = i;
+ next[i] = head[d]; /* put i back in degree list */
+ last[i] = -1;
+ head[d] = i;
+ mindeg = std::min<Index> (mindeg, d); /* find new minimum degree */
+ degree[i] = d;
+ Ci[p++] = i; /* place i in Lk */
+ }
+ nv[k] = nvk; /* # nodes absorbed into k */
+ if((len[k] = p-pk1) == 0) /* length of adj list of element k*/
+ {
+ Cp[k] = -1; /* k is a root of the tree */
+ w[k] = 0; /* k is now a dead element */
+ }
+ if(elenk != 0) cnz = p; /* free unused space in Lk */
+ }
+
+ /* --- Postordering ----------------------------------------------------- */
+ for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */
+ for(j = 0; j <= n; j++) head[j] = -1;
+ for(j = n; j >= 0; j--) /* place unordered nodes in lists */
+ {
+ if(nv[j] > 0) continue; /* skip if j is an element */
+ next[j] = head[Cp[j]]; /* place j in list of its parent */
+ head[Cp[j]] = j;
+ }
+ for(e = n; e >= 0; e--) /* place elements in lists */
+ {
+ if(nv[e] <= 0) continue; /* skip unless e is an element */
+ if(Cp[e] != -1)
+ {
+ next[e] = head[Cp[e]]; /* place e in list of its parent */
+ head[Cp[e]] = e;
+ }
+ }
+ for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */
+ {
+ if(Cp[i] == -1) k = internal::cs_tdfs<Index>(i, k, head, next, perm.indices().data(), w);
+ }
+
+ perm.indices().conservativeResize(n);
+
+ delete[] W;
+}
+
+} // namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_AMD_H
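
As documented above, minimum_degree_ordering() expects a compressed, selfadjoint column-major matrix with both triangles stored (the diagonal is optional) and destroys its values; a minimal usage sketch of this internal routine, with illustrative matrix contents:

    #include <Eigen/Sparse>
    using namespace Eigen;

    SparseMatrix<double,ColMajor,int> C(4,4);
    C.insert(0,1) = 1; C.insert(1,0) = 1; // both halves must be stored
    C.insert(2,3) = 1; C.insert(3,2) = 1;
    C.makeCompressed();

    PermutationMatrix<Dynamic,Dynamic,int> perm;
    internal::minimum_degree_ordering(C, perm); // the values of C are destroyed
    // perm now holds a fill-in reducing ordering of C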
diff --git a/extern/Eigen3/Eigen/src/OrderingMethods/CMakeLists.txt b/extern/Eigen3/Eigen/src/OrderingMethods/CMakeLists.txt
new file mode 100644
index 00000000000..9f4bb275808
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/OrderingMethods/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_OrderingMethods_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_OrderingMethods_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/OrderingMethods COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/PaStiXSupport/CMakeLists.txt b/extern/Eigen3/Eigen/src/PaStiXSupport/CMakeLists.txt
new file mode 100644
index 00000000000..28c657e9bcf
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/PaStiXSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_PastixSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_PastixSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/PaStiXSupport COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h b/extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h
new file mode 100644
index 00000000000..f42826208e3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h
@@ -0,0 +1,803 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PASTIXSUPPORT_H
+#define EIGEN_PASTIXSUPPORT_H
+
+namespace Eigen {
+
+/** \ingroup PaStiXSupport_Module
+ * \brief Interface to the PaStiX solver
+ *
+ * This class is used to solve the linear systems A.X = B via the PaStiX library.
+ * The matrix can be either real or complex, symmetric or not.
+ *
+ * \sa TutorialSparseDirectSolvers
+ */
+
+template<typename _MatrixType, bool IsStrSym = false> class PastixLU;
+template<typename _MatrixType, int Options> class PastixLLT;
+template<typename _MatrixType, int Options> class PastixLDLT;
+
+namespace internal
+{
+
+ template<class Pastix> struct pastix_traits;
+
+ template<typename _MatrixType>
+ struct pastix_traits< PastixLU<_MatrixType> >
+ {
+ typedef _MatrixType MatrixType;
+ typedef typename _MatrixType::Scalar Scalar;
+ typedef typename _MatrixType::RealScalar RealScalar;
+ typedef typename _MatrixType::Index Index;
+ };
+
+ template<typename _MatrixType, int Options>
+ struct pastix_traits< PastixLLT<_MatrixType,Options> >
+ {
+ typedef _MatrixType MatrixType;
+ typedef typename _MatrixType::Scalar Scalar;
+ typedef typename _MatrixType::RealScalar RealScalar;
+ typedef typename _MatrixType::Index Index;
+ };
+
+ template<typename _MatrixType, int Options>
+ struct pastix_traits< PastixLDLT<_MatrixType,Options> >
+ {
+ typedef _MatrixType MatrixType;
+ typedef typename _MatrixType::Scalar Scalar;
+ typedef typename _MatrixType::RealScalar RealScalar;
+ typedef typename _MatrixType::Index Index;
+ };
+
+ void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
+ {
+ if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
+ if (nbrhs == 0) x = NULL;
+ s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
+ }
+
+ void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
+ {
+ if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
+ if (nbrhs == 0) x = NULL;
+ d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
+ }
+
+ void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
+ {
+ if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
+ if (nbrhs == 0) x = NULL;
+ c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<COMPLEX*>(vals), perm, invp, reinterpret_cast<COMPLEX*>(x), nbrhs, iparm, dparm);
+ }
+
+ void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
+ {
+ if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
+ if (nbrhs == 0) x = NULL;
+ z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<DCOMPLEX*>(vals), perm, invp, reinterpret_cast<DCOMPLEX*>(x), nbrhs, iparm, dparm);
+ }
+
+ // Convert the matrix to Fortran-style Numbering
+ template <typename MatrixType>
+ void EigenToFortranNumbering (MatrixType& mat)
+ {
+ if ( !(mat.outerIndexPtr()[0]) )
+ {
+ int i;
+ for(i = 0; i <= mat.rows(); ++i)
+ ++mat.outerIndexPtr()[i];
+ for(i = 0; i < mat.nonZeros(); ++i)
+ ++mat.innerIndexPtr()[i];
+ }
+ }
+
+ // Convert to C-style Numbering
+ template <typename MatrixType>
+ void EigenToCNumbering (MatrixType& mat)
+ {
+ // Check the Numbering
+ if ( mat.outerIndexPtr()[0] == 1 )
+ { // Convert to C-style numbering
+ int i;
+ for(i = 0; i <= mat.rows(); ++i)
+ --mat.outerIndexPtr()[i];
+ for(i = 0; i < mat.nonZeros(); ++i)
+ --mat.innerIndexPtr()[i];
+ }
+ }
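These two helpers shift the CSC index arrays in place between the 0-based (C/Eigen) and 1-based (Fortran/PaStiX) conventions; the outerIndexPtr()[0] guard makes both conversions idempotent. For a 3x3 identity stored in CSC form, for instance:

    C-style (Eigen):        outer = [0 1 2 3]   inner = [0 1 2]
    Fortran-style (PaStiX): outer = [1 2 3 4]   inner = [1 2 3]
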
+
+ // Symmetrize the graph of the input matrix
+ // In : The Input matrix to symmetrize the pattern
+ // Out : The output matrix
+ // StrMatTrans : The structural pattern of the transpose of In; It is
+ // used to optimize the future symmetrization with the same matrix pattern
+ // WARNING It is assumed here that successive calls to this routine are done
+ // with matrices having the same pattern.
+ template <typename MatrixType>
+ void EigenSymmetrizeMatrixGraph (const MatrixType& In, MatrixType& Out, MatrixType& StrMatTrans, bool& hasTranspose)
+ {
+ eigen_assert(In.cols()==In.rows() && " Can only symmetrize the graph of a square matrix");
+ if (!hasTranspose)
+ { //First call to this routine, need to compute the structural pattern of In^T
+ StrMatTrans = In.transpose();
+ // Set the elements of the matrix to zero
+ for (int i = 0; i < StrMatTrans.rows(); i++)
+ {
+ for (typename MatrixType::InnerIterator it(StrMatTrans, i); it; ++it)
+ it.valueRef() = 0.0;
+ }
+ hasTranspose = true;
+ }
+ Out = (StrMatTrans + In).eval();
+ }
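The symmetrization keeps the values of In and merely widens its pattern: StrMatTrans stores the pattern of In^T with explicit zero values, so Out = StrMatTrans + In has an entry wherever In or its transpose does. For instance:

    In:  (0,1) -> 3                  // nonsymmetric pattern
    Out: (0,1) -> 3, (1,0) -> 0      // symmetric pattern, values preserved
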
+
+}
+
+// This is the base class to interface with PaStiX functions.
+// Users should not use this class directly.
+template <class Derived>
+class PastixBase
+{
+ public:
+ typedef typename internal::pastix_traits<Derived>::MatrixType _MatrixType;
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+
+ public:
+
+ PastixBase():m_initisOk(false),m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
+ {
+ m_pastixdata = 0;
+ m_hasTranspose = false;
+ PastixInit();
+ }
+
+ ~PastixBase()
+ {
+ PastixDestroy();
+ }
+
+ // Initialize the Pastix data structure, check the matrix
+ void PastixInit();
+
+ // Compute the ordering and the symbolic factorization
+ Derived& analyzePattern (MatrixType& mat);
+
+ // Compute the numerical factorization
+ Derived& factorize (MatrixType& mat);
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<PastixBase, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "Pastix solver is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "PastixBase::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<PastixBase, Rhs>(*this, b.derived());
+ }
+
+ template<typename Rhs,typename Dest>
+ bool _solve (const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const;
+
+ /** \internal */
+ template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
+ void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+ eigen_assert(rows()==b.rows());
+
+ // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
+ static const int NbColsAtOnce = 1;
+ int rhsCols = b.cols();
+ int size = b.rows();
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
+ for(int k=0; k<rhsCols; k+=NbColsAtOnce)
+ {
+ int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
+ tmp.leftCols(actualCols) = b.middleCols(k,actualCols);
+ tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols));
+ dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView();
+ }
+ }
+
+ Derived& derived()
+ {
+ return *static_cast<Derived*>(this);
+ }
+ const Derived& derived() const
+ {
+ return *static_cast<const Derived*>(this);
+ }
+
+ /** Returns a reference to the integer vector IPARM of PaStiX parameters
+ * to modify the default parameters.
+ * The statistics related to the different phases of factorization and solve are saved here as well
+ * \sa analyzePattern() factorize()
+ */
+ Array<Index,IPARM_SIZE,1>& iparm()
+ {
+ return m_iparm;
+ }
+
+ /** Return a reference to a particular index parameter of the IPARM vector
+ * \sa iparm()
+ */
+
+ int& iparm(int idxparam)
+ {
+ return m_iparm(idxparam);
+ }
+
+ /** Returns a reference to the double vector DPARM of PaStiX parameters
+ * The statistics related to the different phases of factorization and solve are saved here as well
+ * \sa analyzePattern() factorize()
+ */
+ Array<RealScalar,IPARM_SIZE,1>& dparm()
+ {
+ return m_dparm;
+ }
+
+
+ /** Return a reference to a particular index parameter of the DPARM vector
+ * \sa dparm()
+ */
+
+ double& dparm(int idxparam)
+ {
+ return m_dparm(idxparam);
+ }
+
+ inline Index cols() const { return m_size; }
+ inline Index rows() const { return m_size; }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if PaStiX reports a problem,
+ * \c InvalidInput if the input matrix is invalid
+ *
+ * \sa iparm()
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<PastixBase, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "Pastix LU, LLT or LDLT is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "PastixBase::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<PastixBase, Rhs>(*this, b.derived());
+ }
+
+ protected:
+ // Free all the data allocated by Pastix
+ void PastixDestroy()
+ {
+ eigen_assert(m_initisOk && "The Pastix structure should be allocated first");
+ m_iparm(IPARM_START_TASK) = API_TASK_CLEAN;
+ m_iparm(IPARM_END_TASK) = API_TASK_CLEAN;
+ internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, m_mat_null.outerIndexPtr(), m_mat_null.innerIndexPtr(),
+ m_mat_null.valuePtr(), m_perm.data(), m_invp.data(), m_vec_null.data(), 1, m_iparm.data(), m_dparm.data());
+ }
+
+ Derived& compute (MatrixType& mat);
+
+ int m_initisOk;
+ int m_analysisIsOk;
+ int m_factorizationIsOk;
+ bool m_isInitialized;
+ mutable ComputationInfo m_info;
+ mutable pastix_data_t *m_pastixdata; // Data structure for pastix
+ mutable SparseMatrix<Scalar, ColMajor> m_mat_null; // An input null matrix
+ mutable Matrix<Scalar, Dynamic,1> m_vec_null; // An input null vector
+ mutable SparseMatrix<Scalar, ColMajor> m_StrMatTrans; // The transpose pattern of the input matrix
+ mutable bool m_hasTranspose; // The transpose of the current matrix has already been computed
+ mutable int m_comm; // The MPI communicator identifier
+ mutable Matrix<Index,IPARM_SIZE,1> m_iparm; // integer vector for the input parameters
+ mutable Matrix<double,DPARM_SIZE,1> m_dparm; // Scalar vector for the input parameters
+ mutable Matrix<Index,Dynamic,1> m_perm; // Permutation vector
+ mutable Matrix<Index,Dynamic,1> m_invp; // Inverse permutation vector
+ mutable int m_ordering; // ordering method to use
+ mutable int m_amalgamation; // level of amalgamation
+ mutable int m_size; // Size of the matrix
+
+ private:
+ PastixBase(PastixBase& ) {}
+
+};
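
Since iparm() and dparm() expose the raw parameter arrays, both tuning and statistics go through them; a hedged sketch (the DPARM_ index name below comes from the PaStiX headers, not from this file, so consult the PaStiX user's manual):

    PastixLU<SparseMatrix<double> > solver;
    solver.compute(A);
    if(solver.info() == Success)
      std::cout << "factorization time: " << solver.dparm(DPARM_FACT_TIME) << "\n";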
+
+ /** Initialize the PaStiX data structure.
+ * A first call to this function fills iparm and dparm with the default PaStiX parameters.
+ * \sa iparm() dparm()
+ */
+template <class Derived>
+void PastixBase<Derived>::PastixInit()
+{
+ m_size = 0;
+ m_iparm.resize(IPARM_SIZE);
+ m_dparm.resize(DPARM_SIZE);
+
+ m_iparm(IPARM_MODIFY_PARAMETER) = API_NO;
+ if(m_pastixdata)
+ { // This trick is used to reset the Pastix internal data between successive
+ // calls with structurally different matrices
+ PastixDestroy();
+ m_pastixdata = 0;
+ m_iparm(IPARM_MODIFY_PARAMETER) = API_YES;
+ m_hasTranspose = false;
+ }
+
+ m_iparm(IPARM_START_TASK) = API_TASK_INIT;
+ m_iparm(IPARM_END_TASK) = API_TASK_INIT;
+ m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
+ internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, m_mat_null.outerIndexPtr(), m_mat_null.innerIndexPtr(),
+ m_mat_null.valuePtr(), m_perm.data(), m_invp.data(), m_vec_null.data(), 1, m_iparm.data(), m_dparm.data());
+
+ m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
+
+ // Check the returned error
+ if(m_iparm(IPARM_ERROR_NUMBER)) {
+ m_info = InvalidInput;
+ m_initisOk = false;
+ }
+ else {
+ m_info = Success;
+ m_initisOk = true;
+ }
+}
+
+template <class Derived>
+Derived& PastixBase<Derived>::compute(MatrixType& mat)
+{
+ eigen_assert(mat.rows() == mat.cols() && "The input matrix should be square");
+ typedef typename MatrixType::Scalar Scalar;
+
+ // Save the size of the current matrix
+ m_size = mat.rows();
+ // Convert the matrix to Fortran-style numbering
+ internal::EigenToFortranNumbering(mat);
+ analyzePattern(mat);
+ factorize(mat);
+ m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
+ if (m_factorizationIsOk) m_isInitialized = true;
+
+ // Convert the matrix back to C-style numbering so the caller's matrix is left unchanged
+ internal::EigenToCNumbering(mat);
+
+ return derived();
+}
+
+
+template <class Derived>
+Derived& PastixBase<Derived>::analyzePattern(MatrixType& mat)
+{
+ eigen_assert(m_initisOk && "PastixInit should be called first to set the default parameters");
+ m_size = mat.rows();
+ m_perm.resize(m_size);
+ m_invp.resize(m_size);
+
+ // Convert the matrix to Fortran-style numbering
+ internal::EigenToFortranNumbering(mat);
+
+ m_iparm(IPARM_START_TASK) = API_TASK_ORDERING;
+ m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE;
+
+ internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
+ mat.valuePtr(), m_perm.data(), m_invp.data(), m_vec_null.data(), 0, m_iparm.data(), m_dparm.data());
+
+ // Check the returned error
+ if(m_iparm(IPARM_ERROR_NUMBER)) {
+ m_info = NumericalIssue;
+ m_analysisIsOk = false;
+ }
+ else {
+ m_info = Success;
+ m_analysisIsOk = true;
+ }
+ return derived();
+}
+
+template <class Derived>
+Derived& PastixBase<Derived>::factorize(MatrixType& mat)
+{
+ eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase");
+ m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;
+ m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;
+ m_size = mat.rows();
+
+ // Convert the matrix to Fortran-style numbering
+ internal::EigenToFortranNumbering(mat);
+
+ internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
+ mat.valuePtr(), m_perm.data(), m_invp.data(), m_vec_null.data(), 0, m_iparm.data(), m_dparm.data());
+
+ // Check the returned error
+ if(m_iparm(IPARM_ERROR_NUMBER)) {
+ m_info = NumericalIssue;
+ m_factorizationIsOk = false;
+ m_isInitialized = false;
+ }
+ else {
+ m_info = Success;
+ m_factorizationIsOk = true;
+ m_isInitialized = true;
+ }
+ return derived();
+}
+
+/* Solve the system */
+template<typename Base>
+template<typename Rhs,typename Dest>
+bool PastixBase<Base>::_solve (const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const
+{
+ eigen_assert(m_isInitialized && "The matrix should be factorized first");
+ EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
+ THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ int rhs = 1;
+
+ x = b; /* on return, x is overwritten by the computed solution */
+
+ for (int i = 0; i < b.cols(); i++){
+ m_iparm(IPARM_START_TASK) = API_TASK_SOLVE;
+ m_iparm(IPARM_END_TASK) = API_TASK_REFINE;
+ m_iparm(IPARM_RHS_MAKING) = API_RHS_B;
+ internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), m_mat_null.outerIndexPtr(), m_mat_null.innerIndexPtr(),
+ m_mat_null.valuePtr(), m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());
+ }
+ // Check the returned error
+ if(m_iparm(IPARM_ERROR_NUMBER)) {
+ m_info = NumericalIssue;
+ return false;
+ }
+ else {
+ return true;
+ }
+}
+
+/** \ingroup PaStiXSupport_Module
+ * \class PastixLU
+ * \brief Sparse direct LU solver based on PaStiX library
+ *
+ * This class is used to solve the linear systems A.X = B with a supernodal LU
+ * factorization in the PaStiX library. The matrix A should be square and nonsingular.
+ * PaStiX requires that the matrix A has a symmetric structural pattern;
+ * this interface symmetrizes the input matrix otherwise.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false
+ * \note If the analysis and factorization phases are called separately,
+ * the input matrix will be symmetrized at each call; hence it is advised to
+ * symmetrize the matrix in the end-user program and set \p IsStrSym to true
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ *
+ */
+template<typename _MatrixType, bool IsStrSym>
+class PastixLU : public PastixBase< PastixLU<_MatrixType> >
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef PastixBase<PastixLU<MatrixType> > Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar, ColMajor> PaStiXType;
+
+ public:
+ PastixLU():Base() {}
+
+ PastixLU(const MatrixType& matrix):Base()
+ {
+ compute(matrix);
+ }
+ /** Compute the LU supernodal factorization of \p matrix.
+ * iparm and dparm can be used to tune the PaStiX parameters.
+ * See the PaStiX user's manual.
+ * \sa analyzePattern() factorize()
+ */
+ void compute (const MatrixType& matrix)
+ {
+ // Pastix supports only column-major matrices with a symmetric pattern
+ Base::PastixInit();
+ PaStiXType temp(matrix.rows(), matrix.cols());
+ // Symmetrize the graph of the matrix
+ if (IsStrSym)
+ temp = matrix;
+ else
+ {
+ internal::EigenSymmetrizeMatrixGraph<PaStiXType>(matrix, temp, m_StrMatTrans, m_hasTranspose);
+ }
+ m_iparm(IPARM_SYM) = API_SYM_NO;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LU;
+ Base::compute(temp);
+ }
+ /** Compute the LU symbolic factorization of \p matrix using its sparsity pattern.
+ * Several ordering methods can be used at this step. See the PaStiX user's manual.
+ * The result of this operation can be used with successive matrices having the same pattern as \p matrix
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+
+ Base::PastixInit();
+ /* Pastix supports only column-major matrices with symmetrized patterns */
+ SparseMatrix<Scalar, ColMajor> temp(matrix.rows(), matrix.cols());
+ // Symmetrize the graph of the matrix
+ if (IsStrSym)
+ temp = matrix;
+ else
+ {
+ internal::EigenSymmetrizeMatrixGraph<PaStiXType>(matrix, temp, m_StrMatTrans,m_hasTranspose);
+ }
+
+ m_iparm(IPARM_SYM) = API_SYM_NO;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LU;
+ Base::analyzePattern(temp);
+ }
+
+ /** Compute the LU supernodal factorization of \p matrix
+ * WARNING The matrix \p matrix should have the same structural pattern
+ * as the one used in the analysis phase.
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix)
+ {
+ /* Pastix supports only column-major matrices with symmetrized patterns */
+ SparseMatrix<Scalar, ColMajor> temp(matrix.rows(), matrix.cols());
+ // Symmetrize the graph of the matrix
+ if (IsStrSym)
+ temp = matrix;
+ else
+ {
+ internal::EigenSymmetrizeMatrixGraph<PaStiXType>(matrix, temp, m_StrMatTrans,m_hasTranspose);
+ }
+ m_iparm(IPARM_SYM) = API_SYM_NO;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LU;
+ Base::factorize(temp);
+ }
+ protected:
+ using Base::m_iparm;
+ using Base::m_dparm;
+ using Base::m_StrMatTrans;
+ using Base::m_hasTranspose;
+
+ private:
+ PastixLU(PastixLU& ) {}
+};
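
A usage sketch of the two-phase interface documented above, assuming a square, nonsingular matrix A and a right-hand side b filled elsewhere:

    typedef SparseMatrix<double, ColMajor> SpMat;
    SpMat A;       // square and nonsingular
    VectorXd b, x;

    PastixLU<SpMat> solver;
    solver.analyzePattern(A); // ordering + symbolic factorization, reusable
    solver.factorize(A);      // numerical factorization
    if(solver.info() == Success)
      x = solver.solve(b);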
+
+/** \ingroup PaStiXSupport_Module
+ * \class PastixLLT
+ * \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library
+ *
+ * This class is used to solve the linear systems A.X = B via an LL^T supernodal Cholesky factorization
+ * available in the PaStiX library. The matrix A should be symmetric and positive definite.
+ * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam UpLo The part of the matrix to use: Lower or Upper. The default is Lower as required by PaStiX
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename _MatrixType, int _UpLo>
+class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> >
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef PastixBase<PastixLLT<MatrixType, _UpLo> > Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ public:
+ enum { UpLo = _UpLo };
+ PastixLLT():Base() {}
+
+ PastixLLT(const MatrixType& matrix):Base()
+ {
+ compute(matrix);
+ }
+
+ /** Compute the L factor of the LL^T supernodal factorization of \p matrix
+ * \sa analyzePattern() factorize()
+ */
+ void compute (const MatrixType& matrix)
+ {
+ // Pastix supports only lower, column-major matrices
+ Base::PastixInit(); // This is necessary to let PaStiX initialize its data structure between successive calls to compute
+ SparseMatrix<Scalar, ColMajor> temp(matrix.rows(), matrix.cols());
+ PermutationMatrix<Dynamic,Dynamic,Index> pnull;
+ temp.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>().twistedBy(pnull);
+ m_iparm(IPARM_SYM) = API_SYM_YES;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;
+ Base::compute(temp);
+ }
+
+ /** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern
+ * The result of this operation can be used with successive matrices having the same pattern as \p matrix
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ Base::PastixInit();
+ // Pastix supports only lower, column-major matrices
+ SparseMatrix<Scalar, ColMajor> temp(matrix.rows(), matrix.cols());
+ PermutationMatrix<Dynamic,Dynamic,Index> pnull;
+ temp.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>().twistedBy(pnull);
+ m_iparm(IPARM_SYM) = API_SYM_YES;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;
+ Base::analyzePattern(temp);
+ }
+ /** Compute the LL^T supernodal numerical factorization of \p matrix
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix)
+ {
+ // Pastix supports only lower, column-major matrices
+ SparseMatrix<Scalar, ColMajor> temp(matrix.rows(), matrix.cols());
+ PermutationMatrix<Dynamic,Dynamic,Index> pnull;
+ temp.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>().twistedBy(pnull);
+ m_iparm(IPARM_SYM) = API_SYM_YES;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;
+ Base::factorize(temp);
+ }
+ protected:
+ using Base::m_iparm;
+
+ private:
+ PastixLLT(PastixLLT& ) {}
+};
+
+/** \ingroup PaStiXSupport_Module
+ * \class PastixLDLT
+ * \brief A sparse direct supernodal Cholesky (LDLT) factorization and solver based on the PaStiX library
+ *
+ * This class is used to solve the linear systems A.X = B via an LDL^T supernodal Cholesky factorization
+ * available in the PaStiX library. The matrix A should be symmetric and positive definite.
+ * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam UpLo The part of the matrix to use: Lower or Upper. The default is Lower as required by PaStiX
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename _MatrixType, int _UpLo>
+class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> >
+{
+public:
+ typedef _MatrixType MatrixType;
+ typedef PastixBase<PastixLDLT<MatrixType, _UpLo> > Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ public:
+ enum { UpLo = _UpLo };
+ PastixLDLT():Base() {}
+
+ PastixLDLT(const MatrixType& matrix):Base()
+ {
+ compute(matrix);
+ }
+
+ /** Compute the L and D factors of the LDL^T factorization of \p matrix
+ * \sa analyzePattern() factorize()
+ */
+ void compute (const MatrixType& matrix)
+ {
+ Base::PastixInit();
+ // Pastix supports only lower, column-major matrices
+ SparseMatrix<Scalar, ColMajor> temp(matrix.rows(), matrix.cols());
+ PermutationMatrix<Dynamic,Dynamic,Index> pnull;
+ temp.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>().twistedBy(pnull);
+ m_iparm(IPARM_SYM) = API_SYM_YES;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT;
+ Base::compute(temp);
+ }
+
+ /** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern
+ * The result of this operation can be used with successive matrices having the same pattern as \p matrix
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ Base::PastixInit();
+ // Pastix supports only lower, column-major matrices
+ SparseMatrix<Scalar, ColMajor> temp(matrix.rows(), matrix.cols());
+ PermutationMatrix<Dynamic,Dynamic,Index> pnull;
+ temp.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>().twistedBy(pnull);
+
+ m_iparm(IPARM_SYM) = API_SYM_YES;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT;
+ Base::analyzePattern(temp);
+ }
+ /** Compute the LDL^T supernodal numerical factorization of \p matrix
+ *
+ */
+ void factorize(const MatrixType& matrix)
+ {
+ // Pastix supports only lower, column-major matrices
+ SparseMatrix<Scalar, ColMajor> temp(matrix.rows(), matrix.cols());
+ PermutationMatrix<Dynamic,Dynamic,Index> pnull;
+ temp.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>().twistedBy(pnull);
+
+ m_iparm(IPARM_SYM) = API_SYM_YES;
+ m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT;
+ Base::factorize(temp);
+ }
+
+ protected:
+ using Base::m_iparm;
+
+ private:
+ PastixLDLT(PastixLDLT& ) {}
+};
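
For the Cholesky variants the UpLo template argument selects which triangle of the input is read, the other being ignored; a minimal sketch under the same assumptions as above:

    SparseMatrix<double> A; // selfadjoint, positive definite
    VectorXd b;
    PastixLDLT<SparseMatrix<double>, Lower> ldlt(A); // reads only the Lower triangle
    VectorXd x = ldlt.solve(b);                      // PastixLLT is used the same way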
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<PastixBase<_MatrixType>, Rhs>
+ : solve_retval_base<PastixBase<_MatrixType>, Rhs>
+{
+ typedef PastixBase<_MatrixType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+template<typename _MatrixType, typename Rhs>
+struct sparse_solve_retval<PastixBase<_MatrixType>, Rhs>
+ : sparse_solve_retval_base<PastixBase<_MatrixType>, Rhs>
+{
+ typedef PastixBase<_MatrixType> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve_sparse(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PASTIXSUPPORT_H
diff --git a/extern/Eigen3/Eigen/src/PardisoSupport/CMakeLists.txt b/extern/Eigen3/Eigen/src/PardisoSupport/CMakeLists.txt
new file mode 100644
index 00000000000..a097ab40192
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/PardisoSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_PardisoSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_PardisoSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/PardisoSupport COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h b/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
new file mode 100644
index 00000000000..e6defc8c39e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -0,0 +1,614 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL PARDISO
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_PARDISOSUPPORT_H
+#define EIGEN_PARDISOSUPPORT_H
+
+namespace Eigen {
+
+template<typename _MatrixType> class PardisoLU;
+template<typename _MatrixType, int Options=Upper> class PardisoLLT;
+template<typename _MatrixType, int Options=Upper> class PardisoLDLT;
+
+namespace internal
+{
+ template<typename Index>
+ struct pardiso_run_selector
+ {
+ static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
+ Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+ {
+ Index error = 0;
+ ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
+ return error;
+ }
+ };
+ template<>
+ struct pardiso_run_selector<long long int>
+ {
+ typedef long long int Index;
+ static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
+ Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+ {
+ Index error = 0;
+ ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
+ return error;
+ }
+ };
+
+ template<class Pardiso> struct pardiso_traits;
+
+ template<typename _MatrixType>
+ struct pardiso_traits< PardisoLU<_MatrixType> >
+ {
+ typedef _MatrixType MatrixType;
+ typedef typename _MatrixType::Scalar Scalar;
+ typedef typename _MatrixType::RealScalar RealScalar;
+ typedef typename _MatrixType::Index Index;
+ };
+
+ template<typename _MatrixType, int Options>
+ struct pardiso_traits< PardisoLLT<_MatrixType, Options> >
+ {
+ typedef _MatrixType MatrixType;
+ typedef typename _MatrixType::Scalar Scalar;
+ typedef typename _MatrixType::RealScalar RealScalar;
+ typedef typename _MatrixType::Index Index;
+ };
+
+ template<typename _MatrixType, int Options>
+ struct pardiso_traits< PardisoLDLT<_MatrixType, Options> >
+ {
+ typedef _MatrixType MatrixType;
+ typedef typename _MatrixType::Scalar Scalar;
+ typedef typename _MatrixType::RealScalar RealScalar;
+ typedef typename _MatrixType::Index Index;
+ };
+
+}
+
+template<class Derived>
+class PardisoImpl
+{
+ typedef internal::pardiso_traits<Derived> Traits;
+ public:
+ typedef typename Traits::MatrixType MatrixType;
+ typedef typename Traits::Scalar Scalar;
+ typedef typename Traits::RealScalar RealScalar;
+ typedef typename Traits::Index Index;
+ typedef SparseMatrix<Scalar,RowMajor,Index> SparseMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ enum {
+ ScalarIsComplex = NumTraits<Scalar>::IsComplex
+ };
+
+ PardisoImpl()
+ {
+ eigen_assert((sizeof(Index) >= sizeof(_INTEGER_t) && sizeof(Index) <= 8) && "Non-supported index type");
+ m_iparm.setZero();
+ m_msglvl = 0; // No output
+ m_initialized = false;
+ }
+
+ ~PardisoImpl()
+ {
+ pardisoRelease();
+ }
+
+ inline Index cols() const { return m_size; }
+ inline Index rows() const { return m_size; }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the factorization reports a numerical problem.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_initialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /** \warning for advanced usage only.
+ * \returns a reference to the parameter array controlling PARDISO.
+ * See the PARDISO manual to know how to use it. */
+ Array<Index,64,1>& pardisoParameterArray()
+ {
+ return m_iparm;
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ Derived& analyzePattern(const MatrixType& matrix);
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ Derived& factorize(const MatrixType& matrix);
+
+ Derived& compute(const MatrixType& matrix);
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<PardisoImpl, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_initialized && "Pardiso solver is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<PardisoImpl, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<PardisoImpl, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_initialized && "Pardiso solver is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<PardisoImpl, Rhs>(*this, b.derived());
+ }
+
+ Derived& derived()
+ {
+ return *static_cast<Derived*>(this);
+ }
+ const Derived& derived() const
+ {
+ return *static_cast<const Derived*>(this);
+ }
+
+ template<typename BDerived, typename XDerived>
+ bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const;
+
+ /** \internal */
+ template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
+ void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+ {
+ eigen_assert(m_size==b.rows());
+
+ // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
+ static const int NbColsAtOnce = 4;
+ int rhsCols = b.cols();
+ int size = b.rows();
+ // Pardiso cannot solve in-place,
+ // so we need two temporaries
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_rhs(size,rhsCols);
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_res(size,rhsCols);
+ for(int k=0; k<rhsCols; k+=NbColsAtOnce)
+ {
+ int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
+ tmp_rhs.leftCols(actualCols) = b.middleCols(k,actualCols);
+ tmp_res.leftCols(actualCols) = derived().solve(tmp_rhs.leftCols(actualCols));
+ dest.middleCols(k,actualCols) = tmp_res.leftCols(actualCols).sparseView();
+ }
+ }
+
+ protected:
+ void pardisoRelease()
+ {
+ if(m_initialized) // Factorization ran at least once
+ {
+ internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0,
+ m_iparm.data(), m_msglvl, 0, 0);
+ }
+ }
+
+ void pardisoInit(int type)
+ {
+ m_type = type;
+ bool symmetric = std::abs(m_type) < 10;
+ m_iparm[0] = 1; // No solver default
+ m_iparm[1] = 3; // use Metis for the ordering
+ m_iparm[2] = 1; // Numbers of processors, value of OMP_NUM_THREADS
+ m_iparm[3] = 0; // No iterative-direct algorithm
+ m_iparm[4] = 0; // No user fill-in reducing permutation
+ m_iparm[5] = 0; // Write solution into x
+ m_iparm[6] = 0; // Not in use
+ m_iparm[7] = 2; // Max numbers of iterative refinement steps
+ m_iparm[8] = 0; // Not in use
+ m_iparm[9] = 13; // Perturb the pivot elements with 1E-13
+ m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS
+ m_iparm[11] = 0; // Not in use
+ m_iparm[12] = symmetric ? 0 : 1; // Maximum weighted matching algorithm is switched-off (default for symmetric).
+ // Try m_iparm[12] = 1 in case of inappropriate accuracy
+ m_iparm[13] = 0; // Output: Number of perturbed pivots
+ m_iparm[14] = 0; // Not in use
+ m_iparm[15] = 0; // Not in use
+ m_iparm[16] = 0; // Not in use
+ m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU
+ m_iparm[18] = -1; // Output: Mflops for LU factorization
+ m_iparm[19] = 0; // Output: Numbers of CG Iterations
+
+ m_iparm[20] = 0; // 1x1 pivoting
+ m_iparm[26] = 0; // No matrix checker
+ m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0;
+ m_iparm[34] = 1; // C indexing
+ m_iparm[59] = 1; // Automatic switch between In-Core and Out-of-Core modes
+ }
+
+ protected:
+ // cached data to reduce reallocation, etc.
+
+ void manageErrorCode(Index error)
+ {
+ switch(error)
+ {
+ case 0:
+ m_info = Success;
+ break;
+ case -4:
+ case -7:
+ m_info = NumericalIssue;
+ break;
+ default:
+ m_info = InvalidInput;
+ }
+ }
+
+ mutable SparseMatrixType m_matrix;
+ ComputationInfo m_info;
+ bool m_initialized, m_analysisIsOk, m_factorizationIsOk;
+ Index m_type, m_msglvl;
+ mutable void *m_pt[64];
+ mutable Array<Index,64,1> m_iparm;
+ mutable IntColVectorType m_perm;
+ Index m_size;
+
+ private:
+ PardisoImpl(PardisoImpl &) {}
+};
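
Advanced tuning goes through pardisoParameterArray() after construction (pardisoInit() presets the whole array) and before compute(); for instance, raising the refinement-step limit preset above. This is an illustrative sketch using the PardisoLU class defined below; index meanings are as in the PARDISO manual:

    PardisoLU<SparseMatrix<double> > solver;
    solver.pardisoParameterArray()[7] = 4; // allow up to 4 iterative refinement steps
    solver.compute(A);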
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::compute(const MatrixType& a)
+{
+ m_size = a.rows();
+ eigen_assert(a.rows() == a.cols());
+
+ pardisoRelease();
+ memset(m_pt, 0, sizeof(m_pt));
+ m_perm.setZero(m_size);
+ derived().getMatrix(a);
+
+ Index error;
+ error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 12, m_size,
+ m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+ m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+
+ manageErrorCode(error);
+ m_analysisIsOk = true;
+ m_factorizationIsOk = true;
+ m_initialized = true;
+ return derived();
+}
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a)
+{
+ m_size = a.rows();
+ eigen_assert(m_size == a.cols());
+
+ pardisoRelease();
+ memset(m_pt, 0, sizeof(m_pt));
+ m_perm.setZero(m_size);
+ derived().getMatrix(a);
+
+ Index error;
+ error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 11, m_size,
+ m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+ m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+
+ manageErrorCode(error);
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ m_initialized = true;
+ return derived();
+}
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::factorize(const MatrixType& a)
+{
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ eigen_assert(m_size == a.rows() && m_size == a.cols());
+
+ derived().getMatrix(a);
+
+ Index error;
+ error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 22, m_size,
+ m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+ m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+
+ manageErrorCode(error);
+ m_factorizationIsOk = true;
+ return derived();
+}
+
+template<class Base>
+template<typename BDerived,typename XDerived>
+bool PardisoImpl<Base>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const
+{
+ if(m_iparm[0] == 0) // Factorization was not computed
+ return false;
+
+ //Index n = m_matrix.rows();
+ Index nrhs = Index(b.cols());
+ eigen_assert(m_size==b.rows());
+ eigen_assert(((MatrixBase<BDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major right hand sides are not supported");
+ eigen_assert(((MatrixBase<XDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major matrices of unknowns are not supported");
+ eigen_assert(((nrhs == 1) || b.outerStride() == b.rows()));
+
+
+// switch (transposed) {
+// case SvNoTrans : m_iparm[11] = 0 ; break;
+// case SvTranspose : m_iparm[11] = 2 ; break;
+// case SvAdjoint : m_iparm[11] = 1 ; break;
+// default:
+// //std::cerr << "Eigen: transposition option \"" << transposed << "\" not supported by the PARDISO backend\n";
+// m_iparm[11] = 0;
+// }
+
+ Scalar* rhs_ptr = const_cast<Scalar*>(b.derived().data());
+ Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp;
+
+ // Pardiso cannot solve in-place
+ if(rhs_ptr == x.derived().data())
+ {
+ tmp = b;
+ rhs_ptr = tmp.data();
+ }
+
+ Index error;
+ error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 33, m_size,
+ m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+ m_perm.data(), nrhs, m_iparm.data(), m_msglvl,
+ rhs_ptr, x.derived().data());
+
+ return error==0;
+}
+
+
+/** \ingroup PardisoSupport_Module
+ * \class PardisoLU
+ * \brief A sparse direct LU factorization and solver based on the PARDISO library
+ *
+ * This class allows solving A.X = B sparse linear problems via a direct LU factorization
+ * using the Intel MKL PARDISO library. The sparse matrix A must be square and invertible.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename MatrixType>
+class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >
+{
+ protected:
+ typedef PardisoImpl< PardisoLU<MatrixType> > Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ using Base::pardisoInit;
+ using Base::m_matrix;
+ friend class PardisoImpl< PardisoLU<MatrixType> >;
+
+ public:
+
+ using Base::compute;
+ using Base::solve;
+
+ PardisoLU()
+ : Base()
+ {
+ pardisoInit(Base::ScalarIsComplex ? 13 : 11);
+ }
+
+ PardisoLU(const MatrixType& matrix)
+ : Base()
+ {
+ pardisoInit(Base::ScalarIsComplex ? 13 : 11);
+ compute(matrix);
+ }
+ protected:
+ void getMatrix(const MatrixType& matrix)
+ {
+ m_matrix = matrix;
+ }
+
+ private:
+ PardisoLU(PardisoLU& ) {}
+};
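
A usage sketch mirroring the PaStiX one above; any SparseMatrix<> input is accepted, and getMatrix() copies it into the row-major storage PARDISO expects:

    SparseMatrix<double> A; // square and invertible, filled elsewhere
    VectorXd b;
    PardisoLU<SparseMatrix<double> > lu;
    lu.analyzePattern(A);   // symbolic phase, reusable for same-pattern matrices
    lu.factorize(A);        // numerical phase
    VectorXd x = lu.solve(b);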
+
+/** \ingroup PardisoSupport_Module
+ * \class PardisoLLT
+ * \brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library
+ *
+ * This class allows solving A.X = B sparse linear problems via an LL^T Cholesky factorization
+ * using the Intel MKL PARDISO library. The sparse matrix A must be selfadjoint and positive definite.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used.
+ * Upper|Lower can be used to tell both triangular parts can be used as input.
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename MatrixType, int _UpLo>
+class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
+{
+ protected:
+ typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::Index Index;
+ typedef typename Base::RealScalar RealScalar;
+ using Base::pardisoInit;
+ using Base::m_matrix;
+ friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >;
+
+ public:
+
+ enum { UpLo = _UpLo };
+ using Base::compute;
+ using Base::solve;
+
+ PardisoLLT()
+ : Base()
+ {
+ pardisoInit(Base::ScalarIsComplex ? 4 : 2);
+ }
+
+ PardisoLLT(const MatrixType& matrix)
+ : Base()
+ {
+ pardisoInit(Base::ScalarIsComplex ? 4 : 2);
+ compute(matrix);
+ }
+
+ protected:
+
+ void getMatrix(const MatrixType& matrix)
+ {
+ // PARDISO supports only upper, row-major matrices
+ PermutationMatrix<Dynamic,Dynamic,Index> p_null;
+ m_matrix.resize(matrix.rows(), matrix.cols());
+ m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
+ }
+
+ private:
+ PardisoLLT(PardisoLLT& ) {}
+};
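+
+// Editor's sketch (not part of the original source), assuming a selfadjoint
+// positive definite SparseMatrix<double> A of which only the lower triangle is
+// stored; getMatrix() above copies it into the upper triangular form PARDISO expects:
+//
+//   Eigen::PardisoLLT<Eigen::SparseMatrix<double>, Eigen::Lower> llt(A);
+//   Eigen::VectorXd x = llt.solve(b);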
+
+/** \ingroup PardisoSupport_Module
+ * \class PardisoLDLT
+ * \brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library
+ *
+ * This class allows solving A.X = B sparse linear problems via an LDL^T Cholesky factorization
+ * using the Intel MKL PARDISO library. The sparse matrix A is assumed to be selfadjoint and positive definite.
+ * For complex matrices, A can also be merely symmetric; see the \a Options template parameter.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \tparam MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
+ * \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used.
+ * Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix.
+ * Upper|Lower can be used to indicate that both triangular parts can be used as input.
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename MatrixType, int Options>
+class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
+{
+ protected:
+ typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::Index Index;
+ typedef typename Base::RealScalar RealScalar;
+ using Base::pardisoInit;
+ using Base::m_matrix;
+ friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >;
+
+ public:
+
+ using Base::compute;
+ using Base::solve;
+ enum { UpLo = Options&(Upper|Lower) };
+
+ PardisoLDLT()
+ : Base()
+ {
+ pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
+ }
+
+ PardisoLDLT(const MatrixType& matrix)
+ : Base()
+ {
+ pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
+ compute(matrix);
+ }
+
+ void getMatrix(const MatrixType& matrix)
+ {
+ // PARDISO supports only upper, row-major matrices
+ PermutationMatrix<Dynamic,Dynamic,Index> p_null;
+ m_matrix.resize(matrix.rows(), matrix.cols());
+ m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
+ }
+
+ private:
+ PardisoLDLT(PardisoLDLT& ) {}
+};
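+
+// Editor's sketch (not part of the original source): for a complex matrix that is
+// symmetric but not selfadjoint, the Symmetric option selects PARDISO matrix type 6
+// in pardisoInit() above:
+//
+//   typedef Eigen::SparseMatrix<std::complex<double> > CplxSparse;
+//   Eigen::PardisoLDLT<CplxSparse, Eigen::Symmetric|Eigen::Upper> ldlt(A);
+//   x = ldlt.solve(b);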
+
+namespace internal {
+
+template<typename _Derived, typename Rhs>
+struct solve_retval<PardisoImpl<_Derived>, Rhs>
+ : solve_retval_base<PardisoImpl<_Derived>, Rhs>
+{
+ typedef PardisoImpl<_Derived> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<PardisoImpl<Derived>, Rhs>
+ : sparse_solve_retval_base<PardisoImpl<Derived>, Rhs>
+{
+ typedef PardisoImpl<Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve_sparse(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARDISOSUPPORT_H
diff --git a/extern/Eigen3/Eigen/src/QR/CMakeLists.txt b/extern/Eigen3/Eigen/src/QR/CMakeLists.txt
new file mode 100644
index 00000000000..96f43d7f5b3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/QR/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_QR_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_QR_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/QR COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h
new file mode 100644
index 00000000000..0ad66d3f8d3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h
@@ -0,0 +1,98 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Householder QR decomposition of a matrix with column pivoting based on
+ * LAPACKE_?geqp3 function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_MKL_H
+#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_QR_COLPIV(EIGTYPE, MKLTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \
+template<> \
+ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >& \
+ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >::compute( \
+ const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix) \
+\
+{ \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
+ typedef MatrixType::Scalar Scalar; \
+ typedef MatrixType::RealScalar RealScalar; \
+ Index rows = matrix.rows();\
+ Index cols = matrix.cols();\
+ Index size = matrix.diagonalSize();\
+\
+ m_qr = matrix;\
+ m_hCoeffs.resize(size);\
+\
+ m_colsTranspositions.resize(cols);\
+ /*Index number_of_transpositions = 0;*/ \
+\
+ m_nonzero_pivots = 0; \
+ m_maxpivot = RealScalar(0);\
+ m_colsPermutation.resize(cols); \
+ m_colsPermutation.indices().setZero(); \
+\
+ lapack_int lda = m_qr.outerStride(), i; \
+ lapack_int matrix_order = MKLCOLROW; \
+ LAPACKE_##MKLPREFIX##geqp3( matrix_order, rows, cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \
+ m_isInitialized = true; \
+ m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \
+ m_hCoeffs.adjointInPlace(); \
+ RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); \
+ lapack_int *perm = m_colsPermutation.indices().data(); \
+ for(i=0;i<size;i++) { \
+ m_nonzero_pivots += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold);\
+ } \
+ for(i=0;i<cols;i++) perm[i]--;\
+\
+ /*m_det_pq = (number_of_transpositions%2) ? -1 : 1; // TODO: It's not needed now; fix upon availability in Eigen */ \
+\
+ return *this; \
+}
+
+EIGEN_MKL_QR_COLPIV(double, double, d, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_QR_COLPIV(float, float, s, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_QR_COLPIV(dcomplex, MKL_Complex16, z, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_QR_COLPIV(scomplex, MKL_Complex8, c, ColMajor, LAPACK_COL_MAJOR)
+
+EIGEN_MKL_QR_COLPIV(double, double, d, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_QR_COLPIV(float, float, s, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_QR_COLPIV(dcomplex, MKL_Complex16, z, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_QR_COLPIV(scomplex, MKL_Complex8, c, RowMajor, LAPACK_ROW_MAJOR)
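+
+// Editor's note (illustration only): with the specializations above in effect, an
+// ordinary decomposition such as
+//
+//   Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
+//
+// is routed to LAPACKE_dgeqp3 instead of Eigen's generic kernel. LAPACK returns
+// 1-based pivot indices, hence the perm[i]-- loop in the macro body.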
+
+} // end namespace Eigen
+
+#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_MKL_H
diff --git a/extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h b/extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h
new file mode 100644
index 00000000000..5313de604d2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h
@@ -0,0 +1,69 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Householder QR decomposition of a matrix w/o pivoting based on
+ * LAPACKE_?geqrf function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_QR_MKL_H
+#define EIGEN_QR_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_QR_NOPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template<typename MatrixQR, typename HCoeffs> \
+void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, \
+ typename MatrixQR::Index maxBlockSize=32, \
+ EIGTYPE* tempData = 0) \
+{ \
+ lapack_int m = mat.rows(); \
+ lapack_int n = mat.cols(); \
+ lapack_int lda = mat.outerStride(); \
+ lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+ LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \
+ hCoeffs.adjointInPlace(); \
+\
+}
+
+EIGEN_MKL_QR_NOPIV(double, double, d)
+EIGEN_MKL_QR_NOPIV(float, float, s)
+EIGEN_MKL_QR_NOPIV(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_QR_NOPIV(scomplex, MKL_Complex8, c)
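+
+// Editor's note (illustration only): these overloads replace the generic
+// householder_qr_inplace_blocked(), so e.g. Eigen::HouseholderQR<Eigen::MatrixXd>
+// ends up in LAPACKE_dgeqrf; the adjointInPlace() call above adapts LAPACK's tau
+// coefficients to Eigen's Householder convention.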
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_QR_MKL_H
diff --git a/extern/Eigen3/Eigen/src/SVD/CMakeLists.txt b/extern/Eigen3/Eigen/src/SVD/CMakeLists.txt
new file mode 100644
index 00000000000..55efc44b129
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SVD/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SVD_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SVD_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SVD COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h b/extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h
new file mode 100644
index 00000000000..1eeafca49dd
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h
@@ -0,0 +1,92 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ * Content : Eigen bindings to Intel(R) MKL
+ * Singular Value Decomposition - SVD.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_JACOBISVD_MKL_H
+#define EIGEN_JACOBISVD_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_SVD(EIGTYPE, MKLTYPE, MKLRTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \
+template<> \
+JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
+JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
+{ \
+ typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
+ typedef MatrixType::Scalar Scalar; \
+ typedef MatrixType::RealScalar RealScalar; \
+ allocate(matrix.rows(), matrix.cols(), computationOptions); \
+\
+ /*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
+ m_nonzeroSingularValues = m_diagSize; \
+\
+ lapack_int lda = matrix.outerStride(), ldu, ldvt; \
+ lapack_int matrix_order = MKLCOLROW; \
+ char jobu, jobvt; \
+ MKLTYPE *u, *vt, dummy; \
+ jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \
+ jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \
+ if (computeU()) { \
+ ldu = m_matrixU.outerStride(); \
+ u = (MKLTYPE*)m_matrixU.data(); \
+ } else { ldu=1; u=&dummy; }\
+ MatrixType localV; \
+ ldvt = (m_computeFullV) ? m_cols : (m_computeThinV) ? m_diagSize : 1; \
+ if (computeV()) { \
+ localV.resize(ldvt, m_cols); \
+ vt = (MKLTYPE*)localV.data(); \
+ } else { ldvt=1; vt=&dummy; }\
+ Matrix<MKLRTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
+ MatrixType m_temp; m_temp = matrix; \
+ LAPACKE_##MKLPREFIX##gesvd( matrix_order, jobu, jobvt, m_rows, m_cols, (MKLTYPE*)m_temp.data(), lda, (MKLRTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \
+ if (computeV()) m_matrixV = localV.adjoint(); \
+ /* for(int i=0;i<m_diagSize;i++) if (m_singularValues.coeffRef(i) < precision) { m_nonzeroSingularValues--; m_singularValues.coeffRef(i)=RealScalar(0);}*/ \
+ m_isInitialized = true; \
+ return *this; \
+}
+
+EIGEN_MKL_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(dcomplex, MKL_Complex16, double, z, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(scomplex, MKL_Complex8, float , c, ColMajor, LAPACK_COL_MAJOR)
+
+EIGEN_MKL_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(dcomplex, MKL_Complex16, double, z, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(scomplex, MKL_Complex8, float , c, RowMajor, LAPACK_ROW_MAJOR)
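+
+// Editor's note (illustration only): with the specializations above,
+//
+//   Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
+//
+// delegates to LAPACKE_dgesvd. Note that ?gesvd returns V^T, which is why the
+// local matrix is adjointed into m_matrixV in the macro body.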
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_MKL_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/AmbiVector.h b/extern/Eigen3/Eigen/src/Sparse/AmbiVector.h
new file mode 100644
index 00000000000..2ea8ba3096b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/AmbiVector.h
@@ -0,0 +1,379 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_AMBIVECTOR_H
+#define EIGEN_AMBIVECTOR_H
+
+/** \internal
+ * Hybrid sparse/dense vector class designed for intensive read-write operations.
+ *
+ * See BasicSparseLLT and SparseProduct for usage examples.
+ */
+template<typename _Scalar, typename _Index>
+class AmbiVector
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ AmbiVector(Index size)
+ : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+ {
+ resize(size);
+ }
+
+ void init(double estimatedDensity);
+ void init(int mode);
+
+ Index nonZeros() const;
+
+ /** Specifies a sub-vector to work on */
+ void setBounds(Index start, Index end) { m_start = start; m_end = end; }
+
+ void setZero();
+
+ void restart();
+ Scalar& coeffRef(Index i);
+ Scalar& coeff(Index i);
+
+ class Iterator;
+
+ ~AmbiVector() { delete[] m_buffer; }
+
+ void resize(Index size)
+ {
+ if (m_allocatedSize < size)
+ reallocate(size);
+ m_size = size;
+ }
+
+ Index size() const { return m_size; }
+
+ protected:
+
+ void reallocate(Index size)
+ {
+ // if the size of the matrix is not too large, let's allocate a bit more than needed such
+ // that we can handle a dense vector even in sparse mode.
+ delete[] m_buffer;
+ if (size<1000)
+ {
+ Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+ m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[allocSize];
+ }
+ else
+ {
+ m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[size];
+ }
+ m_size = size;
+ m_start = 0;
+ m_end = m_size;
+ }
+
+ void reallocateSparse()
+ {
+ Index copyElements = m_allocatedElements;
+ m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
+ Index allocSize = m_allocatedElements * sizeof(ListEl);
+ allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
+ Scalar* newBuffer = new Scalar[allocSize];
+ memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
+ delete[] m_buffer;
+ m_buffer = newBuffer;
+ }
+
+ protected:
+ // element type of the linked list
+ struct ListEl
+ {
+ Index next;
+ Index index;
+ Scalar value;
+ };
+
+ // used to store data in both modes
+ Scalar* m_buffer;
+ Scalar m_zero;
+ Index m_size;
+ Index m_start;
+ Index m_end;
+ Index m_allocatedSize;
+ Index m_allocatedElements;
+ Index m_mode;
+
+ // linked list mode
+ Index m_llStart;
+ Index m_llCurrent;
+ Index m_llSize;
+};
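+
+// Editor's sketch (not part of the original source) of the intended write/read
+// cycle, assuming Scalar=double, Index=int and a hypothetical consumer use():
+//
+//   AmbiVector<double,int> vec(n);
+//   vec.init(0.05);          // estimated density < 0.1 selects sparse (linked-list) mode
+//   vec.setZero();
+//   vec.restart();           // mandatory before writing at a smaller index again
+//   vec.coeffRef(i) += 2.0;  // sorted insertion into the linked list
+//   for (AmbiVector<double,int>::Iterator it(vec); it; ++it)
+//     use(it.index(), it.value());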
+
+/** \returns the number of non zeros in the current sub vector */
+template<typename _Scalar,typename _Index>
+_Index AmbiVector<_Scalar,_Index>::nonZeros() const
+{
+ if (m_mode==IsSparse)
+ return m_llSize;
+ else
+ return m_end - m_start;
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
+{
+ if (estimatedDensity>0.1)
+ init(IsDense);
+ else
+ init(IsSparse);
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(int mode)
+{
+ m_mode = mode;
+ if (m_mode==IsSparse)
+ {
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+/** Must be called whenever we might perform a write access
+ * with an index smaller than the previous one.
+ *
+ * Don't worry, this function is extremely cheap.
+ */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::restart()
+{
+ m_llCurrent = m_llStart;
+}
+
+/** Set all coefficients of current subvector to zero */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::setZero()
+{
+ if (m_mode==IsDense)
+ {
+ for (Index i=m_start; i<m_end; ++i)
+ m_buffer[i] = Scalar(0);
+ }
+ else
+ {
+ eigen_assert(m_mode==IsSparse);
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ // TODO factorize the following code to reduce code generation
+ eigen_assert(m_mode==IsSparse);
+ if (m_llSize==0)
+ {
+ // this is the first element
+ m_llStart = 0;
+ m_llCurrent = 0;
+ ++m_llSize;
+ llElements[0].value = Scalar(0);
+ llElements[0].index = i;
+ llElements[0].next = -1;
+ return llElements[0].value;
+ }
+ else if (i<llElements[m_llStart].index)
+ {
+ // this is going to be the new first element of the list
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = m_llStart;
+ m_llStart = m_llSize;
+ ++m_llSize;
+ m_llCurrent = m_llStart;
+ return el.value;
+ }
+ else
+ {
+ Index nextel = llElements[m_llCurrent].next;
+ eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
+ while (nextel >= 0 && llElements[nextel].index<=i)
+ {
+ m_llCurrent = nextel;
+ nextel = llElements[nextel].next;
+ }
+
+ if (llElements[m_llCurrent].index==i)
+ {
+ // the coefficient already exists and we found it !
+ return llElements[m_llCurrent].value;
+ }
+ else
+ {
+ if (m_llSize>=m_allocatedElements)
+ {
+ reallocateSparse();
+ llElements = reinterpret_cast<ListEl*>(m_buffer);
+ }
+ eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
+ // let's insert a new coefficient
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = llElements[m_llCurrent].next;
+ llElements[m_llCurrent].next = m_llSize;
+ ++m_llSize;
+ return el.value;
+ }
+ }
+ }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ eigen_assert(m_mode==IsSparse);
+ if ((m_llSize==0) || (i<llElements[m_llStart].index))
+ {
+ return m_zero;
+ }
+ else
+ {
+ Index elid = m_llStart;
+ while (elid >= 0 && llElements[elid].index<i)
+ elid = llElements[elid].next;
+
+ if (elid>=0 && llElements[elid].index==i)
+ return llElements[elid].value; // the element found, not the insertion cursor
+ else
+ return m_zero;
+ }
+ }
+}
+
+/** Iterator over the nonzero coefficients */
+template<typename _Scalar,typename _Index>
+class AmbiVector<_Scalar,_Index>::Iterator
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** Default constructor
+ * \param vec the vector on which we iterate
+ * \param epsilon the minimal value used to prune zero coefficients.
+ * In practice, all coefficients having a magnitude smaller than \a epsilon
+ * are skipped.
+ */
+ Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*NumTraits<RealScalar>::dummy_precision())
+ : m_vector(vec)
+ {
+ m_epsilon = epsilon;
+ m_isDense = m_vector.m_mode==IsDense;
+ if (m_isDense)
+ {
+ m_currentEl = 0; // this is to avoid a compilation warning
+ m_cachedValue = 0; // this is to avoid a compilation warning
+ m_cachedIndex = m_vector.m_start-1;
+ ++(*this);
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ m_currentEl = m_vector.m_llStart;
+ while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon)
+ m_currentEl = llElements[m_currentEl].next;
+ if (m_currentEl<0)
+ {
+ m_cachedValue = 0; // this is to avoid a compilation warning
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ }
+
+ Index index() const { return m_cachedIndex; }
+ Scalar value() const { return m_cachedValue; }
+
+ operator bool() const { return m_cachedIndex>=0; }
+
+ Iterator& operator++()
+ {
+ if (m_isDense)
+ {
+ do {
+ ++m_cachedIndex;
+ } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+ if (m_cachedIndex<m_vector.m_end)
+ m_cachedValue = m_vector.m_buffer[m_cachedIndex];
+ else
+ m_cachedIndex=-1;
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ do {
+ m_currentEl = llElements[m_currentEl].next;
+ } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon);
+ if (m_currentEl<0)
+ {
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ return *this;
+ }
+
+ protected:
+ const AmbiVector& m_vector; // the target vector
+ Index m_currentEl; // the current element in sparse/linked-list mode
+ RealScalar m_epsilon; // epsilon used to prune zero coefficients
+ Index m_cachedIndex; // current coordinate
+ Scalar m_cachedValue; // current value
+ bool m_isDense; // mode of the vector
+};
+
+
+#endif // EIGEN_AMBIVECTOR_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h b/extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h
new file mode 100644
index 00000000000..b3bde272ec2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h
@@ -0,0 +1,239 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPRESSED_STORAGE_H
+#define EIGEN_COMPRESSED_STORAGE_H
+
+/** Stores a sparse set of values as a list of values and a list of indices.
+ *
+ */
+template<typename _Scalar,typename _Index>
+class CompressedStorage
+{
+ public:
+
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+
+ protected:
+
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ public:
+
+ CompressedStorage()
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {}
+
+ CompressedStorage(size_t size)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ resize(size);
+ }
+
+ CompressedStorage(const CompressedStorage& other)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ *this = other;
+ }
+
+ CompressedStorage& operator=(const CompressedStorage& other)
+ {
+ resize(other.size());
+ memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
+ memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
+ return *this;
+ }
+
+ void swap(CompressedStorage& other)
+ {
+ std::swap(m_values, other.m_values);
+ std::swap(m_indices, other.m_indices);
+ std::swap(m_size, other.m_size);
+ std::swap(m_allocatedSize, other.m_allocatedSize);
+ }
+
+ ~CompressedStorage()
+ {
+ delete[] m_values;
+ delete[] m_indices;
+ }
+
+ void reserve(size_t size)
+ {
+ size_t newAllocatedSize = m_size + size;
+ if (newAllocatedSize > m_allocatedSize)
+ reallocate(newAllocatedSize);
+ }
+
+ void squeeze()
+ {
+ if (m_allocatedSize>m_size)
+ reallocate(m_size);
+ }
+
+ void resize(size_t size, float reserveSizeFactor = 0)
+ {
+ if (m_allocatedSize<size)
+ reallocate(size + size_t(reserveSizeFactor*size));
+ m_size = size;
+ }
+
+ void append(const Scalar& v, Index i)
+ {
+ Index id = static_cast<Index>(m_size);
+ resize(m_size+1, 1);
+ m_values[id] = v;
+ m_indices[id] = i;
+ }
+
+ inline size_t size() const { return m_size; }
+ inline size_t allocatedSize() const { return m_allocatedSize; }
+ inline void clear() { m_size = 0; }
+
+ inline Scalar& value(size_t i) { return m_values[i]; }
+ inline const Scalar& value(size_t i) const { return m_values[i]; }
+
+ inline Index& index(size_t i) { return m_indices[i]; }
+ inline const Index& index(size_t i) const { return m_indices[i]; }
+
+ static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
+ {
+ CompressedStorage res;
+ res.m_indices = indices;
+ res.m_values = values;
+ res.m_allocatedSize = res.m_size = size;
+ return res;
+ }
+
+ /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+ inline Index searchLowerIndex(Index key) const
+ {
+ return searchLowerIndex(0, m_size, key);
+ }
+
+ /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+ inline Index searchLowerIndex(size_t start, size_t end, Index key) const
+ {
+ while(end>start)
+ {
+ size_t mid = (end+start)>>1;
+ if (m_indices[mid]<key)
+ start = mid+1;
+ else
+ end = mid;
+ }
+ return static_cast<Index>(start);
+ }
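+
+ // Editor's note (worked example, illustration only): for stored indices
+ // {1,4,7} and key 5 the bisection returns 2, i.e. the first position whose
+ // index is not smaller than the key.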
+
+ /** \returns the stored value at index \a key
+ * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+ inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (m_size==0)
+ return defaultValue;
+ else if (key==m_indices[m_size-1])
+ return m_values[m_size-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(0,m_size-1,key);
+ return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** Like at(), but the search is performed in the range [start,end) */
+ inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (start>=end)
+ return defaultValue;
+ else if (end>start && key==m_indices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(start,end-1,key);
+ return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** \returns a reference to the value at index \a key
+ * If the value does not exist, then the value \a defaultValue is inserted
+ * such that the keys are sorted. */
+ inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
+ {
+ size_t id = searchLowerIndex(0,m_size,key);
+ if (id>=m_size || m_indices[id]!=key)
+ {
+ resize(m_size+1,1);
+ for (size_t j=m_size-1; j>id; --j)
+ {
+ m_indices[j] = m_indices[j-1];
+ m_values[j] = m_values[j-1];
+ }
+ m_indices[id] = key;
+ m_values[id] = defaultValue;
+ }
+ return m_values[id];
+ }
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ size_t k = 0;
+ size_t n = size();
+ for (size_t i=0; i<n; ++i)
+ {
+ if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
+ {
+ value(k) = value(i);
+ index(k) = index(i);
+ ++k;
+ }
+ }
+ resize(k,0);
+ }
+
+ protected:
+
+ inline void reallocate(size_t size)
+ {
+ Scalar* newValues = new Scalar[size];
+ Index* newIndices = new Index[size];
+ size_t copySize = (std::min)(size, m_size);
+ // copy
+ memcpy(newValues, m_values, copySize * sizeof(Scalar));
+ memcpy(newIndices, m_indices, copySize * sizeof(Index));
+ // delete old stuff
+ delete[] m_values;
+ delete[] m_indices;
+ m_values = newValues;
+ m_indices = newIndices;
+ m_allocatedSize = size;
+ }
+
+ protected:
+ Scalar* m_values;
+ Index* m_indices;
+ size_t m_size;
+ size_t m_allocatedSize;
+
+};
+
+#endif // EIGEN_COMPRESSED_STORAGE_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/CoreIterators.h b/extern/Eigen3/Eigen/src/Sparse/CoreIterators.h
new file mode 100644
index 00000000000..b4beaeee69e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/CoreIterators.h
@@ -0,0 +1,71 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COREITERATORS_H
+#define EIGEN_COREITERATORS_H
+
+/* This file contains the InnerIterator definitions for the expressions defined in Eigen/Core
+ */
+
+/** \class InnerIterator
+ * \brief An InnerIterator allows looping over the elements of a sparse (or dense) matrix or expression
+ *
+ * todo
+ */
+
+// generic version for dense matrix and expressions
+template<typename Derived> class DenseBase<Derived>::InnerIterator
+{
+ protected:
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+
+ enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer)
+ : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.innerSize())
+ {}
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ {
+ return (IsRowMajor) ? m_expression.coeff(m_outer, m_inner)
+ : m_expression.coeff(m_inner, m_outer);
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+ protected:
+ const Derived& m_expression;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
+};
+
+#endif // EIGEN_COREITERATORS_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h
new file mode 100644
index 00000000000..93e75f4c601
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h
@@ -0,0 +1,346 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H
+#define EIGEN_DYNAMIC_SPARSEMATRIX_H
+
+/** \class DynamicSparseMatrix
+ *
+ * \brief A sparse matrix class designed for matrix assembly purpose
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows
+ * random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is
+ * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows
+ * otherwise.
+ *
+ * Internally, the data are stored as a std::vector of compressed vectors. The performance of random writes might
+ * decrease as the number of nonzeros per inner vector increases. In practice, we observed very good performance
+ * up to about 100 nonzeros/vector, and the performance remains relatively good up to 500 nonzeros/vector.
+ *
+ * \see SparseMatrix
+ */
+
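+// Editor's sketch (not part of the original source): the intended assembly
+// pattern, with estimated_nnz a hypothetical guess of the number of nonzeros:
+//
+//   Eigen::DynamicSparseMatrix<double> mat(rows, cols);
+//   mat.reserve(estimated_nnz);
+//   for (...)                       // triplets (i,j,v) in arbitrary order
+//     mat.coeffRef(i, j) += v;      // logarithmic sorted insertion per inner vector
+//   Eigen::SparseMatrix<double> compressed(mat); // compress once assembly is done
+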
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<DynamicSparseMatrix<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = OuterRandomAccessPattern
+ };
+};
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+class DynamicSparseMatrix
+ : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix)
+ // FIXME: why are these operators already available ???
+ // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=)
+ // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=)
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ using Base::operator=;
+ enum {
+ Options = _Options
+ };
+
+ protected:
+
+ typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_innerSize;
+ std::vector<CompressedStorage<Scalar,Index> > m_data;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
+ inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
+
+ std::vector<CompressedStorage<Scalar,Index> >& _data() { return m_data; }
+ const std::vector<CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
+
+ /** \returns the coefficient value at given position \a row, \a col
+ * This operation involves a log(rho*outer_size) binary search.
+ */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data[outer].at(inner);
+ }
+
+ /** \returns a reference to the coefficient value at given position \a row, \a col
+ * This operation involves a log(rho*outer_size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data[outer].atWithInsertion(inner);
+ }
+
+ class InnerIterator;
+
+ void setZero()
+ {
+ for (Index j=0; j<outerSize(); ++j)
+ m_data[j].clear();
+ }
+
+ /** \returns the number of non zero coefficients */
+ Index nonZeros() const
+ {
+ Index res = 0;
+ for (Index j=0; j<outerSize(); ++j)
+ res += static_cast<Index>(m_data[j].size());
+ return res;
+ }
+
+
+
+ void reserve(Index reserveSize = 1000)
+ {
+ if (outerSize()>0)
+ {
+ Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
+ for (Index j=0; j<outerSize(); ++j)
+ {
+ m_data[j].reserve(reserveSizePerVector);
+ }
+ }
+ }
+
+ /** Does nothing: provided for compatibility with SparseMatrix */
+ inline void startVec(Index /*outer*/) {}
+
+ /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one of the given inner vector.
+ *
+ * \sa insert, insertBackByOuterInner */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \sa insertBack */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(outer<Index(m_data.size()) && inner<m_innerSize && "out of range");
+ eigen_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
+ && "wrong sorted insertion");
+ m_data[outer].append(0, inner);
+ return m_data[outer].value(m_data[outer].size()-1);
+ }
+
+ inline Scalar& insert(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index startId = 0;
+ Index id = static_cast<Index>(m_data[outer].size()) - 1;
+ m_data[outer].resize(id+2,1);
+
+ while ( (id >= startId) && (m_data[outer].index(id) > inner) )
+ {
+ m_data[outer].index(id+1) = m_data[outer].index(id);
+ m_data[outer].value(id+1) = m_data[outer].value(id);
+ --id;
+ }
+ m_data[outer].index(id+1) = inner;
+ m_data[outer].value(id+1) = 0;
+ return m_data[outer].value(id+1);
+ }
+
+ /** Does nothing: provided for compatibility with SparseMatrix */
+ inline void finalize() {}
+
+ /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ for (Index j=0; j<outerSize(); ++j)
+ m_data[j].prune(reference,epsilon);
+ }
+
+ /** Resize the matrix without preserving the data (the matrix is set to zero)
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ setZero();
+ if (Index(m_data.size()) != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ void resizeAndKeepData(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ const Index innerSize = IsRowMajor ? cols : rows;
+ if (m_innerSize>innerSize)
+ {
+ // remove all coefficients with innerCoord>=innerSize
+ // TODO
+ //std::cerr << "not implemented yet\n";
+ exit(2);
+ }
+ if (m_data.size() != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ inline DynamicSparseMatrix()
+ : m_innerSize(0), m_data(0)
+ {
+ eigen_assert(innerSize()==0 && outerSize()==0);
+ }
+
+ inline DynamicSparseMatrix(Index rows, Index cols)
+ : m_innerSize(0)
+ {
+ resize(rows, cols);
+ }
+
+ template<typename OtherDerived>
+ explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_innerSize(0)
+ {
+ Base::operator=(other.derived());
+ }
+
+ inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
+ : Base(), m_innerSize(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(DynamicSparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_innerSize, other.m_innerSize);
+ //std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ /** Destructor */
+ inline ~DynamicSparseMatrix() {}
+
+ public:
+
+ /** \deprecated
+ * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
+ {
+ setZero();
+ reserve(reserveSize);
+ }
+
+ /** \deprecated use insert()
+ * inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
+ * 1 - the coefficient does not exist yet
+ * 2 - this the coefficient with greater inner coordinate for the given outer coordinate.
+ * In other words, assuming \c *this is column-major, then there must not exists any nonzero coefficient of coordinates
+ * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
+ *
+ * \see fillrand(), coeffRef()
+ */
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return insertBack(outer,inner);
+ }
+
+ /** \deprecated use insert()
+ * Like fill() but with random inner coordinates.
+ * Compared to the generic coeffRef(), the only limitation is that we assume
+ * the coefficient does not exist yet.
+ */
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
+ {
+ return insert(row,col);
+ }
+
+ /** \deprecated use finalize()
+ * Does nothing. Provided for compatibility with SparseMatrix. */
+ EIGEN_DEPRECATED void endFill() {}
+
+# ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+# include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+# endif
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class DynamicSparseMatrix<Scalar,_Options,_Index>::InnerIterator : public SparseVector<Scalar,_Options>::InnerIterator
+{
+ typedef typename SparseVector<Scalar,_Options>::InnerIterator Base;
+ public:
+ InnerIterator(const DynamicSparseMatrix& mat, Index outer)
+ : Base(mat.m_data[outer]), m_outer(outer)
+ {}
+
+ inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
+ inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+
+ protected:
+ const Index m_outer;
+};
+
+#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h
new file mode 100644
index 00000000000..31a431fb224
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h
@@ -0,0 +1,165 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
+#define EIGEN_MAPPED_SPARSEMATRIX_H
+
+/** \class MappedSparseMatrix
+ *
+ * \brief Sparse matrix mapping an existing, externally owned compressed storage
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ */
+namespace internal {
+template<typename _Scalar, int _Flags, typename _Index>
+struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
+{};
+}
+
+template<typename _Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix
+ : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
+
+ protected:
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index m_nnz;
+ Index* m_outerIndex;
+ Index* m_innerIndices;
+ Scalar* m_values;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+ inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+
+ //----------------------------------------
+ // direct access interface
+ inline const Scalar* _valuePtr() const { return m_values; }
+ inline Scalar* _valuePtr() { return m_values; }
+
+ inline const Index* _innerIndexPtr() const { return m_innerIndices; }
+ inline Index* _innerIndexPtr() { return m_innerIndices; }
+
+ inline const Index* _outerIndexPtr() const { return m_outerIndex; }
+ inline Index* _outerIndexPtr() { return m_outerIndex; }
+ //----------------------------------------
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ if (start==end)
+ return Scalar(0);
+ else if (end>0 && inner==m_innerIndices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+
+ const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const Index id = r-&m_innerIndices[0];
+ return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non-finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
+ const Index id = r-&m_innerIndices[0];
+ eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ return m_values[id];
+ }
+
+ class InnerIterator;
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return m_nnz; }
+
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
+ : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
+ m_innerIndices(innerIndexPtr), m_values(valuePtr)
+ {}
+
+ /** Empty destructor */
+ inline ~MappedSparseMatrix() {}
+};
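+
+// Editor's sketch (not part of the original source): wrapping existing compressed
+// column storage without copying, assuming outer_ptr has cols+1 entries and
+// inner_ptr/value_ptr each have nnz entries, all owned by external code:
+//
+//   Eigen::MappedSparseMatrix<double> map(rows, cols, nnz,
+//                                         outer_ptr, inner_ptr, value_ptr);
+//   Eigen::VectorXd y = map * x;   // usable like a read-only SparseMatrix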
+
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const MappedSparseMatrix& mat, Index outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_id(mat._outerIndexPtr()[outer]),
+ m_start(m_id),
+ m_end(mat._outerIndexPtr()[outer+1])
+ {}
+
+ template<unsigned int Added, unsigned int Removed>
+ InnerIterator(const Flagged<MappedSparseMatrix,Added,Removed>& mat, Index outer)
+ : m_matrix(mat._expression()), m_id(m_matrix._outerIndexPtr()[outer]),
+ m_start(m_id), m_end(m_matrix._outerIndexPtr()[outer+1])
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_matrix._valuePtr()[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix._valuePtr()[m_id]); }
+
+ inline Index index() const { return m_matrix._innerIndexPtr()[m_id]; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
+
+ protected:
+ const MappedSparseMatrix& m_matrix;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
+};
+
+#endif // EIGEN_MAPPED_SPARSEMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseAssign.h b/extern/Eigen3/Eigen/src/Sparse/SparseAssign.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseAssign.h
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseBlock.h b/extern/Eigen3/Eigen/src/Sparse/SparseBlock.h
new file mode 100644
index 00000000000..8079c999994
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseBlock.h
@@ -0,0 +1,465 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_BLOCK_H
+#define EIGEN_SPARSE_BLOCK_H
+
+namespace internal {
+template<typename MatrixType, int Size>
+struct traits<SparseInnerVectorSet<MatrixType, Size> >
+{
+ typedef typename traits<MatrixType>::Scalar Scalar;
+ typedef typename traits<MatrixType>::Index Index;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
+ Flags = MatrixType::Flags,
+ RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ CoeffReadCost = MatrixType::CoeffReadCost
+ };
+};
+} // end namespace internal
+
+template<typename MatrixType, int Size>
+class SparseInnerVectorSet : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
+{
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+// template<typename OtherDerived>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+};
+
+/***************************************************************************
+* specialisation for DynamicSparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, int Size>
+class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size>
+ : public SparseMatrixBase<SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size> >
+{
+ typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType;
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit))
+ {
+ // need to transpose => perform a block evaluation followed by a big swap
+ DynamicSparseMatrix<Scalar,IsRowMajor?RowMajorBit:0> aux(other);
+ *this = aux.markAsRValue();
+ }
+ else
+ {
+ // evaluate/copy vector per vector
+ for (Index j=0; j<m_outerSize.value(); ++j)
+ {
+ SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
+ m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
+ }
+ }
+ return *this;
+ }
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
+
+ Index nonZeros() const
+ {
+ Index count = 0;
+ for (Index j=0; j<m_outerSize.value(); ++j)
+ count += m_matrix._data()[m_outerStart+j].size();
+ return count;
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ eigen_assert(m_matrix.data()[m_outerStart].size()>0);
+      return m_matrix.data()[m_outerStart].value(m_matrix.data()[m_outerStart].size()-1);
+ }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+
+};
+
+
+/***************************************************************************
+* specialisation for SparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, typename _Index, int Size>
+class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
+  : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size> >
+{
+    typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size==1);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _NestedMatrixType;
+      _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);
+      // This assignment is slow if this vector set is not empty
+      // and/or it is not at the end of the nonzeros of the underlying matrix.
+
+ // 1 - eval to a temporary to avoid transposition and/or aliasing issues
+ SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, Index> tmp(other);
+
+ // 2 - let's check whether there is enough allocated memory
+ Index nnz = tmp.nonZeros();
+ Index nnz_previous = nonZeros();
+ Index free_size = matrix.data().allocatedSize() - nnz_previous;
+ std::size_t nnz_head = m_outerStart==0 ? 0 : matrix._outerIndexPtr()[m_outerStart];
+ std::size_t tail = m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()];
+ std::size_t nnz_tail = matrix.nonZeros() - tail;
+
+ if(nnz>free_size)
+ {
+ // realloc manually to reduce copies
+ typename MatrixType::Storage newdata(m_matrix.nonZeros() - nnz_previous + nnz);
+
+ std::memcpy(&newdata.value(0), &m_matrix.data().value(0), nnz_head*sizeof(Scalar));
+ std::memcpy(&newdata.index(0), &m_matrix.data().index(0), nnz_head*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+
+ matrix.data().swap(newdata);
+ }
+ else
+ {
+ // no need to realloc, simply copy the tail at its respective position and insert tmp
+ matrix.data().resize(nnz_head + nnz + nnz_tail);
+
+ if(nnz<nnz_previous)
+ {
+ std::memcpy(&matrix.data().value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+ }
+ else
+ {
+ for(Index i=nnz_tail-1; i>=0; --i)
+ {
+ matrix.data().value(nnz_head+nnz+i) = matrix.data().value(tail+i);
+ matrix.data().index(nnz_head+nnz+i) = matrix.data().index(tail+i);
+ }
+ }
+
+ std::memcpy(&matrix.data().value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+ }
+
+ // update outer index pointers
+ Index p = nnz_head;
+ for(Index k=1; k<m_outerSize.value(); ++k)
+ {
+ matrix._outerIndexPtr()[m_outerStart+k] = p;
+ p += tmp.innerVector(k).nonZeros();
+ }
+ std::ptrdiff_t offset = nnz - nnz_previous;
+ for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+ {
+ matrix._outerIndexPtr()[k] += offset;
+ }
+
+ return *this;
+ }
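+
+    // Worked example (editorial, illustrating the bookkeeping above): suppose
+    // the matrix holds 10 nonzeros, this vector set owns positions [4,7)
+    // (nnz_previous = 3), and the incoming expression has nnz = 5. Then
+    // nnz_head = 4, tail = 7, nnz_tail = 3. With enough allocated space, the
+    // data vector is resized to 4+5+3 = 12, the 3 tail entries are moved
+    // upward element by element (backwards, since the ranges overlap), tmp's
+    // 5 entries land at positions [4,9), and every outer index from the end
+    // of the set onward is shifted by offset = 5-3 = +2.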
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
+
+ inline const Scalar* _valuePtr() const
+ { return m_matrix._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+ inline Scalar* _valuePtr()
+ { return m_matrix.const_cast_derived()._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* _innerIndexPtr() const
+ { return m_matrix._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+ inline Index* _innerIndexPtr()
+ { return m_matrix.const_cast_derived()._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* _outerIndexPtr() const
+ { return m_matrix._outerIndexPtr() + m_outerStart; }
+ inline Index* _outerIndexPtr()
+ { return m_matrix.const_cast_derived()._outerIndexPtr() + m_outerStart; }
+
+ Index nonZeros() const
+ {
+ return std::size_t(m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()])
+ - std::size_t(m_matrix._outerIndexPtr()[m_outerStart]);
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ eigen_assert(nonZeros()>0);
+ return m_matrix._valuePtr()[m_matrix._outerIndexPtr()[m_outerStart+1]-1];
+ }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+
+};
+
+//----------
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major).
+ */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major). Read-only.
+ */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+//----------
+
+/** \returns the \a size rows starting at row \a start of the matrix \c *this. For row-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size rows starting at row \a start of the matrix \c *this. For row-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size columns starting at column \a start of the matrix \c *this. For column-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size columns starting at column \a start of the matrix \c *this. For column-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a outerSize columns (resp. rows) of the matrix \c *this starting at \a outerStart,
+ * if \c *this is col-major (resp. row-major).
+ */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+
+/** \returns the \a outerSize columns (resp. rows) of the matrix \c *this starting at \a outerStart,
+ * if \c *this is col-major (resp. row-major). Read-only.
+ */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+
+#endif // EIGEN_SPARSE_BLOCK_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h
new file mode 100644
index 00000000000..cde5bbc0300
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h
@@ -0,0 +1,375 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
+#define EIGEN_SPARSE_CWISE_BINARY_OP_H
+
+// Here we have to handle 3 cases:
+// 1 - sparse op dense
+// 2 - dense op sparse
+// 3 - sparse op sparse
+// We also need to implement a 4th iterator for:
+// 4 - dense op dense
+// Finally, we also need to distinguish between the product and other operations:
+//   configuration          operation   returned mode
+//   1 - sparse op dense    product     sparse
+//                          generic     dense
+//   2 - dense op sparse    product     sparse
+//                          generic     dense
+//   3 - sparse op sparse   product     sparse
+//                          generic     sparse
+//   4 - dense op dense     product     dense
+//                          generic     dense
+
+namespace internal {
+
+template<> struct promote_storage_type<Dense,Sparse>
+{ typedef Sparse ret; };
+
+template<> struct promote_storage_type<Sparse,Dense>
+{ typedef Sparse ret; };
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
+ typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
+ typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
+class sparse_cwise_binary_op_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
+ : public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+ public:
+ class InnerIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
+ : public internal::sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
+{
+ public:
+ typedef typename Lhs::Index Index;
+ typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
+ BinaryOp,Lhs,Rhs, InnerIterator> Base;
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
+ : Base(binOp.derived(),outer)
+ {}
+};
+
+/***************************************************************************
+* Implementation of inner-iterators
+***************************************************************************/
+
+// template<typename T> struct internal::func_is_conjunction { enum { ret = false }; };
+// template<typename T> struct internal::func_is_conjunction<internal::scalar_product_op<T> > { enum { ret = true }; };
+
+// TODO generalize the internal::scalar_product_op specialization to all conjunctions, if any!
+
+namespace internal {
+
+// sparse - sparse (generic)
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename traits<CwiseBinaryXpr>::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+ ++m_lhsIter;
+ ++m_rhsIter;
+ }
+ else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), Scalar(0));
+ ++m_lhsIter;
+ }
+ else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
+ {
+ m_id = m_rhsIter.index();
+ m_value = m_functor(Scalar(0), m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ {
+ m_value = 0; // this is to avoid a compilation warning
+ m_id = -1;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
+ EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ Index m_id;
+};
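+
+// Editorial sketch: operator++ above is a classic sorted two-pointer union;
+// the same control flow on plain arrays (hypothetical helper, not Eigen API):
+//
+//   template<typename Scalar, typename F>
+//   void cwise_union(const int* ia, const Scalar* va, int na,
+//                    const int* ib, const Scalar* vb, int nb, F f)
+//   {
+//     int a = 0, b = 0;
+//     while (a < na || b < nb)
+//     {
+//       if (a < na && b < nb && ia[a] == ib[b])
+//         { f(ia[a], va[a], vb[b]); ++a; ++b; }   // aligned pair
+//       else if (b == nb || (a < na && ia[a] < ib[b]))
+//         { f(ia[a], va[a], Scalar(0)); ++a; }    // rhs entry is an implicit zero
+//       else
+//         { f(ib[b], Scalar(0), vb[b]); ++b; }    // lhs entry is an implicit zero
+//     }
+//   }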
+
+// sparse - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ ++m_rhsIter;
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+};
+
+// sparse - dense (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Dense>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::RhsNested RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename Lhs::Index Index;
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_lhsIter.value(),
+ m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
+ protected:
+ const RhsNested m_rhs;
+ LhsIterator m_lhsIter;
+ const BinaryFunc m_functor;
+ const Index m_outer;
+};
+
+// dense - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Dense, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_rhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
+ protected:
+ const CwiseBinaryXpr& m_xpr;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+ const Index m_outer;
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of SparseMatrixBase and SparseCwise functions/operators
+***************************************************************************/
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename internal::traits<Derived>::Scalar>,
+// Derived, OtherDerived>
+// SparseMatrixBase<Derived>::operator-(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
+// Derived, OtherDerived>(derived(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
+{
+ return *this = derived() - other.derived();
+}
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// SparseMatrixBase<Derived>::operator+(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return CwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived>(derived(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
+{
+ return *this = derived() + other.derived();
+}
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+// SparseCwise<ExpressionType>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
+}
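+
+// Editorial sketch: the cwiseProduct above yields a sparse expression whose
+// support is at most that of the sparse factor:
+//
+//   Eigen::SparseMatrix<double> s(100, 100);            // filled elsewhere
+//   Eigen::MatrixXd d = Eigen::MatrixXd::Random(100, 100);
+//   Eigen::SparseMatrix<double> r = s.cwiseProduct(d);  // visits only s's stored entries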
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+// SparseCwise<ExpressionType>::operator/(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
+// }
+//
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+// SparseCwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
+// }
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// inline ExpressionType& SparseCwise<ExpressionType>::operator*=(const SparseMatrixBase<OtherDerived> &other)
+// {
+// return m_matrix.const_cast_derived() = _expression() * other.derived();
+// }
+
+
+#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h
new file mode 100644
index 00000000000..aa068835fbb
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h
@@ -0,0 +1,146 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
+#define EIGEN_SPARSE_CWISE_UNARY_OP_H
+
+// template<typename UnaryOp, typename MatrixType>
+// struct internal::traits<SparseCwiseUnaryOp<UnaryOp, MatrixType> > : internal::traits<MatrixType>
+// {
+// typedef typename internal::result_of<
+// UnaryOp(typename MatrixType::Scalar)
+// >::type Scalar;
+// typedef typename MatrixType::Nested MatrixTypeNested;
+// typedef typename internal::remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+// enum {
+// CoeffReadCost = _MatrixTypeNested::CoeffReadCost + internal::functor_traits<UnaryOp>::Cost
+// };
+// };
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+// typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+
+ typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::InnerIterator
+{
+ typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+ typedef typename internal::traits<Derived>::_XprTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename MatrixType::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, Index outer)
+ : m_iter(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { ++m_iter; return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
+
+ protected:
+ MatrixTypeIterator m_iter;
+ const UnaryOp m_functor;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryView<ViewOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+// typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+
+ typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::InnerIterator
+{
+ typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+ typedef typename internal::traits<Derived>::_MatrixTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename MatrixType::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, Index outer)
+ : m_iter(unaryView.derived().nestedExpression(),outer), m_functor(unaryView.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { ++m_iter; return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
+ EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(m_iter.valueRef()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
+
+ protected:
+ MatrixTypeIterator m_iter;
+ const ViewOp m_functor;
+};
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator*=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() *= other;
+ return derived();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator/=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() /= other;
+ return derived();
+}
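+
+// Editorial sketch: both in-place operators above visit only the stored
+// nonzeros, so the cost is O(nnz) regardless of the matrix dimensions:
+//
+//   Eigen::SparseMatrix<float> m(1000, 1000);  // filled elsewhere
+//   m *= 0.5f;
+//   m /= 3.0f;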
+
+#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h
new file mode 100644
index 00000000000..0f77aa5be99
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h
@@ -0,0 +1,231 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEDENSEPRODUCT_H
+#define EIGEN_SPARSEDENSEPRODUCT_H
+
+template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
+{
+ typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
+};
+
+template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
+{
+ typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, bool Tr>
+struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ typedef Sparse StorageKind;
+ typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
+ typename traits<Rhs>::Scalar>::ReturnType Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
+ RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
+
+ RowsAtCompileTime = Tr ? int(traits<Rhs>::RowsAtCompileTime) : int(traits<Lhs>::RowsAtCompileTime),
+ ColsAtCompileTime = Tr ? int(traits<Lhs>::ColsAtCompileTime) : int(traits<Rhs>::ColsAtCompileTime),
+ MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime) : int(traits<Lhs>::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime) : int(traits<Rhs>::MaxColsAtCompileTime),
+
+ Flags = Tr ? RowMajorBit : 0,
+
+ CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
+ };
+};
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, bool Tr>
+class SparseDenseOuterProduct
+ : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
+ typedef internal::traits<SparseDenseOuterProduct> Traits;
+
+ private:
+
+ typedef typename Traits::LhsNested LhsNested;
+ typedef typename Traits::RhsNested RhsNested;
+ typedef typename Traits::_LhsNested _LhsNested;
+ typedef typename Traits::_RhsNested _RhsNested;
+
+ public:
+
+ class InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+template<typename Lhs, typename Rhs, bool Transpose>
+class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
+{
+ typedef typename _LhsNested::InnerIterator Base;
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
+ : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
+ {
+ }
+
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return Transpose ? Base::row() : m_outer; }
+ inline Index col() const { return Transpose ? m_outer : Base::row(); }
+
+ inline Scalar value() const { return Base::value() * m_factor; }
+
+ protected:
+    Index m_outer;
+ Scalar m_factor;
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
+ : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+ typedef MatrixXpr XprKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseTimeDenseProduct
+ : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
+
+ SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ typedef typename internal::remove_all<Lhs>::type _Lhs;
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit };
+ for(Index j=0; j<m_lhs.outerSize(); ++j)
+ {
+ typename Rhs::Scalar rhs_j = alpha * m_rhs.coeff(LhsIsRowMajor ? 0 : j,0);
+ typename Dest::RowXpr dest_j(dest.row(LhsIsRowMajor ? j : 0));
+ for(LhsInnerIterator it(m_lhs,j); it ;++it)
+ {
+ if(LhsIsRowMajor) dest_j += (alpha*it.value()) * m_rhs.row(it.index());
+ else if(Rhs::ColsAtCompileTime==1) dest.coeffRef(it.index()) += it.value() * rhs_j;
+ else dest.row(it.index()) += (alpha*it.value()) * m_rhs.row(j);
+ }
+ }
+ }
+
+ private:
+ SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
+};
+
+
+// dense = dense * sparse
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
+ : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class DenseTimeSparseProduct
+ : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
+
+ DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Rhs::InnerIterator RhsInnerIterator;
+ enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit };
+ for(Index j=0; j<m_rhs.outerSize(); ++j)
+ for(RhsInnerIterator i(m_rhs,j); i; ++i)
+ dest.col(RhsIsRowMajor ? i.index() : j) += (alpha*i.value()) * m_lhs.col(RhsIsRowMajor ? j : i.index());
+ }
+
+ private:
+ DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
+};
+
+// sparse * dense
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
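+
+// Editorial sketch: the operator above builds a lazy product expression that
+// is evaluated through scaleAndAddTo() when assigned to a dense destination
+// (n is a hypothetical size):
+//
+//   Eigen::SparseMatrix<double> A(n, n);   // filled elsewhere
+//   Eigen::VectorXd x = Eigen::VectorXd::Ones(n);
+//   Eigen::VectorXd y = A * x;             // O(nnz) work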
+
+#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h
new file mode 100644
index 00000000000..fb9a29c051b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h
@@ -0,0 +1,195 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+
+// The product of a diagonal matrix with a sparse matrix can be easily
+// implemented using expression templates.
+// We have to consider two very different cases:
+// 1 - diag * row-major sparse
+//     => each inner vector <=> scalar * sparse vector product
+//     => so we can reuse CwiseUnaryOp::InnerIterator
+// 2 - diag * col-major sparse
+//     => each inner vector <=> dense vector * sparse vector cwise product
+//     => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
+//        for that particular case
+// The two other cases are symmetric.
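+
+// Editorial sketch of the symmetric "sparse * diag" case, which uses the
+// operator defined at the end of this file (m and n are hypothetical sizes):
+//
+//   Eigen::SparseMatrix<double> s(m, n);  // filled elsewhere
+//   Eigen::VectorXd d = Eigen::VectorXd::LinSpaced(n, 1.0, double(n));
+//   Eigen::SparseMatrix<double> r = s * d.asDiagonal();  // scales column j by d(j)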
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<SparseDiagonalProduct<Lhs, Rhs> >
+{
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = _Lhs::RowsAtCompileTime,
+ ColsAtCompileTime = _Rhs::ColsAtCompileTime,
+
+ MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
+
+ SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
+ Flags = (SparseFlags&RowMajorBit),
+ CoeffReadCost = Dynamic
+ };
+};
+
+enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
+class sparse_diagonal_product_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseDiagonalProduct
+ : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
+ internal::no_assignment_operator
+{
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+
+ typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+ typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
+ RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
+ };
+
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
+
+ typedef internal::sparse_diagonal_product_inner_iterator_selector
+ <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.rhs().innerVector(outer).cwiseProduct(expr.lhs().diagonal()), 0)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.lhs().innerVector(outer).cwiseProduct(expr.rhs().diagonal().transpose()), 0)
+ {}
+};
+
+} // end namespace internal
+
+// SparseMatrixBase functions
+
+template<typename Derived>
+template<typename OtherDerived>
+const SparseDiagonalProduct<Derived,OtherDerived>
+SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
+{
+ return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
+}
+
+#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseDot.h b/extern/Eigen3/Eigen/src/Sparse/SparseDot.h
new file mode 100644
index 00000000000..1f10f71a402
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDot.h
@@ -0,0 +1,97 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DOT_H
+#define EIGEN_SPARSE_DOT_H
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+  eigen_assert(other.size()>0 && "you are using a non-initialized vector");
+
+ typename Derived::InnerIterator i(derived(),0);
+ Scalar res = 0;
+ while (i)
+ {
+ res += internal::conj(i.value()) * other.coeff(i.index());
+ ++i;
+ }
+ return res;
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+
+ typename Derived::InnerIterator i(derived(),0);
+ typename OtherDerived::InnerIterator j(other.derived(),0);
+ Scalar res = 0;
+ while (i && j)
+ {
+ if (i.index()==j.index())
+ {
+ res += internal::conj(i.value()) * j.value();
+ ++i; ++j;
+ }
+ else if (i.index()<j.index())
+ ++i;
+ else
+ ++j;
+ }
+ return res;
+}
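+
+// Editorial sketch: both index lists are sorted, so the loop above is a single
+// linear merge over the two nonzero sets (n is a hypothetical size):
+//
+//   Eigen::SparseVector<double> a(n), b(n);  // filled elsewhere
+//   double s = a.dot(b);                     // sums conj(a_i) * b_i over common indices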
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::squaredNorm() const
+{
+ return internal::real((*this).cwiseAbs2().sum());
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::norm() const
+{
+ return internal::sqrt(squaredNorm());
+}
+
+#endif // EIGEN_SPARSE_DOT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h b/extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h
new file mode 100644
index 00000000000..f00b3d6469b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h
@@ -0,0 +1,41 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_FUZZY_H
+#define EIGEN_SPARSE_FUZZY_H
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// bool SparseMatrixBase<Derived>::isApprox(
+// const OtherDerived& other,
+// typename NumTraits<Scalar>::Real prec
+// ) const
+// {
+// const typename internal::nested<Derived,2>::type nested(derived());
+// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
+// return (nested - otherNested).cwise().abs2().sum()
+// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
+// }
+
+#endif // EIGEN_SPARSE_FUZZY_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h
new file mode 100644
index 00000000000..0e175ec6e71
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h
@@ -0,0 +1,651 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIX_H
+#define EIGEN_SPARSEMATRIX_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseMatrix
+ *
+ * \brief The main sparse matrix class
+ *
+ * This class implements a sparse matrix using the very common compressed row/column storage
+ * scheme.
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
+ * is RowMajor. The default is 0 which means column-major.
+ * \tparam _Index the type of the indices. Default is \c int.
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ */
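
As a concrete illustration of the scheme (default column-major order), the 3 x 3 matrix

  [ 1 0 2 ]
  [ 0 0 3 ]
  [ 4 5 6 ]

is stored as three arrays:

  values:        1 4 5 2 3 6
  inner indices: 0 2 2 0 1 2     (row of each stored value)
  outer index:   0 2 3 6         (start of each column; column j holds
                                  outerIndex[j+1]-outerIndex[j] entries)
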
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+
+} // end namespace internal
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseMatrix
+ : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+// using Base::operator=;
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+ // FIXME: why are these operators already available ???
+ // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=)
+ // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=)
+
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ typedef CompressedStorage<Scalar,Index> Storage;
+ enum {
+ Options = _Options
+ };
+
+ protected:
+
+ typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index* m_outerIndex;
+ CompressedStorage<Scalar,Index> m_data;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+ inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+
+ inline const Scalar* _valuePtr() const { return &m_data.value(0); }
+ inline Scalar* _valuePtr() { return &m_data.value(0); }
+
+ inline const Index* _innerIndexPtr() const { return &m_data.index(0); }
+ inline Index* _innerIndexPtr() { return &m_data.index(0); }
+
+ inline const Index* _outerIndexPtr() const { return m_outerIndex; }
+ inline Index* _outerIndexPtr() { return m_outerIndex; }
+
+ inline Storage& data() { return m_data; }
+ inline const Storage& data() const { return m_data; }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non-finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ const Index p = m_data.searchLowerIndex(start,end-1,inner);
+ eigen_assert((p<end) && (m_data.index(p)==inner) && "coeffRef cannot be called on a zero coefficient");
+ return m_data.value(p);
+ }
+
+ public:
+
+ class InnerIterator;
+
+ /** Removes all non zeros */
+ inline void setZero()
+ {
+ m_data.clear();
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
+
+ /** Preallocates \a reserveSize non zeros */
+ inline void reserve(Index reserveSize)
+ {
+ m_data.reserve(reserveSize);
+ }
+
+ //--- low level purely coherent filling ---
+
+ /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one according to the storage order
+ *
+ * Before filling a given inner vector you must call the startVec(Index) function.
+ *
+ * After an insertion session, you should call the finalize() function.
+ *
+ * \sa insert, insertBackByOuterInner, startVec */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \sa insertBack, startVec */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+ eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \warning use it only if you know what you are doing */
+ inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \sa insertBack, insertBackByOuterInner */
+ inline void startVec(Index outer)
+ {
+ eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
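
Taken together, startVec(), insertBackByOuterInner() and finalize() form the fast,
strictly ordered filling protocol. A minimal sketch (illustrative only, assuming the
default column-major order; values and coordinates are arbitrary):

  #include <Eigen/Sparse>

  int main()
  {
    Eigen::SparseMatrix<double> m(4, 3);
    m.reserve(3);                          // preallocate the expected nonzeros
    m.startVec(0);                         // columns must be started in order
    m.insertBackByOuterInner(0, 1) = 2.0;  // (row 1, col 0)
    m.insertBackByOuterInner(0, 3) = 5.0;  // inner index strictly increasing
    m.startVec(1);                         // an empty column is fine
    m.startVec(2);
    m.insertBackByOuterInner(2, 0) = 1.0;  // (row 0, col 2)
    m.finalize();                          // close the insertion session
    return 0;
  }
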
+
+ //---
+
+ /** \returns a reference to a newly inserted non zero coefficient with coordinates \a row x \a col.
+ * The non zero coefficient must \b not already exist.
+ *
+ * \warning This function can be extremely slow if the non zero coefficients
+ * are not inserted in a coherent order.
+ *
+ * After an insertion session, you should call the finalize() function.
+ */
+ EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index previousOuter = outer;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+ {
+ m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ --previousOuter;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ // here we have to handle the tricky case where the outerIndex array
+ // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
+ // the 2nd inner vector...
+ bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+ && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+ size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+ size_t p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ float reallocRatio = 1;
+ if (m_data.allocatedSize()<=m_data.size())
+ {
+ // if there is no preallocated memory, let's reserve a minimum of 32 elements
+ if (m_data.size()==0)
+ {
+ m_data.reserve(32);
+ }
+ else
+ {
+ // we need to reallocate the data; to reduce repeated reallocations
+ // we use a smart resize algorithm based on the current filling ratio.
+ // In addition, we use float to avoid integer overflows
+ float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
+ reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+ // furthermore we bound the realloc ratio to:
+ // 1) reduce repeated minor reallocs when the matrix is almost filled
+ // 2) avoid allocating too much memory when the matrix is almost empty
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+ }
+ }
+ m_data.resize(m_data.size()+1,reallocRatio);
+
+ if (!isLastVec)
+ {
+ if (previousOuter==-1)
+ {
+ // oops wrong guess.
+ // let's correct the outer offsets
+ for (Index k=0; k<=(outer+1); ++k)
+ m_outerIndex[k] = 0;
+ Index k=outer+1;
+ while(m_outerIndex[k]==0)
+ m_outerIndex[k++] = 1;
+ while (k<=m_outerSize && m_outerIndex[k]!=0)
+ m_outerIndex[k++]++;
+ p = 0;
+ --k;
+ k = m_outerIndex[k]-1;
+ while (k>0)
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ else
+ {
+ // we are not inserting into the last inner vec
+ // update outer indices:
+ Index j = outer+2;
+ while (j<=m_outerSize && m_outerIndex[j]!=0)
+ m_outerIndex[j++]++;
+ --j;
+ // shift data of last vecs:
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(p))
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ }
+
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
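
By contrast with the ordered protocol, insert() accepts coefficients in any order at
the cost of the shifting logic above; reserving generously up front sidesteps the
realloc-ratio heuristic entirely. A hedged sketch (sizes and coordinates are arbitrary):

  #include <Eigen/Sparse>

  int main()
  {
    Eigen::SparseMatrix<double> m(1000, 1000);
    m.reserve(3000);              // enough room: the realloc path is never taken
    m.insert(500, 10) = 1.0;      // each coefficient must not already exist
    m.insert(2, 999)  = 3.0;      // arbitrary order is accepted, just slower
    m.insert(7, 10)   = -2.0;     // than the coherent startVec/insertBack protocol
    m.finalize();                 // still required after the insertion session
    return 0;
  }
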
+
+
+
+
+ /** Must be called after inserting a set of non zero entries.
+ */
+ inline void finalize()
+ {
+ Index size = static_cast<Index>(m_data.size());
+ Index i = m_outerSize;
+ // find the last filled column
+ while (i>=0 && m_outerIndex[i]==0)
+ --i;
+ ++i;
+ while (i<=m_outerSize)
+ {
+ m_outerIndex[i] = size;
+ ++i;
+ }
+ }
+
+ /** Suppresses all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ prune(default_prunning_func(reference,epsilon));
+ }
+
+ /** Suppresses all nonzeros which do not satisfy the predicate \a keep.
+ * The functor type \a KeepFunc must implement the following function:
+ * \code
+ * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
+ * \endcode
+ * \sa prune(Scalar,RealScalar)
+ */
+ template<typename KeepFunc>
+ void prune(const KeepFunc& keep = KeepFunc())
+ {
+ Index k = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ Index previousStart = m_outerIndex[j];
+ m_outerIndex[j] = k;
+ Index end = m_outerIndex[j+1];
+ for(Index i=previousStart; i<end; ++i)
+ {
+ if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
+ {
+ m_data.value(k) = m_data.value(i);
+ m_data.index(k) = m_data.index(i);
+ ++k;
+ }
+ }
+ }
+ m_outerIndex[m_outerSize] = k;
+ m_data.resize(k,0);
+ }
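
Any functor with the documented signature works as the predicate; for instance, a
keep functor that drops the diagonal (the functor name is ours, not part of the API):

  struct KeepOffDiagonal
  {
    template<typename Index, typename Scalar>
    bool operator()(const Index& row, const Index& col, const Scalar&) const
    { return row != col; }   // keep everything except diagonal entries
  };

  // usage, for some SparseMatrix<double> m:
  //   m.prune(KeepOffDiagonal());
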
+
+ /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero
+ * \sa resizeNonZeros(Index), reserve(), setZero()
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ m_data.clear();
+ if (m_outerSize != outerSize || m_outerSize==0)
+ {
+ delete[] m_outerIndex;
+ m_outerIndex = new Index [outerSize+1];
+ m_outerSize = outerSize;
+ }
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ }
+
+ /** Low level API
+ * Resize the nonzero vector to \a size */
+ void resizeNonZeros(Index size)
+ {
+ m_data.resize(size);
+ }
+
+ /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
+ inline SparseMatrix()
+ : m_outerSize(-1), m_innerSize(0), m_outerIndex(0)
+ {
+ resize(0, 0);
+ }
+
+ /** Constructs a \a rows \c x \a cols empty matrix */
+ inline SparseMatrix(Index rows, Index cols)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ resize(rows, cols);
+ }
+
+ /** Constructs a sparse matrix from the sparse expression \a other */
+ template<typename OtherDerived>
+ inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ *this = other.derived();
+ }
+
+ /** Copy constructor */
+ inline SparseMatrix(const SparseMatrix& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ *this = other.derived();
+ }
+
+ /** Swaps the content of two sparse matrices of the same type (optimization) */
+ inline void swap(SparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_outerIndex, other.m_outerIndex);
+ std::swap(m_innerSize, other.m_innerSize);
+ std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseMatrix& operator=(const SparseMatrix& other)
+ {
+// std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n";
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ { return Base::operator=(product); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
+ { return Base::operator=(other); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
+ { return Base::operator=(other); }
+ #endif
+
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+ // two passes algorithm:
+ // 1 - compute the number of coeffs per dest inner vector
+ // 2 - do the actual copy/eval
+ // Since each coeff of the rhs has to be evaluated twice, let's evaluate the rhs into a temporary if needed
+ typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ resize(other.rows(), other.cols());
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ ++m_outerIndex[it.index()];
+
+ // prefix sum
+ Index count = 0;
+ VectorXi positions(outerSize());
+ for (Index j=0; j<outerSize(); ++j)
+ {
+ Index tmp = m_outerIndex[j];
+ m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ m_outerIndex[outerSize()] = count;
+ // alloc
+ m_data.resize(count);
+ // pass 2
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ m_data.index(pos) = j;
+ m_data.value(pos) = it.value();
+ }
+ }
+ return *this;
+ }
+ else
+ {
+ // there is no special optimization
+ return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
+ }
+ }
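
The transposed branch above is the classic two-pass compressed-storage transpose:
count the entries of each destination inner vector, turn the counts into offsets with
a prefix sum, then scatter. The counting half, sketched on raw index arrays
(illustrative only, not how Eigen spells it internally):

  // dstOuter has srcInnerSize+1 entries and must be zero-initialized
  void countTransposed(int srcOuterSize, int srcInnerSize,
                       const int* srcOuter, const int* srcInner, int* dstOuter)
  {
    for (int j = 0; j < srcOuterSize; ++j)
      for (int p = srcOuter[j]; p < srcOuter[j+1]; ++p)
        ++dstOuter[srcInner[p] + 1];        // pass 1: per-vector counts
    for (int i = 0; i < srcInnerSize; ++i)
      dstOuter[i + 1] += dstOuter[i];       // prefix sum: counts become offsets
    // pass 2 (as in the operator= above) scatters each (index,value) pair
    // to position dstOuter[innerIndex]++ of the destination arrays
  }
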
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+ {
+ EIGEN_DBG_SPARSE(
+ s << "Nonzero entries:\n";
+ for (Index i=0; i<m.nonZeros(); ++i)
+ {
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ }
+ s << std::endl;
+ s << std::endl;
+ s << "Column pointers:\n";
+ for (Index i=0; i<m.outerSize(); ++i)
+ {
+ s << m.m_outerIndex[i] << " ";
+ }
+ s << " $" << std::endl;
+ s << std::endl;
+ );
+ s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseMatrix()
+ {
+ delete[] m_outerIndex;
+ }
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ public:
+
+ /** \deprecated use setZero() and reserve()
+ * Initializes the filling process of \c *this.
+ * \param reserveSize approximate number of nonzeros
+ * Note that the matrix \c *this is zeroed.
+ */
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
+ {
+ setZero();
+ m_data.reserve(reserveSize);
+ }
+
+ /** \deprecated use insert()
+ * Like fill() but with random inner coordinates.
+ */
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
+ {
+ return insert(row,col);
+ }
+
+ /** \deprecated use insert()
+ */
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ Index i = outer;
+ while (i>=0 && m_outerIndex[i]==0)
+ {
+ m_outerIndex[i] = m_data.size();
+ --i;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+ else
+ {
+ eigen_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
+ }
+// std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n";
+ assert(size_t(m_outerIndex[outer+1]) == m_data.size());
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \deprecated use finalize */
+ EIGEN_DEPRECATED void endFill() { finalize(); }
+
+# ifdef EIGEN_SPARSEMATRIX_PLUGIN
+# include EIGEN_SPARSEMATRIX_PLUGIN
+# endif
+
+private:
+ struct default_prunning_func {
+ default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+ inline bool operator() (const Index&, const Index&, const Scalar& value) const
+ {
+ return !internal::isMuchSmallerThan(value, reference, epsilon);
+ }
+ Scalar reference;
+ RealScalar epsilon;
+ };
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseMatrix& mat, Index outer)
+ : m_values(mat._valuePtr()), m_indices(mat._innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer+1])
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+ inline Index index() const { return m_indices[m_id]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ const Index m_end;
+};
+
+#endif // EIGEN_SPARSEMATRIX_H
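
Traversing all stored coefficients then reduces to one InnerIterator loop per outer
vector; a minimal sketch, assuming <Eigen/Sparse>:

  #include <Eigen/Sparse>
  #include <iostream>

  int main()
  {
    Eigen::SparseMatrix<double> m(3, 3);
    m.insert(0, 0) = 1.0;
    m.insert(2, 1) = 4.0;
    m.finalize();

    double sum = 0;
    for (int k = 0; k < m.outerSize(); ++k)    // each column, for column-major
      for (Eigen::SparseMatrix<double>::InnerIterator it(m, k); it; ++it)
        sum += it.value();                     // it.row()/it.col() also valid here
    std::cout << sum << "\n";                  // prints 5
    return 0;
  }
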
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h b/extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h
new file mode 100644
index 00000000000..c01981bc935
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h
@@ -0,0 +1,706 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIXBASE_H
+#define EIGEN_SPARSEMATRIXBASE_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseMatrixBase
+ *
+ * \brief Base class of any sparse matrices or sparse expressions
+ *
+ * \tparam Derived
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
+ */
+template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
+{
+ public:
+
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+
+ typedef SparseMatrixBase StorageBaseType;
+ typedef EigenBase<Derived> Base;
+
+ template<typename OtherDerived>
+ Derived& operator=(const EigenBase<OtherDerived> &other)
+ {
+ other.derived().evalTo(derived());
+ return derived();
+ }
+
+// using Base::operator=;
+
+ enum {
+
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ /**< The number of rows at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ /**< The number of columns at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime>::ret),
+ /**< This is equal to the number of coefficients, i.e. the number of
+ * rows times the number of columns, or to \a Dynamic if this is not
+ * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+
+ MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
+ MaxColsAtCompileTime>::ret),
+
+ IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
+ /**< This is set to true if either the number of rows or the number of
+ * columns is known at compile-time to be equal to 1. Indeed, in that case,
+ * we are dealing with a column-vector (if there is only one column) or with
+ * a row-vector (if there is only one row). */
+
+ Flags = internal::traits<Derived>::Flags,
+ /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+ * constructed from this one. See the \ref flags "list of flags".
+ */
+
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+ /**< This is a rough measure of how expensive it is to read one coefficient from
+ * this expression.
+ */
+
+ IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
+ #endif
+ };
+
+ /* \internal the return type of MatrixBase::conjugate() */
+// typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+// const SparseCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Derived>,
+// const Derived&
+// >::type ConjugateReturnType;
+ /* \internal the return type of MatrixBase::real() */
+// typedef SparseCwiseUnaryOp<internal::scalar_real_op<Scalar>, Derived> RealReturnType;
+ /* \internal the return type of MatrixBase::imag() */
+// typedef SparseCwiseUnaryOp<internal::scalar_imag_op<Scalar>, Derived> ImagReturnType;
+ /** \internal the return type of MatrixBase::adjoint() */
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
+ Transpose<const Derived>
+ >::type AdjointReturnType;
+
+
+ typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+# include "../plugins/CommonCwiseUnaryOps.h"
+# include "../plugins/CommonCwiseBinaryOps.h"
+# include "../plugins/MatrixCwiseUnaryOps.h"
+# include "../plugins/MatrixCwiseBinaryOps.h"
+# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
+# include EIGEN_SPARSEMATRIXBASE_PLUGIN
+# endif
+# undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is the "real scalar" type; if the \a Scalar type is already real numbers
+ * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
+ * \a Scalar is \a std::complex<T> then RealScalar is \a T.
+ *
+ * \sa class NumTraits
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** \internal the return type of coeff()
+ */
+ typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
+
+ /** \internal Represents a matrix with all coefficients equal to one another */
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
+
+ /** type of the equivalent square matrix */
+ typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+ EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+
+ inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+ inline Derived& derived() { return *static_cast<Derived*>(this); }
+ inline Derived& const_cast_derived() const
+ { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
+ inline Index rows() const { return derived().rows(); }
+ /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
+ inline Index cols() const { return derived().cols(); }
+ /** \returns the number of coefficients, which is \a rows()*cols().
+ * \sa rows(), cols(), SizeAtCompileTime. */
+ inline Index size() const { return rows() * cols(); }
+ /** \returns the number of nonzero coefficients which is in practice the number
+ * of stored coefficients. */
+ inline Index nonZeros() const { return derived().nonZeros(); }
+ /** \returns true if either the number of rows or the number of columns is equal to 1.
+ * In other words, this function returns
+ * \code rows()==1 || cols()==1 \endcode
+ * \sa rows(), cols(), IsVectorAtCompileTime. */
+ inline bool isVector() const { return rows()==1 || cols()==1; }
+ /** \returns the size of the storage major dimension,
+ * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
+ Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+ /** \returns the size of the inner dimension according to the storage order,
+ * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
+ Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
+
+ bool isRValue() const { return m_isRValue; }
+ Derived& markAsRValue() { m_isRValue = true; return derived(); }
+
+ SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
+
+ inline Derived& operator=(const Derived& other)
+ {
+// std::cout << "Derived& operator=(const Derived& other)\n";
+// if (other.isRValue())
+// derived().swap(other.const_cast_derived());
+// else
+ this->operator=<Derived>(other);
+ return derived();
+ }
+
+ template<typename OtherDerived>
+ Derived& operator=(const ReturnByValue<OtherDerived>& other)
+ {
+ other.evalTo(derived());
+ return derived();
+ }
+
+
+ template<typename OtherDerived>
+ inline void assignGeneric(const OtherDerived& other)
+ {
+// std::cout << "Derived& operator=(const MatrixBase<OtherDerived>& other)\n";
+ //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
+
+ const Index outerSize = other.outerSize();
+ //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
+ // thanks to shallow copies, we always eval to a temporary
+ Derived temp(other.rows(), other.cols());
+
+ temp.reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ temp.startVec(j);
+ for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+ {
+ Scalar v = it.value();
+ if (v!=Scalar(0))
+ temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+ }
+ }
+ temp.finalize();
+
+ derived() = temp.markAsRValue();
+ }
+
+
+ template<typename OtherDerived>
+ inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+// std::cout << typeid(OtherDerived).name() << "\n";
+// std::cout << Flags << " " << OtherDerived::Flags << "\n";
+ const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+// std::cout << "eval transpose = " << transpose << "\n";
+ const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+ if ((!transpose) && other.isRValue())
+ {
+ // eval without temporary
+ derived().resize(other.rows(), other.cols());
+ derived().setZero();
+ derived().reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ derived().startVec(j);
+ for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+ {
+ Scalar v = it.value();
+ if (v!=Scalar(0))
+ derived().insertBackByOuterInner(j,it.index()) = v;
+ }
+ }
+ derived().finalize();
+ }
+ else
+ {
+ assignGeneric(other.derived());
+ }
+ return derived();
+ }
+
+ template<typename Lhs, typename Rhs>
+ inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+
+ template<typename Lhs, typename Rhs>
+ inline void _experimentalNewProduct(const Lhs& lhs, const Rhs& rhs);
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
+ {
+ if (Flags&RowMajorBit)
+ {
+ for (Index row=0; row<m.outerSize(); ++row)
+ {
+ Index col = 0;
+ for (typename Derived::InnerIterator it(m.derived(), row); it; ++it)
+ {
+ for ( ; col<it.index(); ++col)
+ s << "0 ";
+ s << it.value() << " ";
+ ++col;
+ }
+ for ( ; col<m.cols(); ++col)
+ s << "0 ";
+ s << std::endl;
+ }
+ }
+ else
+ {
+ if (m.cols() == 1) {
+ Index row = 0;
+ for (typename Derived::InnerIterator it(m.derived(), 0); it; ++it)
+ {
+ for ( ; row<it.index(); ++row)
+ s << "0" << std::endl;
+ s << it.value() << std::endl;
+ ++row;
+ }
+ for ( ; row<m.rows(); ++row)
+ s << "0" << std::endl;
+ }
+ else
+ {
+ SparseMatrix<Scalar, RowMajorBit> trans = m.derived();
+ s << trans;
+ }
+ }
+ return s;
+ }
+
+// const SparseCwiseUnaryOp<internal::scalar_opposite_op<typename internal::traits<Derived>::Scalar>,Derived> operator-() const;
+
+// template<typename OtherDerived>
+// const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// operator+(const SparseMatrixBase<OtherDerived> &other) const;
+
+// template<typename OtherDerived>
+// const CwiseBinaryOp<internal::scalar_difference_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// operator-(const SparseMatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
+ template<typename OtherDerived>
+ Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+
+// template<typename Lhs,typename Rhs>
+// Derived& operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other);
+
+ Derived& operator*=(const Scalar& other);
+ Derived& operator/=(const Scalar& other);
+
+ #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
+ CwiseBinaryOp< \
+ internal::scalar_product_op< \
+ typename internal::scalar_product_traits< \
+ typename internal::traits<Derived>::Scalar, \
+ typename internal::traits<OtherDerived>::Scalar \
+ >::ReturnType \
+ >, \
+ Derived, \
+ OtherDerived \
+ >
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+ cwiseProduct(const MatrixBase<OtherDerived> &other) const;
+
+// const SparseCwiseUnaryOp<internal::scalar_multiple_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator*(const Scalar& scalar) const;
+// const SparseCwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator/(const Scalar& scalar) const;
+
+// inline friend const SparseCwiseUnaryOp<internal::scalar_multiple_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator*(const Scalar& scalar, const SparseMatrixBase& matrix)
+// { return matrix*scalar; }
+
+
+ // sparse * sparse
+ template<typename OtherDerived>
+ const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+ // sparse * diagonal
+ template<typename OtherDerived>
+ const SparseDiagonalProduct<Derived,OtherDerived>
+ operator*(const DiagonalBase<OtherDerived> &other) const;
+
+ // diagonal * sparse
+ template<typename OtherDerived> friend
+ const SparseDiagonalProduct<OtherDerived,Derived>
+ operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+ { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+ /** dense * sparse (returns a dense object unless it is an outer product) */
+ template<typename OtherDerived> friend
+ const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
+ operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
+ { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
+
+ /** sparse * dense (returns a dense object unless it is an outer product) */
+ template<typename OtherDerived>
+ const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const MatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+
+ #ifdef EIGEN2_SUPPORT
+ // deprecated
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+ // deprecated
+ template<typename OtherDerived>
+ void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
+// template<typename OtherDerived>
+// void solveTriangularInPlace(SparseMatrixBase<OtherDerived>& other) const;
+ #endif // EIGEN2_SUPPORT
+
+ template<int Mode>
+ inline const SparseTriangularView<Derived, Mode> triangularView() const;
+
+ template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
+ template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
+
+ template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
+ RealScalar squaredNorm() const;
+ RealScalar norm() const;
+// const PlainObject normalized() const;
+// void normalize();
+
+ Transpose<Derived> transpose() { return derived(); }
+ const Transpose<const Derived> transpose() const { return derived(); }
+ // void transposeInPlace();
+ const AdjointReturnType adjoint() const { return transpose(); }
+
+ // sub-vector
+ SparseInnerVectorSet<Derived,1> row(Index i);
+ const SparseInnerVectorSet<Derived,1> row(Index i) const;
+ SparseInnerVectorSet<Derived,1> col(Index j);
+ const SparseInnerVectorSet<Derived,1> col(Index j) const;
+ SparseInnerVectorSet<Derived,1> innerVector(Index outer);
+ const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
+
+ // set of sub-vectors
+ SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
+ const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
+
+// typename BlockReturnType<Derived>::Type block(int startRow, int startCol, int blockRows, int blockCols);
+// const typename BlockReturnType<Derived>::Type
+// block(int startRow, int startCol, int blockRows, int blockCols) const;
+//
+// typename BlockReturnType<Derived>::SubVectorType segment(int start, int size);
+// const typename BlockReturnType<Derived>::SubVectorType segment(int start, int size) const;
+//
+// typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size);
+// const typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size) const;
+//
+// typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size);
+// const typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size) const;
+//
+// template<int BlockRows, int BlockCols>
+// typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol);
+// template<int BlockRows, int BlockCols>
+// const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol) const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType start(void);
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType start() const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType end();
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType end() const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType segment(int start);
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType segment(int start) const;
+
+// Diagonal<Derived> diagonal();
+// const Diagonal<Derived> diagonal() const;
+
+// template<unsigned int Mode> Part<Derived, Mode> part();
+// template<unsigned int Mode> const Part<Derived, Mode> part() const;
+
+
+// static const ConstantReturnType Constant(int rows, int cols, const Scalar& value);
+// static const ConstantReturnType Constant(int size, const Scalar& value);
+// static const ConstantReturnType Constant(const Scalar& value);
+
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(int rows, int cols, const CustomNullaryOp& func);
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(int size, const CustomNullaryOp& func);
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(const CustomNullaryOp& func);
+
+// static const ConstantReturnType Zero(int rows, int cols);
+// static const ConstantReturnType Zero(int size);
+// static const ConstantReturnType Zero();
+// static const ConstantReturnType Ones(int rows, int cols);
+// static const ConstantReturnType Ones(int size);
+// static const ConstantReturnType Ones();
+// static const IdentityReturnType Identity();
+// static const IdentityReturnType Identity(int rows, int cols);
+// static const BasisReturnType Unit(int size, int i);
+// static const BasisReturnType Unit(int i);
+// static const BasisReturnType UnitX();
+// static const BasisReturnType UnitY();
+// static const BasisReturnType UnitZ();
+// static const BasisReturnType UnitW();
+
+// const DiagonalMatrix<Derived> asDiagonal() const;
+
+// Derived& setConstant(const Scalar& value);
+// Derived& setZero();
+// Derived& setOnes();
+// Derived& setRandom();
+// Derived& setIdentity();
+
+ /** \internal use operator= */
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& dst) const
+ {
+ dst.setZero();
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ dst.coeffRef(i.row(),i.col()) = i.value();
+ }
+
+ Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ {
+ return derived();
+ }
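
evalTo() and toDense() make the sparse-to-dense conversion explicit; a short sketch,
assuming <Eigen/Dense> for the destination type:

  #include <Eigen/Sparse>
  #include <Eigen/Dense>

  int main()
  {
    Eigen::SparseMatrix<double> s(2, 2);
    s.insert(0, 1) = 3.0;
    s.finalize();
    Eigen::MatrixXd d = s.toDense();  // evalTo() zeroes d, then copies the nonzeros
    return 0;
  }
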
+
+ template<typename OtherDerived>
+ bool isApprox(const SparseMatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return toDense().isApprox(other.toDense(),prec); }
+
+ template<typename OtherDerived>
+ bool isApprox(const MatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return toDense().isApprox(other,prec); }
+// bool isMuchSmallerThan(const RealScalar& other,
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// template<typename OtherDerived>
+// bool isMuchSmallerThan(const MatrixBase<OtherDerived>& other,
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+// bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+// bool isUpper(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isLower(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+// template<typename OtherDerived>
+// bool isOrthogonal(const MatrixBase<OtherDerived>& other,
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+// template<typename OtherDerived>
+// inline bool operator==(const MatrixBase<OtherDerived>& other) const
+// { return (cwise() == other).all(); }
+
+// template<typename OtherDerived>
+// inline bool operator!=(const MatrixBase<OtherDerived>& other) const
+// { return (cwise() != other).any(); }
+
+
+// template<typename NewType>
+// const SparseCwiseUnaryOp<internal::scalar_cast_op<typename internal::traits<Derived>::Scalar, NewType>, Derived> cast() const;
+
+ /** \returns the matrix or vector obtained by evaluating this expression.
+ *
+ * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+ * a const reference, in order to avoid a useless copy.
+ */
+ inline const typename internal::eval<Derived>::type eval() const
+ { return typename internal::eval<Derived>::type(derived()); }
+
+// template<typename OtherDerived>
+// void swap(MatrixBase<OtherDerived> const & other);
+
+// template<unsigned int Added>
+// const SparseFlagged<Derived, Added, 0> marked() const;
+// const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit> lazy() const;
+
+ /** \returns number of elements to skip to pass from one row (resp. column) to another
+ * for a row-major (resp. column-major) matrix.
+ * Combined with coeffRef() and the \ref flags flags, it allows a direct access to the data
+ * of the underlying matrix.
+ */
+// inline int stride(void) const { return derived().stride(); }
+
+// FIXME
+// ConjugateReturnType conjugate() const;
+// const RealReturnType real() const;
+// const ImagReturnType imag() const;
+
+// template<typename CustomUnaryOp>
+// const SparseCwiseUnaryOp<CustomUnaryOp, Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const;
+
+// template<typename CustomBinaryOp, typename OtherDerived>
+// const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
+// binaryExpr(const MatrixBase<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const;
+
+
+ Scalar sum() const;
+// Scalar trace() const;
+
+// typename internal::traits<Derived>::Scalar minCoeff() const;
+// typename internal::traits<Derived>::Scalar maxCoeff() const;
+
+// typename internal::traits<Derived>::Scalar minCoeff(int* row, int* col = 0) const;
+// typename internal::traits<Derived>::Scalar maxCoeff(int* row, int* col = 0) const;
+
+// template<typename BinaryOp>
+// typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type
+// redux(const BinaryOp& func) const;
+
+// template<typename Visitor>
+// void visit(Visitor& func) const;
+
+
+// const SparseCwise<Derived> cwise() const;
+// SparseCwise<Derived> cwise();
+
+// inline const WithFormat<Derived> format(const IOFormat& fmt) const;
+
+/////////// Array module ///////////
+ /*
+ bool all(void) const;
+ bool any(void) const;
+
+ const VectorwiseOp<Derived,Horizontal> rowwise() const;
+ const VectorwiseOp<Derived,Vertical> colwise() const;
+
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(int size);
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random();
+
+ template<typename ThenDerived,typename ElseDerived>
+ const Select<Derived,ThenDerived,ElseDerived>
+ select(const MatrixBase<ThenDerived>& thenMatrix,
+ const MatrixBase<ElseDerived>& elseMatrix) const;
+
+ template<typename ThenDerived>
+ inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+ select(const MatrixBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
+
+ template<typename ElseDerived>
+ inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+ select(typename ElseDerived::Scalar thenScalar, const MatrixBase<ElseDerived>& elseMatrix) const;
+
+ template<int p> RealScalar lpNorm() const;
+ */
+
+
+// template<typename OtherDerived>
+// Scalar dot(const MatrixBase<OtherDerived>& other) const
+// {
+// EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+// EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+// EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+//
+// eigen_assert(derived().size() == other.size());
+// // short version, but the assembly looks more complicated because
+// // of the CwiseBinaryOp iterator complexity
+// // return res = (derived().cwise() * other.derived().conjugate()).sum();
+//
+// // optimized, generic version
+// typename Derived::InnerIterator i(derived(),0);
+// typename OtherDerived::InnerIterator j(other.derived(),0);
+// Scalar res = 0;
+// while (i && j)
+// {
+// if (i.index()==j.index())
+// {
+// // std::cerr << i.value() << " * " << j.value() << "\n";
+// res += i.value() * internal::conj(j.value());
+// ++i; ++j;
+// }
+// else if (i.index()<j.index())
+// ++i;
+// else
+// ++j;
+// }
+// return res;
+// }
+//
+// Scalar sum() const
+// {
+// Scalar res = 0;
+// for (typename Derived::InnerIterator iter(*this,0); iter; ++iter)
+// {
+// res += iter.value();
+// }
+// return res;
+// }
+
+ protected:
+
+ bool m_isRValue;
+};
+
+#endif // EIGEN_SPARSEMATRIXBASE_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h
new file mode 100644
index 00000000000..1c1f54706ac
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h
@@ -0,0 +1,141 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEPRODUCT_H
+#define EIGEN_SPARSEPRODUCT_H
+
+template<typename Lhs, typename Rhs>
+struct SparseSparseProductReturnType
+{
+ typedef typename internal::traits<Lhs>::Scalar Scalar;
+ enum {
+ LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
+ RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
+ TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
+ TransposeLhs = LhsRowMajor && (!RhsRowMajor)
+ };
+
+ typedef typename internal::conditional<TransposeLhs,
+ SparseMatrix<Scalar,0>,
+ const typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
+
+ typedef typename internal::conditional<TransposeRhs,
+ SparseMatrix<Scalar,0>,
+ const typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
+
+ typedef SparseSparseProduct<LhsNested, RhsNested> Type;
+};
+
+namespace internal {
+template<typename LhsNested, typename RhsNested>
+struct traits<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ typedef MatrixXpr XprKind;
+ // clean the nested types:
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+ typedef typename _LhsNested::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+ typename traits<_RhsNested>::Index>::type Index;
+
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+
+ RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+ ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+ MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+ InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+ EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+
+ RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+ Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeAssigningBit
+ | EvalBeforeNestingBit,
+
+ CoeffReadCost = Dynamic
+ };
+
+ typedef Sparse StorageKind;
+};
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested>
+class SparseSparseProduct : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseSparseProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
+
+ private:
+
+ typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
+ typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
+
+ public:
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows());
+
+ enum {
+ ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
+ || _RhsNested::RowsAtCompileTime==Dynamic
+ || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
+ AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwise()*v2
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h b/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h
new file mode 100644
index 00000000000..afc49de7aad
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h
@@ -0,0 +1,56 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEREDUX_H
+#define EIGEN_SPARSEREDUX_H
+
+template<typename Derived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ Scalar res = 0;
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+ res += iter.value();
+ return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
+SparseMatrix<_Scalar,_Options,_Index>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
+SparseVector<_Scalar,_Options,_Index>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+#endif // EIGEN_SPARSEREDUX_H
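As a quick sketch (not upstream code; the size is arbitrary): sum() visits only the stored coefficients, so the generic path costs O(nnz), while the SparseMatrix and SparseVector overloads above simply Map the contiguous value buffer and reduce it densely.

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      SparseMatrix<double> A = MatrixXd::Ones(4,4).sparseView(1.0, 0.5);
      double total = A.sum(); // 16.0, via the Map-based overload above
      return total == 16.0 ? 0 : 1;
    }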
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h b/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h
new file mode 100644
index 00000000000..d82044c789c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h
@@ -0,0 +1,454 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
+#define EIGEN_SPARSE_SELFADJOINTVIEW_H
+
+/** \class SparseSelfAdjointView
+ *
+ * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
+ *
+ * \param MatrixType the type of the sparse matrix storing the coefficients
+ * \param UpLo can be either \c #Lower or \c #Upper
+ *
+ * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
+ * with given storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
+ * and most of the time this is the only way that it is used.
+ *
+ * \sa SparseMatrixBase::selfadjointView()
+ */
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct;
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct;
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct;
+
+namespace internal {
+
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+}
+
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
+ : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+{
+ public:
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+ {
+ eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
+ }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ /** \internal \returns a reference to the nested matrix */
+ const _MatrixTypeNested& matrix() const { return m_matrix; }
+ _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+
+ /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
+ template<typename OtherDerived>
+ SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+ operator*(const MatrixBase<OtherDerived>& rhs) const
+ {
+ return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+ }
+
+ /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
+ template<typename OtherDerived> friend
+ DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+ operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+ {
+ return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+ }
+
+ /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+ * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+ *
+ * \returns a reference to \c *this
+ *
+ * Note that to overwrite \c *this with \f$ u u^* \f$ it is faster to pass alpha=0
+ * than to first set the matrix to zero and then use the default alpha=1.
+ *
+ * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+ * call this function with u.adjoint().
+ */
+ template<typename DerivedU>
+ SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+
+ /** \internal triggered by sparse_matrix = SparseSelfAdjointView; */
+ template<typename DestScalar> void evalTo(SparseMatrix<DestScalar>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
+ }
+
+ template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar>& _dest) const
+ {
+ // TODO directly evaluate into _dest;
+ SparseMatrix<DestScalar> tmp(_dest.rows(),_dest.cols());
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
+ _dest = tmp;
+ }
+
+ /** \returns an expression of P^-1 H P */
+ SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic>& perm) const
+ {
+ return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+ }
+
+ template<typename SrcMatrixType,int SrcUpLo>
+ SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+ {
+ permutedMatrix.evalTo(*this);
+ return *this;
+ }
+
+
+ // const SparseLLT<PlainObject, UpLo> llt() const;
+ // const SparseLDLT<PlainObject, UpLo> ldlt() const;
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ mutable VectorI m_countPerRow;
+ mutable VectorI m_countPerCol;
+};
+
+/***************************************************************************
+* Implementation of SparseMatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+{
+ return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+{
+ return derived();
+}
+
+/***************************************************************************
+* Implementation of SparseSelfAdjointView methods
+***************************************************************************/
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SparseSelfAdjointView<MatrixType,UpLo>&
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+{
+ SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
+ if(alpha==Scalar(0))
+ m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+ else
+ m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+
+ return *this;
+}
+
+/***************************************************************************
+* Implementation of sparse self-adjoint time dense matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct
+ : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
+
+ SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ // TODO use alpha
+ eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
+ typedef typename internal::remove_all<Lhs>::type _Lhs;
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum {
+ LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
+ ProcessFirstHalf =
+ ((UpLo&(Upper|Lower))==(Upper|Lower))
+ || ( (UpLo&Upper) && !LhsIsRowMajor)
+ || ( (UpLo&Lower) && LhsIsRowMajor),
+ ProcessSecondHalf = !ProcessFirstHalf
+ };
+ for (Index j=0; j<m_lhs.outerSize(); ++j)
+ {
+ LhsInnerIterator i(m_lhs,j);
+ if (ProcessSecondHalf && i && (i.index()==j))
+ {
+ dest.row(j) += i.value() * m_rhs.row(j);
+ ++i;
+ }
+ Block<Dest,1,Dest::ColsAtCompileTime> dest_j(dest.row(LhsIsRowMajor ? j : 0));
+ for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ Index a = LhsIsRowMajor ? j : i.index();
+ Index b = LhsIsRowMajor ? i.index() : j;
+ typename Lhs::Scalar v = i.value();
+ dest.row(a) += (v) * m_rhs.row(b);
+ dest.row(b) += internal::conj(v) * m_rhs.row(a);
+ }
+ if (ProcessFirstHalf && i && (i.index()==j))
+ dest.row(j) += i.value() * m_rhs.row(j);
+ }
+ }
+
+ private:
+ SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct
+ : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
+
+ DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+ {
+ // TODO
+ }
+
+ private:
+ DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
+};
+
+/***************************************************************************
+* Implementation of symmetric copies and permutations
+***************************************************************************/
+namespace internal {
+
+template<typename MatrixType, int UpLo>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+
+ Dest& dest(_dest.derived());
+ enum {
+ StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
+ };
+ eigen_assert(perm==0);
+ Index size = mat.rows();
+ VectorI count;
+ count.resize(size);
+ count.setZero();
+ dest.resize(size,size);
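+ // pass 1: count, per outer vector of the destination, the entries of the
+ // full two-sided copy: diagonal entries once, stored off-diagonal entries twice
+ // (the permutation path below is prepared but still disabled by the assert above)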
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index ip = perm ? perm[i] : i;
+ if(i==j)
+ count[ip]++;
+ else if((UpLo==Lower && i>j) || (UpLo==Upper && i<j))
+ {
+ count[ip]++;
+ count[jp]++;
+ }
+ }
+ }
+ Index nnz = count.sum();
+
+ // reserve space
+ dest.reserve(nnz);
+ dest._outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest._outerIndexPtr()[j+1] = dest._outerIndexPtr()[j] + count[j];
+ for(Index j=0; j<size; ++j)
+ count[j] = dest._outerIndexPtr()[j];
+
+ // copy data
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index ip = perm ? perm[i] : i;
+ if(i==j)
+ {
+ int k = count[ip]++;
+ dest._innerIndexPtr()[k] = ip;
+ dest._valuePtr()[k] = it.value();
+ }
+ else if((UpLo==Lower && i>j) || (UpLo==Upper && i<j))
+ {
+ int k = count[jp]++;
+ dest._innerIndexPtr()[k] = ip;
+ dest._valuePtr()[k] = it.value();
+ k = count[ip]++;
+ dest._innerIndexPtr()[k] = jp;
+ dest._valuePtr()[k] = internal::conj(it.value());
+ }
+ }
+ }
+}
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+ Dest& dest(_dest.derived());
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ //internal::conj_if<SrcUpLo!=DstUpLo> cj;
+
+ Index size = mat.rows();
+ VectorI count(size);
+ count.setZero();
+ dest.resize(size,size);
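+ // pass 1: count how many source entries land in each outer vector of the
+ // destination triangle once the permutation is applied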
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((SrcUpLo==Lower && i<j) || (SrcUpLo==Upper && i>j))
+ continue;
+
+ Index ip = perm ? perm[i] : i;
+ count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ }
+ }
+ dest._outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest._outerIndexPtr()[j+1] = dest._outerIndexPtr()[j] + count[j];
+ dest.resizeNonZeros(dest._outerIndexPtr()[size]);
+ for(Index j=0; j<size; ++j)
+ count[j] = dest._outerIndexPtr()[j];
+
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((SrcUpLo==Lower && i<j) || (SrcUpLo==Upper && i>j))
+ continue;
+
+ Index ip = perm? perm[i] : i;
+ Index k = count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ dest._innerIndexPtr()[k] = DstUpLo==Lower ? (std::max)(ip,jp) : (std::min)(ip,jp);
+
+ if((DstUpLo==Lower && ip<jp) || (DstUpLo==Upper && ip>jp))
+ dest._valuePtr()[k] = conj(it.value());
+ else
+ dest._valuePtr()[k] = it.value();
+ }
+ }
+}
+
+}
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct
+ : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+{
+ typedef PermutationMatrix<Dynamic> Perm;
+ public:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
+ : m_matrix(mat), m_perm(perm)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ template<typename DestScalar> void evalTo(SparseMatrix<DestScalar>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+ }
+
+ template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
+ {
+ internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
+ }
+
+ protected:
+ const MatrixTypeNested m_matrix;
+ const Perm& m_perm;
+
+};
+
+#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
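A usage sketch (not upstream code; n, the names and the pruning threshold are arbitrary): only one triangle of A is read, and both the product and the full-symmetric copy, which relies on the evalTo hook above, reconstruct the missing half.

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      const int n = 16;
      MatrixXd Ad = MatrixXd::Random(n,n);
      SparseMatrix<double> A = Ad.sparseView(1.0, 0.5);

      // symmetric matrix times dense vector, reading only A's lower triangle
      // (dispatches to SparseSelfAdjointTimeDenseProduct::scaleAndAddTo)
      VectorXd x = VectorXd::Random(n);
      VectorXd y = A.selfadjointView<Lower>() * x;

      // expand the stored triangle into a full symmetric sparse matrix
      // (goes through internal::permute_symm_to_fullsymm above)
      SparseMatrix<double> full;
      full = A.selfadjointView<Lower>();
      return y.size() == full.rows() ? 0 : 1;
    }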
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h
new file mode 100644
index 00000000000..19abcd1f8e4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h
@@ -0,0 +1,401 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSESPARSEPRODUCT_H
+#define EIGEN_SPARSESPARSEPRODUCT_H
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ std::vector<bool> mask(rows,false);
+ Matrix<Scalar,Dynamic,1> values(rows);
+ Matrix<Index,Dynamic,1> indices(rows);
+
+ // estimate the number of non zero entries
+ float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
+ float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
+ float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
+
+// int t200 = rows/(log2(200)*1.39);
+// int t = (rows*100)/139;
+
+ res.resize(rows, cols);
+ res.reserve(Index(ratioRes*rows*cols));
+ // we compute each column of the result, one after the other
+ for (Index j=0; j<cols; ++j)
+ {
+
+ res.startVec(j);
+ Index nnz = 0;
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ Scalar y = rhsIt.value();
+ Index k = rhsIt.index();
+ for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
+ {
+ Index i = lhsIt.index();
+ Scalar x = lhsIt.value();
+ if(!mask[i])
+ {
+ mask[i] = true;
+// values[i] = x * y;
+// indices[nnz] = i;
+ ++nnz;
+ }
+ else
+ values[i] += x * y;
+ }
+ }
+ // FIXME reserve nnz non zeros
+ // FIXME implement fast sort algorithms for very small nnz
+ // if the result is sparse enough => use a quick sort
+ // otherwise => loop through the entire vector
+ // In order to avoid to perform an expensive log2 when the
+ // result is clearly very sparse we use a linear bound up to 200.
+// if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
+// {
+// if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
+// for(int k=0; k<nnz; ++k)
+// {
+// int i = indices[k];
+// res.insertBackNoCheck(j,i) = values[i];
+// mask[i] = false;
+// }
+// }
+// else
+// {
+// // dense path
+// for(int i=0; i<rows; ++i)
+// {
+// if(mask[i])
+// {
+// mask[i] = false;
+// res.insertBackNoCheck(j,i) = values[i];
+// }
+// }
+// }
+
+ }
+ res.finalize();
+}
+
+// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+// return sparse_product_impl2(lhs,rhs,res);
+
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ //int size = lhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ // allocate a temporary buffer
+ AmbiVector<Scalar,Index> tempVector(rows);
+
+ // estimate the number of non zero entries
+ float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
+ float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
+ float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
+
+ // mimics a resizeByInnerOuter:
+ if(ResultType::IsRowMajor)
+ res.resize(cols, rows);
+ else
+ res.resize(rows, cols);
+
+ res.reserve(Index(ratioRes*rows*cols));
+ for (Index j=0; j<cols; ++j)
+ {
+ // let's do a more accurate determination of the nnz ratio for the current column j of res
+ //float ratioColRes = (std::min)(ratioLhs * rhs.innerNonZeros(j), 1.f);
+ // FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
+ float ratioColRes = ratioRes;
+ tempVector.init(ratioColRes);
+ tempVector.setZero();
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
+ tempVector.restart();
+ Scalar x = rhsIt.value();
+ for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+ {
+ tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
+ }
+ }
+ res.startVec(j);
+ for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector); it; ++it)
+ res.insertBackByOuterInner(j,it.index()) = it.value();
+ }
+ res.finalize();
+}
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_product_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+// std::cerr << __LINE__ << "\n";
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ sparse_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+// std::cerr << __LINE__ << "\n";
+ // we need a col-major matrix to hold the result
+ typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ SparseTemporaryType _res(res.rows(), res.cols());
+ sparse_product_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res);
+ res = _res;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+// std::cerr << __LINE__ << "\n";
+ // let's transpose the product to get a column x column product
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ sparse_product_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+// std::cerr << "here...\n";
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix colLhs(lhs);
+ ColMajorMatrix colRhs(rhs);
+// std::cerr << "more...\n";
+ sparse_product_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res);
+// std::cerr << "OK.\n";
+
+ // let's transpose the product to get a column x column product
+
+// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+// SparseTemporaryType _res(res.cols(), res.rows());
+// sparse_product_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
+// res = _res.transpose();
+ }
+};
+
+// NOTE the 2 other cases (col row *) must never occur since they are caught
+// by ProductReturnType, which transforms them to (col col *) by evaluating the rhs.
+
+} // end namespace internal
+
+// sparse = sparse * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+{
+// std::cerr << "there..." << typeid(Lhs).name() << " " << typeid(Lhs).name() << " " << (Derived::Flags&&RowMajorBit) << "\n";
+ internal::sparse_product_selector<
+ typename internal::remove_all<Lhs>::type,
+ typename internal::remove_all<Rhs>::type,
+ Derived>::run(product.lhs(),product.rhs(),derived());
+ return derived();
+}
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_product_selector2;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ sparse_product_impl2<Lhs,Rhs,ResultType>(lhs, rhs, res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ // prevent warnings until the code is fixed
+ EIGEN_UNUSED_VARIABLE(lhs);
+ EIGEN_UNUSED_VARIABLE(rhs);
+ EIGEN_UNUSED_VARIABLE(res);
+
+// typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+// RowMajorMatrix rhsRow = rhs;
+// RowMajorMatrix resRow(res.rows(), res.cols());
+// sparse_product_impl2<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
+// res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix lhsRow = lhs;
+ RowMajorMatrix resRow(res.rows(), res.cols());
+ sparse_product_impl2<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix resRow(res.rows(), res.cols());
+ sparse_product_impl2<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+ res = resRow;
+ }
+};
+
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix lhsCol = lhs;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix rhsCol = rhs;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+// ColMajorMatrix lhsTr(lhs);
+// ColMajorMatrix rhsTr(rhs);
+// ColMajorMatrix aux(res.rows(), res.cols());
+// sparse_product_impl2<Rhs,Lhs,ColMajorMatrix>(rhs, lhs, aux);
+// // ColMajorMatrix aux2 = aux.transpose();
+// res = aux;
+ ColMajorMatrix lhsCol(lhs);
+ ColMajorMatrix rhsCol(rhs);
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<ColMajorMatrix,ColMajorMatrix,ColMajorMatrix>(lhsCol, rhsCol, resCol);
+ res = resCol;
+ }
+};
+
+} // end namespace internal
+
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline void SparseMatrixBase<Derived>::_experimentalNewProduct(const Lhs& lhs, const Rhs& rhs)
+{
+ //derived().resize(lhs.rows(), rhs.cols());
+ internal::sparse_product_selector2<
+ typename internal::remove_all<Lhs>::type,
+ typename internal::remove_all<Rhs>::type,
+ Derived>::run(lhs,rhs,derived());
+}
+
+// sparse * sparse
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+#endif // EIGEN_SPARSESPARSEPRODUCT_H
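To make the dispatch concrete, a sketch (not upstream code, arbitrary data): the selector above picks an implementation from the three storage orders, and a row-major destination costs an extra copy because sparse_product_impl produces a column-major temporary.

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      typedef SparseMatrix<double>          SpColMajor; // the default order
      typedef SparseMatrix<double,RowMajor> SpRowMajor;

      MatrixXd Ad = MatrixXd::Random(8,8), Bd = MatrixXd::Random(8,8);
      SpColMajor A = Ad.sparseView(1.0, 0.5);
      SpColMajor B = Bd.sparseView(1.0, 0.5);

      SpColMajor Cc = A * B; // (col,col,col): pseudo in-place product, then swap
      SpRowMajor Cr = A * B; // (col,col,row): col-major temporary, then conversion
      return Cc.nonZeros() == Cr.nonZeros() ? 0 : 1;
    }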
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h b/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h
new file mode 100644
index 00000000000..2aea2fa32c7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h
@@ -0,0 +1,68 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRANSPOSE_H
+#define EIGEN_SPARSETRANSPOSE_H
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+ : public SparseMatrixBase<Transpose<MatrixType> >
+{
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
+ : public _MatrixTypeNested::InnerIterator
+{
+ typedef typename _MatrixTypeNested::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, Index outer)
+ : Base(trans.derived().nestedExpression(), outer)
+ {}
+ inline Index row() const { return Base::col(); }
+ inline Index col() const { return Base::row(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
+ : public _MatrixTypeNested::ReverseInnerIterator
+{
+ typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, Index outer)
+ : Base(xpr.derived().nestedExpression(), outer)
+ {}
+ inline Index row() const { return Base::col(); }
+ inline Index col() const { return Base::row(); }
+};
+
+#endif // EIGEN_SPARSETRANSPOSE_H
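A short sketch (not upstream code): the iterators above only swap row() and col() while walking the nested matrix, so transposition is lazy and copy-free until the expression is assigned to a destination.

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      MatrixXd Ad = MatrixXd::Random(6,4);
      SparseMatrix<double> A = Ad.sparseView(1.0, 0.5);
      SparseMatrix<double> B = A.transpose(); // evaluation happens here
      return B.nonZeros() == A.nonZeros() ? 0 : 1;
    }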
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h b/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h
new file mode 100644
index 00000000000..319eaf06638
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h
@@ -0,0 +1,100 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
+#define EIGEN_SPARSE_TRIANGULARVIEW_H
+
+namespace internal {
+
+template<typename MatrixType, int Mode>
+struct traits<SparseTriangularView<MatrixType,Mode> >
+: public traits<MatrixType>
+{};
+
+} // namespace internal
+
+template<typename MatrixType, int Mode> class SparseTriangularView
+ : public SparseMatrixBase<SparseTriangularView<MatrixType,Mode> >
+{
+ enum { SkipFirst = (Mode==Lower && !(MatrixType::Flags&RowMajorBit))
+ || (Mode==Upper && (MatrixType::Flags&RowMajorBit)) };
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView)
+
+ class InnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ typedef typename internal::conditional<internal::must_nest_by_value<MatrixType>::ret,
+ MatrixType, const MatrixType&>::type MatrixTypeNested;
+
+ inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {}
+
+ /** \internal */
+ inline const MatrixType& nestedExpression() const { return m_matrix; }
+
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solve(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
+
+ protected:
+ MatrixTypeNested m_matrix;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixType::InnerIterator
+{
+ typedef typename MatrixType::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
+ : Base(view.nestedExpression(), outer)
+ {
+ if(SkipFirst)
+ while((*this) && this->index()<outer)
+ ++(*this);
+ }
+ inline Index row() const { return Base::row(); }
+ inline Index col() const { return Base::col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const
+ {
+ return SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer());
+ }
+};
+
+template<typename Derived>
+template<int Mode>
+inline const SparseTriangularView<Derived, Mode>
+SparseMatrixBase<Derived>::triangularView() const
+{
+ return derived();
+}
+
+#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
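A sketch of the solver entry points (not upstream code; the identity shift only keeps the diagonal nonzero so the substitution is well posed): solve() and solveInPlace() declared above are implemented in TriangularSolver.h further down in this commit.

    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      const int n = 8;
      MatrixXd D = MatrixXd::Random(n,n) + 4.0 * MatrixXd::Identity(n,n);
      SparseMatrix<double> A = D.sparseView(1.0, 0.1);

      VectorXd b = VectorXd::Random(n);
      VectorXd x = A.triangularView<Lower>().solve(b); // forward substitution
      A.triangularView<Lower>().solveInPlace(b);       // overwrites b with the solution
      return (x - b).norm() < 1e-10 ? 0 : 1;
    }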
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h b/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h
new file mode 100644
index 00000000000..db9ae98e7a0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h
@@ -0,0 +1,130 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEUTIL_H
+#define EIGEN_SPARSEUTIL_H
+
+#ifdef NDEBUG
+#define EIGEN_DBG_SPARSE(X)
+#else
+#define EIGEN_DBG_SPARSE(X) X
+#endif
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
+{ \
+ return Base::operator Op(other.derived()); \
+} \
+EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
+{ \
+ return Base::operator Op(other); \
+}
+
+#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename Other> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
+{ \
+ return Base::operator Op(scalar); \
+}
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
+
+#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
+ typedef BaseClass Base; \
+ typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::internal::nested<Derived>::type Nested; \
+ typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived>::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+ using Base::derived; \
+ using Base::const_cast_derived;
+
+#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
+
+const int CoherentAccessPattern = 0x1;
+const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
+
+template<typename Derived> class SparseMatrixBase;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
+
+template<typename MatrixType, int Size> class SparseInnerVectorSet;
+template<typename MatrixType, int Mode> class SparseTriangularView;
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
+template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
+template<typename MatrixType> class SparseView;
+
+template<typename Lhs, typename Rhs> class SparseSparseProduct;
+template<typename Lhs, typename Rhs> class SparseTimeDenseProduct;
+template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
+template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
+
+template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType;
+
+namespace internal {
+
+template<typename T> struct eval<T,Sparse>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+template<typename T> struct plain_matrix_type<T,Sparse>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+} // end namespace internal
+
+#endif // EIGEN_SPARSEUTIL_H
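A minimal sketch of the debug helper above (not upstream code): EIGEN_DBG_SPARSE keeps its argument in debug builds and compiles it away entirely under NDEBUG.

    #include <iostream>
    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      MatrixXd Ad = MatrixXd::Random(4,4);
      SparseMatrix<double> A = Ad.sparseView(1.0, 0.5);
      // expands to the statement in debug builds, to nothing when NDEBUG is set
      EIGEN_DBG_SPARSE(std::cout << "nnz = " << A.nonZeros() << std::endl);
      return 0;
    }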
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseVector.h b/extern/Eigen3/Eigen/src/Sparse/SparseVector.h
new file mode 100644
index 00000000000..ce4bb51a27e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseVector.h
@@ -0,0 +1,431 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEVECTOR_H
+#define EIGEN_SPARSEVECTOR_H
+
+/** \class SparseVector
+ *
+ * \brief a sparse vector class
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
+ */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseVector<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ IsColVector = _Options & RowMajorBit ? 0 : 1,
+
+ RowsAtCompileTime = IsColVector ? Dynamic : 1,
+ ColsAtCompileTime = IsColVector ? 1 : Dynamic,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseVector
+ : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
+// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =)
+
+ public:
+
+ typedef SparseMatrixBase<SparseVector> SparseBase;
+ enum { IsColVector = internal::traits<SparseVector>::IsColVector };
+
+ enum {
+ Options = _Options
+ };
+
+ CompressedStorage<Scalar,Index> m_data;
+ Index m_size;
+
+ CompressedStorage<Scalar,Index>& _data() { return m_data; }
+ const CompressedStorage<Scalar,Index>& _data() const { return m_data; }
+
+ public:
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
+ EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
+ EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
+ EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
+ EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { eigen_assert(j==0); return m_size; }
+
+ EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); }
+ EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); }
+
+ EIGEN_STRONG_INLINE const Index* _innerIndexPtr() const { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE Index* _innerIndexPtr() { return &m_data.index(0); }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ eigen_assert((IsColVector ? col : row)==0);
+ return coeff(IsColVector ? row : col);
+ }
+ inline Scalar coeff(Index i) const { return m_data.at(i); }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ eigen_assert((IsColVector ? col : row)==0);
+ return coeffRef(IsColVector ? row : col);
+ }
+
+ /** \returns a reference to the coefficient value at given index \a i
+ * This operation involves a log(rho*size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ *
+ * This insertion might be very costly if the number of nonzeros above \a i is large.
+ */
+ inline Scalar& coeffRef(Index i)
+ {
+ return m_data.atWithInsertion(i);
+ }
+
+ public:
+
+ class InnerIterator;
+
+ inline void setZero() { m_data.clear(); }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
+
+ inline void startVec(Index outer)
+ {
+ eigen_assert(outer==0);
+ }
+
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(outer==0);
+ return insertBack(inner);
+ }
+ inline Scalar& insertBack(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ inline Scalar& insert(Index row, Index col)
+ {
+ Index inner = IsColVector ? row : col;
+ Index outer = IsColVector ? col : row;
+ eigen_assert(outer==0);
+ return insert(inner);
+ }
+ Scalar& insert(Index i)
+ {
+ Index startId = 0;
+ Index p = m_data.size() - 1;
+ // TODO smart realloc
+ m_data.resize(p+2,1);
+
+ while ( (p >= startId) && (m_data.index(p) > i) )
+ {
+ m_data.index(p+1) = m_data.index(p);
+ m_data.value(p+1) = m_data.value(p);
+ --p;
+ }
+ m_data.index(p+1) = i;
+ m_data.value(p+1) = 0;
+ return m_data.value(p+1);
+ }
+
+ /** Reserves room for \a reserveSize non zeros. */
+ inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
+
+
+ inline void finalize() {}
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ m_data.prune(reference,epsilon);
+ }
+
+ void resize(Index rows, Index cols)
+ {
+ eigen_assert(rows==1 || cols==1);
+ resize(IsColVector ? rows : cols);
+ }
+
+ void resize(Index newSize)
+ {
+ m_size = newSize;
+ m_data.clear();
+ }
+
+ void resizeNonZeros(Index size) { m_data.resize(size); }
+
+ inline SparseVector() : m_size(0) { resize(0); }
+
+ inline SparseVector(Index size) : m_size(0) { resize(size); }
+
+ inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
+
+ template<typename OtherDerived>
+ inline SparseVector(const MatrixBase<OtherDerived>& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ template<typename OtherDerived>
+ inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline SparseVector(const SparseVector& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(SparseVector& other)
+ {
+ std::swap(m_size, other.m_size);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseVector& operator=(const SparseVector& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.size());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
+ return Base::operator=(other.transpose());
+ else
+ return Base::operator=(other);
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ {
+ return Base::operator=(product);
+ }
+ #endif
+
+// const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+// if (needToTranspose)
+// {
+// // two passes algorithm:
+// // 1 - compute the number of coeffs per dest inner vector
+// // 2 - do the actual copy/eval
+// // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+// typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+// OtherCopy otherCopy(other.derived());
+// typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+//
+// resize(other.rows(), other.cols());
+// Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
+// // pass 1
+// // FIXME the above copy could be merged with that pass
+// for (int j=0; j<otherCopy.outerSize(); ++j)
+// for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+// ++m_outerIndex[it.index()];
+//
+// // prefix sum
+// int count = 0;
+// VectorXi positions(outerSize());
+// for (int j=0; j<outerSize(); ++j)
+// {
+// int tmp = m_outerIndex[j];
+// m_outerIndex[j] = count;
+// positions[j] = count;
+// count += tmp;
+// }
+// m_outerIndex[outerSize()] = count;
+// // alloc
+// m_data.resize(count);
+// // pass 2
+// for (int j=0; j<otherCopy.outerSize(); ++j)
+// for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+// {
+// int pos = positions[it.index()]++;
+// m_data.index(pos) = j;
+// m_data.value(pos) = it.value();
+// }
+//
+// return *this;
+// }
+// else
+// {
+// // there is no special optimization
+// return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
+// }
+// }
+
+ friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
+ {
+ for (Index i=0; i<m.nonZeros(); ++i)
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ s << std::endl;
+ return s;
+ }
+
+ // this specialized version does not seem to be faster
+// Scalar dot(const SparseVector& other) const
+// {
+// int i=0, j=0;
+// Scalar res = 0;
+// asm("#begindot");
+// while (i<nonZeros() && j<other.nonZeros())
+// {
+// if (m_data.index(i)==other.m_data.index(j))
+// {
+// res += m_data.value(i) * internal::conj(other.m_data.value(j));
+// ++i; ++j;
+// }
+// else if (m_data.index(i)<other.m_data.index(j))
+// ++i;
+// else
+// ++j;
+// }
+// asm("#enddot");
+// return res;
+// }
+
+ /** Destructor */
+ inline ~SparseVector() {}
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ public:
+
+ /** \deprecated use setZero() and reserve() */
+ EIGEN_DEPRECATED void startFill(Index reserve)
+ {
+ setZero();
+ m_data.reserve(reserve);
+ }
+
+ /** \deprecated use insertBack(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fill(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insertBack(Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ /** \deprecated use insert(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fillrand(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insert(Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index i)
+ {
+ return insert(i);
+ }
+
+ /** \deprecated use finalize() */
+ EIGEN_DEPRECATED void endFill() {}
+
+# ifdef EIGEN_SPARSEVECTOR_PLUGIN
+# include EIGEN_SPARSEVECTOR_PLUGIN
+# endif
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseVector& vec, Index outer=0)
+ : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {
+ eigen_assert(outer==0);
+ }
+
+ InnerIterator(const CompressedStorage<Scalar,Index>& data)
+ : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {}
+
+ template<unsigned int Added, unsigned int Removed>
+ InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, Index )
+ : m_data(vec._expression().m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_data.value(m_id); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
+
+ inline Index index() const { return m_data.index(m_id); }
+ inline Index row() const { return IsColVector ? index() : 0; }
+ inline Index col() const { return IsColVector ? 0 : index(); }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const CompressedStorage<Scalar,Index>& m_data;
+ Index m_id;
+ const Index m_end;
+};
+
+#endif // EIGEN_SPARSEVECTOR_H
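The three insertion paths differ in cost, as this sketch shows (not upstream code; the values are arbitrary): insertBack is O(1) but requires increasing indices, insert shifts the tail of the compressed buffer, and coeffRef performs the binary search documented above.

    #include <iostream>
    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      SparseVector<double> v(10);
      v.reserve(3);
      v.insert(2) = 1.0;    // sorted insertion into the compressed buffer
      v.insert(7) = -4.0;
      v.coeffRef(5) += 2.5; // binary search; inserts the missing coefficient first
      std::cout << v;       // operator<< above prints "(value,index)" pairs
      return v.nonZeros() == 3 ? 0 : 1;
    }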
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseView.h b/extern/Eigen3/Eigen/src/Sparse/SparseView.h
new file mode 100644
index 00000000000..24306561098
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseView.h
@@ -0,0 +1,109 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEVIEW_H
+#define EIGEN_SPARSEVIEW_H
+
+namespace internal {
+
+template<typename MatrixType>
+struct traits<SparseView<MatrixType> > : traits<MatrixType>
+{
+ typedef int Index;
+ typedef Sparse StorageKind;
+ enum {
+ Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
+ };
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
+{
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+
+ SparseView(const MatrixType& mat, const Scalar& m_reference = Scalar(0),
+ typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) :
+ m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {}
+
+ class InnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ inline Index innerSize() const { return m_matrix.innerSize(); }
+ inline Index outerSize() const { return m_matrix.outerSize(); }
+
+protected:
+ const MatrixTypeNested m_matrix;
+ Scalar m_reference;
+ typename NumTraits<Scalar>::Real m_epsilon;
+};
+
+template<typename MatrixType>
+class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
+{
+public:
+ typedef typename _MatrixTypeNested::InnerIterator IterBase;
+ InnerIterator(const SparseView& view, Index outer) :
+ IterBase(view.m_matrix, outer), m_view(view)
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ IterBase::operator++();
+ incrementToNonZero();
+ return *this;
+ }
+
+ using IterBase::value;
+
+protected:
+ const SparseView& m_view;
+
+private:
+ void incrementToNonZero()
+ {
+ // check that the iterator is still valid before reading value()
+ while(bool(*this) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
+ {
+ IterBase::operator++();
+ }
+ }
+};
+
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
+ typename NumTraits<Scalar>::Real m_epsilon) const
+{
+ return SparseView<Derived>(derived(), m_reference, m_epsilon);
+}
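+
+// A minimal usage sketch of sparseView(); the dense matrix m and the
+// reference/threshold pair are assumptions for illustration:
+//
+// MatrixXd m = MatrixXd::Random(100,100);
+// SparseMatrix<double> s = m.sparseView(1.0, 1e-6); // drops entries with |m(i,j)| <= 1e-6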
+
+#endif
diff --git a/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h b/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h
new file mode 100644
index 00000000000..62bb8bb44c9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h
@@ -0,0 +1,339 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
+#define EIGEN_SPARSETRIANGULARSOLVER_H
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
+struct sparse_solve_triangular_selector;
+
+// forward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.rows(); ++i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar lastVal = 0;
+ int lastIndex = 0;
+ for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
+ {
+ lastVal = it.value();
+ lastIndex = it.index();
+ if(lastIndex==i)
+ break;
+ tmp -= lastVal * other.coeff(lastIndex,col);
+ }
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ eigen_assert(lastIndex==i);
+ other.coeffRef(i,col) = tmp/lastVal;
+ }
+ }
+ }
+ }
+};
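+
+// For reference, the row-major forward substitution above computes, for each
+// right-hand-side column b and each row i of the lower triangular matrix L:
+//
+// x(i) = ( b(i) - sum_{j<i} L(i,j) * x(j) ) / L(i,i)
+//
+// skipping the final division when Mode has the UnitDiag flag.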
+
+// backward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.rows()-1 ; i>=0 ; --i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ typename Lhs::InnerIterator it(lhs, i);
+ if (it && it.index() == i)
+ ++it;
+ for(; it; ++it)
+ {
+ tmp -= it.value() * other.coeff(it.index(),col);
+ }
+
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ eigen_assert(it && it.index() == i);
+ other.coeffRef(i,col) = tmp/it.value();
+ }
+ }
+ }
+ }
+};
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.cols(); ++i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Mode & UnitDiag))
+ {
+ eigen_assert(it.index()==i);
+ tmp /= it.value();
+ }
+ if (it && it.index()==i)
+ ++it;
+ for(; it; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.cols()-1; i>=0; --i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ if(!(Mode & UnitDiag))
+ {
+ // FIXME lhs.coeff(i,i) might not always be efficient, while it should simply be the
+ // last element of the column!
+ other.coeffRef(i,col) /= lhs.innerVector(i).lastCoeff();
+ }
+ typename Lhs::InnerIterator it(lhs, i);
+ for(; it && it.index()<i; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows());
+ eigen_assert(m_matrix.cols() == other.rows());
+ eigen_assert(!(Mode & ZeroDiag));
+ eigen_assert((Mode & (Upper|Lower)) != 0);
+
+ enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+ typedef typename internal::conditional<copy,
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(m_matrix, otherCopy);
+
+ if (copy)
+ other = otherCopy;
+}
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseTriangularView<ExpressionType,Mode>::solve(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ solveInPlace(res);
+ return res;
+}
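+
+// A minimal usage sketch of the dense-rhs path above; the sparse matrix A
+// (square, with a non-singular lower triangular part) and the dense vector b
+// are assumptions for illustration:
+//
+// VectorXd x = A.triangularView<Lower>().solve(b); // returns the solution of L x = b
+// A.triangularView<Lower>().solveInPlace(b); // or overwrite b in place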
+
+// pure sparse path
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
+struct sparse_solve_triangular_sparse_selector;
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode, int UpLo>
+struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ const bool IsLower = (UpLo==Lower);
+ AmbiVector<Scalar,Index> tempVector(other.rows()*2);
+ tempVector.setBounds(0,other.rows());
+
+ Rhs res(other.rows(), other.cols());
+ res.reserve(other.nonZeros());
+
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ // FIXME estimate number of non zeros
+ tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
+ tempVector.setZero();
+ tempVector.restart();
+ for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
+ {
+ tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
+ }
+
+ for(int i=IsLower?0:lhs.cols()-1;
+ IsLower?i<lhs.cols():i>=0;
+ i+=IsLower?1:-1)
+ {
+ tempVector.restart();
+ Scalar& ci = tempVector.coeffRef(i);
+ if (ci!=Scalar(0))
+ {
+ // find
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Mode & UnitDiag))
+ {
+ if (IsLower)
+ {
+ eigen_assert(it.index()==i);
+ ci /= it.value();
+ }
+ else
+ ci /= lhs.coeff(i,i);
+ }
+ tempVector.restart();
+ if (IsLower)
+ {
+ if (it.index()==i)
+ ++it;
+ for(; it; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ else
+ {
+ for(; it && it.index()<i; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ }
+ }
+
+
+ int count = 0;
+ // FIXME compute a reference value to filter zeros
+ for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
+ {
+ ++ count;
+// std::cerr << "fill " << it.index() << ", " << col << "\n";
+// std::cout << it.value() << " ";
+ // FIXME use insertBack
+ res.insert(it.index(), col) = it.value();
+ }
+// std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n";
+ }
+ res.finalize();
+ other = res.markAsRValue();
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows());
+ eigen_assert(m_matrix.cols() == other.rows());
+ eigen_assert(!(Mode & ZeroDiag));
+ eigen_assert((Mode & (Upper|Lower)) != 0);
+
+// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+// typedef typename internal::conditional<copy,
+// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+// OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(m_matrix, other.derived());
+
+// if (copy)
+// other = otherCopy;
+}
+
+#ifdef EIGEN2_SUPPORT
+
+// deprecated stuff:
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
+{
+ this->template triangular<Flags&(Upper|Lower)>().solveInPlace(other);
+}
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ derived().solveTriangularInPlace(res);
+ return res;
+}
+#endif // EIGEN2_SUPPORT
+
+#endif // EIGEN_SPARSETRIANGULARSOLVER_H
diff --git a/extern/Eigen3/Eigen/src/SparseCholesky/CMakeLists.txt b/extern/Eigen3/Eigen/src/SparseCholesky/CMakeLists.txt
new file mode 100644
index 00000000000..375a59d7a57
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCholesky/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseCholesky_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SparseCholesky_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCholesky COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h b/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
new file mode 100644
index 00000000000..5a1255a27a3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -0,0 +1,876 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+
+NOTE: the _symbolic and _numeric functions have been adapted from
+ the LDL library:
+
+LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
+
+LDL License:
+
+ Your use or distribution of LDL or any modified version of
+ LDL implies that you agree to this License.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+ Permission is hereby granted to use or copy this program under the
+ terms of the GNU LGPL, provided that the Copyright, this License,
+ and the Availability of the original version is retained on all copies.
+ User documentation of any code that uses this code or any modified
+ version of this code must cite the Copyright, this License, the
+ Availability note, and "Used by permission." Permission to modify
+ the code and to distribute modified code is granted, provided the
+ Copyright, this License, and the Availability note are retained,
+ and a notice that the code was modified is included.
+ */
+
+#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H
+#define EIGEN_SIMPLICIAL_CHOLESKY_H
+
+namespace Eigen {
+
+enum SimplicialCholeskyMode {
+ SimplicialCholeskyLLT,
+ SimplicialCholeskyLDLT
+};
+
+/** \ingroup SparseCholesky_Module
+ * \brief A direct sparse Cholesky factorization
+ *
+ * These classes provide LL^T and LDL^T Cholesky factorizations of sparse matrices that are
+ * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
+ */
+template<typename Derived>
+class SimplicialCholeskyBase : internal::noncopyable
+{
+ public:
+ typedef typename internal::traits<Derived>::MatrixType MatrixType;
+ enum { UpLo = internal::traits<Derived>::UpLo };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+ public:
+
+ /** Default constructor */
+ SimplicialCholeskyBase()
+ : m_info(Success), m_isInitialized(false), m_shiftOffset(0), m_shiftScale(1)
+ {}
+
+ SimplicialCholeskyBase(const MatrixType& matrix)
+ : m_info(Success), m_isInitialized(false), m_shiftOffset(0), m_shiftScale(1)
+ {
+ derived().compute(matrix);
+ }
+
+ ~SimplicialCholeskyBase()
+ {
+ }
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if the computation was successful,
+ * \c NumericalIssue if the matrix appears not to be positive definite.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<SimplicialCholeskyBase, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "SimplicialCholeskyBase::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<SimplicialCholeskyBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<SimplicialCholeskyBase, Rhs>
+ solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "SimplicialCholesky::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<SimplicialCholeskyBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the permutation P
+ * \sa permutationPinv() */
+ const PermutationMatrix<Dynamic,Dynamic,Index>& permutationP() const
+ { return m_P; }
+
+ /** \returns the inverse P^-1 of the permutation P
+ * \sa permutationP() */
+ const PermutationMatrix<Dynamic,Dynamic,Index>& permutationPinv() const
+ { return m_Pinv; }
+
+ /** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization.
+ *
+ * During the numerical factorization, the diagonal coefficients are transformed by the following linear model:\n
+ * \c d_ii = \a offset + \a scale * \c d_ii
+ *
+ * The default is the identity transformation with \a offset=0, and \a scale=1.
+ *
+ * \returns a reference to \c *this.
+ */
+ Derived& setShift(const RealScalar& offset, const RealScalar& scale = 1)
+ {
+ m_shiftOffset = offset;
+ m_shiftScale = scale;
+ return derived();
+ }
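+
+ // For example, to factorize A + 1e-8*I instead of a nearly singular A (the
+ // shift value 1e-8 and the matrix A are assumptions for illustration):
+ //
+ // SimplicialLDLT<SparseMatrix<double> > ldlt;
+ // ldlt.setShift(1e-8); // d_ii = 1e-8 + 1*d_ii during the numerical factorization
+ // ldlt.compute(A);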
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename Stream>
+ void dumpMemory(Stream& s)
+ {
+ int total = 0;
+ s << " L: " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n";
+ s << " diag: " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n";
+ s << " tree: " << ((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+ s << " nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+ s << " perm: " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+ s << " perm^-1: " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+ s << " TOTAL: " << (total>> 20) << "Mb" << "\n";
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+ eigen_assert(m_matrix.rows()==b.rows());
+
+ if(m_info!=Success)
+ return;
+
+ if(m_P.size()>0)
+ dest = m_Pinv * b;
+ else
+ dest = b;
+
+ if(m_matrix.nonZeros()>0) // otherwise L==I
+ derived().matrixL().solveInPlace(dest);
+
+ if(m_diag.size()>0)
+ dest = m_diag.asDiagonal().inverse() * dest;
+
+ if (m_matrix.nonZeros()>0) // otherwise U==I
+ derived().matrixU().solveInPlace(dest);
+
+ if(m_P.size()>0)
+ dest = m_P * dest;
+ }
+
+ /** \internal */
+ template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
+ void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+ {
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+ eigen_assert(m_matrix.rows()==b.rows());
+
+ // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
+ static const int NbColsAtOnce = 4;
+ int rhsCols = b.cols();
+ int size = b.rows();
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
+ for(int k=0; k<rhsCols; k+=NbColsAtOnce)
+ {
+ int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
+ tmp.leftCols(actualCols) = b.middleCols(k,actualCols);
+ tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols));
+ dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView();
+ }
+ }
+
+#endif // EIGEN_PARSED_BY_DOXYGEN
+
+ protected:
+
+ /** Computes the sparse Cholesky decomposition of \a matrix */
+ template<bool DoLDLT>
+ void compute(const MatrixType& matrix)
+ {
+ eigen_assert(matrix.rows()==matrix.cols());
+ Index size = matrix.cols();
+ CholMatrixType ap(size,size);
+ ordering(matrix, ap);
+ analyzePattern_preordered(ap, DoLDLT);
+ factorize_preordered<DoLDLT>(ap);
+ }
+
+ template<bool DoLDLT>
+ void factorize(const MatrixType& a)
+ {
+ eigen_assert(a.rows()==a.cols());
+ int size = a.cols();
+ CholMatrixType ap(size,size);
+ ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_Pinv);
+ factorize_preordered<DoLDLT>(ap);
+ }
+
+ template<bool DoLDLT>
+ void factorize_preordered(const CholMatrixType& a);
+
+ void analyzePattern(const MatrixType& a, bool doLDLT)
+ {
+ eigen_assert(a.rows()==a.cols());
+ int size = a.cols();
+ CholMatrixType ap(size,size);
+ ordering(a, ap);
+ analyzePattern_preordered(ap,doLDLT);
+ }
+ void analyzePattern_preordered(const CholMatrixType& a, bool doLDLT);
+
+ void ordering(const MatrixType& a, CholMatrixType& ap);
+
+ /** keeps off-diagonal entries; drops diagonal entries */
+ struct keep_diag {
+ inline bool operator() (const Index& row, const Index& col, const Scalar&) const
+ {
+ return row!=col;
+ }
+ };
+
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ bool m_factorizationIsOk;
+ bool m_analysisIsOk;
+
+ CholMatrixType m_matrix;
+ VectorType m_diag; // the diagonal coefficients (LDLT mode)
+ VectorXi m_parent; // elimination tree
+ VectorXi m_nonZerosPerCol;
+ PermutationMatrix<Dynamic,Dynamic,Index> m_P; // the permutation
+ PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv; // the inverse permutation
+
+ RealScalar m_shiftOffset;
+ RealScalar m_shiftScale;
+};
+
+template<typename _MatrixType, int _UpLo = Lower> class SimplicialLLT;
+template<typename _MatrixType, int _UpLo = Lower> class SimplicialLDLT;
+template<typename _MatrixType, int _UpLo = Lower> class SimplicialCholesky;
+
+namespace internal {
+
+template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixType,_UpLo> >
+{
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar, ColMajor, Index> CholMatrixType;
+ typedef SparseTriangularView<CholMatrixType, Eigen::Lower> MatrixL;
+ typedef SparseTriangularView<typename CholMatrixType::AdjointReturnType, Eigen::Upper> MatrixU;
+ static inline MatrixL getL(const MatrixType& m) { return m; }
+ static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixType,_UpLo> >
+{
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar, ColMajor, Index> CholMatrixType;
+ typedef SparseTriangularView<CholMatrixType, Eigen::UnitLower> MatrixL;
+ typedef SparseTriangularView<typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;
+ static inline MatrixL getL(const MatrixType& m) { return m; }
+ static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_MatrixType,_UpLo> >
+{
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+};
+
+}
+
+/** \ingroup SparseCholesky_Module
+ * \class SimplicialLLT
+ * \brief A direct sparse LLT Cholesky factorization
+ *
+ * This class provides an LL^T Cholesky factorization of sparse matrices that are
+ * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
+ * \sa class SimplicialLDLT
+ */
+template<typename _MatrixType, int _UpLo>
+ class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo> >
+{
+public:
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef SimplicialCholeskyBase<SimplicialLLT> Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ typedef internal::traits<SimplicialLLT> Traits;
+ typedef typename Traits::MatrixL MatrixL;
+ typedef typename Traits::MatrixU MatrixU;
+public:
+ /** Default constructor */
+ SimplicialLLT() : Base() {}
+ /** Constructs and performs the LLT factorization of \a matrix */
+ SimplicialLLT(const MatrixType& matrix)
+ : Base(matrix) {}
+
+ /** \returns an expression of the factor L */
+ inline const MatrixL matrixL() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized");
+ return Traits::getL(Base::m_matrix);
+ }
+
+ /** \returns an expression of the factor U (= L^*) */
+ inline const MatrixU matrixU() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized");
+ return Traits::getU(Base::m_matrix);
+ }
+
+ /** Computes the sparse Cholesky decomposition of \a matrix */
+ SimplicialLLT& compute(const MatrixType& matrix)
+ {
+ Base::template compute<false>(matrix);
+ return *this;
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& a)
+ {
+ Base::analyzePattern(a, false);
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& a)
+ {
+ Base::template factorize<false>(a);
+ }
+
+ /** \returns the determinant of the underlying matrix from the current factorization */
+ Scalar determinant() const
+ {
+ Scalar detL = Base::m_matrix.diagonal().prod();
+ return internal::abs2(detL);
+ }
+};
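+
+// A minimal usage sketch; the selfadjoint positive definite sparse matrix A
+// and the dense right-hand side b are assumptions for illustration:
+//
+// SimplicialLLT<SparseMatrix<double> > llt(A); // factorizes in the constructor
+// if(llt.info()==Success)
+// VectorXd x = llt.solve(b); // solves A x = b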
+
+/** \ingroup SparseCholesky_Module
+ * \class SimplicialLDLT
+ * \brief A direct sparse LDLT Cholesky factorization without square root.
+ *
+ * This class provides an LDL^T Cholesky factorization, without square root, of sparse matrices that are
+ * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ *
+ * \sa class SimplicialLLT
+ */
+template<typename _MatrixType, int _UpLo>
+ class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo> >
+{
+public:
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef SimplicialCholeskyBase<SimplicialLDLT> Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ typedef internal::traits<SimplicialLDLT> Traits;
+ typedef typename Traits::MatrixL MatrixL;
+ typedef typename Traits::MatrixU MatrixU;
+public:
+ /** Default constructor */
+ SimplicialLDLT() : Base() {}
+
+ /** Constructs and performs the LDLT factorization of \a matrix */
+ SimplicialLDLT(const MatrixType& matrix)
+ : Base(matrix) {}
+
+ /** \returns a vector expression of the diagonal D */
+ inline const VectorType vectorD() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+ return Base::m_diag;
+ }
+ /** \returns an expression of the factor L */
+ inline const MatrixL matrixL() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+ return Traits::getL(Base::m_matrix);
+ }
+
+ /** \returns an expression of the factor U (= L^*) */
+ inline const MatrixU matrixU() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+ return Traits::getU(Base::m_matrix);
+ }
+
+ /** Computes the sparse Cholesky decomposition of \a matrix */
+ SimplicialLDLT& compute(const MatrixType& matrix)
+ {
+ Base::template compute<true>(matrix);
+ return *this;
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& a)
+ {
+ Base::analyzePattern(a, true);
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& a)
+ {
+ Base::template factorize<true>(a);
+ }
+
+ /** \returns the determinant of the underlying matrix from the current factorization */
+ Scalar determinant() const
+ {
+ return Base::m_diag.prod();
+ }
+};
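+
+// A minimal usage sketch; A (selfadjoint positive definite, sparse) and the
+// dense right-hand side b are assumptions for illustration:
+//
+// SimplicialLDLT<SparseMatrix<double> > ldlt(A);
+// if(ldlt.info()==Success)
+// {
+// VectorXd x = ldlt.solve(b); // solves A x = b
+// VectorXd d = ldlt.vectorD(); // diagonal of the factor D
+// }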
+
+/** \deprecated use SimplicialLDLT or SimplicialLLT
+ * \ingroup SparseCholesky_Module
+ * \class SimplicialCholesky
+ *
+ * \sa class SimplicialLDLT, class SimplicialLLT
+ */
+template<typename _MatrixType, int _UpLo>
+ class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo> >
+{
+public:
+ typedef _MatrixType MatrixType;
+ enum { UpLo = _UpLo };
+ typedef SimplicialCholeskyBase<SimplicialCholesky> Base;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ typedef internal::traits<SimplicialCholesky> Traits;
+ typedef internal::traits<SimplicialLDLT<MatrixType,UpLo> > LDLTTraits;
+ typedef internal::traits<SimplicialLLT<MatrixType,UpLo> > LLTTraits;
+ public:
+ SimplicialCholesky() : Base(), m_LDLT(true) {}
+
+ SimplicialCholesky(const MatrixType& matrix)
+ : Base(), m_LDLT(true)
+ {
+ compute(matrix);
+ }
+
+ SimplicialCholesky& setMode(SimplicialCholeskyMode mode)
+ {
+ switch(mode)
+ {
+ case SimplicialCholeskyLLT:
+ m_LDLT = false;
+ break;
+ case SimplicialCholeskyLDLT:
+ m_LDLT = true;
+ break;
+ default:
+ break;
+ }
+
+ return *this;
+ }
+
+ inline const VectorType vectorD() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
+ return Base::m_diag;
+ }
+ inline const CholMatrixType rawMatrix() const {
+ eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
+ return Base::m_matrix;
+ }
+
+ /** Computes the sparse Cholesky decomposition of \a matrix */
+ SimplicialCholesky& compute(const MatrixType& matrix)
+ {
+ if(m_LDLT)
+ Base::template compute<true>(matrix);
+ else
+ Base::template compute<false>(matrix);
+ return *this;
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& a)
+ {
+ Base::analyzePattern(a, m_LDLT);
+ }
+
+ /** Performs a numeric decomposition of \a matrix
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& a)
+ {
+ if(m_LDLT)
+ Base::template factorize<true>(a);
+ else
+ Base::template factorize<false>(a);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+ {
+ eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+ eigen_assert(Base::m_matrix.rows()==b.rows());
+
+ if(Base::m_info!=Success)
+ return;
+
+ if(Base::m_P.size()>0)
+ dest = Base::m_Pinv * b;
+ else
+ dest = b;
+
+ if(Base::m_matrix.nonZeros()>0) // otherwise L==I
+ {
+ if(m_LDLT)
+ LDLTTraits::getL(Base::m_matrix).solveInPlace(dest);
+ else
+ LLTTraits::getL(Base::m_matrix).solveInPlace(dest);
+ }
+
+ if(Base::m_diag.size()>0)
+ dest = Base::m_diag.asDiagonal().inverse() * dest;
+
+ if (Base::m_matrix.nonZeros()>0) // otherwise U==I
+ {
+ if(m_LDLT)
+ LDLTTraits::getU(Base::m_matrix).solveInPlace(dest);
+ else
+ LLTTraits::getU(Base::m_matrix).solveInPlace(dest);
+ }
+
+ if(Base::m_P.size()>0)
+ dest = Base::m_P * dest;
+ }
+
+ Scalar determinant() const
+ {
+ if(m_LDLT)
+ {
+ return Base::m_diag.prod();
+ }
+ else
+ {
+ Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
+ return internal::abs2(detL);
+ }
+ }
+
+ protected:
+ bool m_LDLT;
+};
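+
+// A minimal sketch of selecting the LLT backend of this deprecated hybrid
+// class before factorizing; the sparse positive definite matrix A is an
+// assumption for illustration:
+//
+// SimplicialCholesky<SparseMatrix<double> > chol;
+// chol.setMode(SimplicialCholeskyLLT); // the default mode is LDLT
+// chol.compute(A);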
+
+template<typename Derived>
+void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, CholMatrixType& ap)
+{
+ eigen_assert(a.rows()==a.cols());
+ const Index size = a.rows();
+ // TODO allow the permutation to be configured
+ {
+ CholMatrixType C;
+ C = a.template selfadjointView<UpLo>();
+ // remove diagonal entries:
+ // seems not to be needed
+ // C.prune(keep_diag());
+ internal::minimum_degree_ordering(C, m_P);
+ }
+
+ if(m_P.size()>0)
+ m_Pinv = m_P.inverse();
+ else
+ m_Pinv.resize(0);
+
+ ap.resize(size,size);
+ ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_Pinv);
+}
+
+template<typename Derived>
+void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
+{
+ const Index size = ap.rows();
+ m_matrix.resize(size, size);
+ m_parent.resize(size);
+ m_nonZerosPerCol.resize(size);
+
+ ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
+
+ for(Index k = 0; k < size; ++k)
+ {
+ /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
+ m_parent[k] = -1; /* parent of k is not yet known */
+ tags[k] = k; /* mark node k as visited */
+ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
+ for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
+ {
+ Index i = it.index();
+ if(i < k)
+ {
+ /* follow path from i to root of etree, stop at flagged node */
+ for(; tags[i] != k; i = m_parent[i])
+ {
+ /* find parent of i if not yet determined */
+ if (m_parent[i] == -1)
+ m_parent[i] = k;
+ m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */
+ tags[i] = k; /* mark i as visited */
+ }
+ }
+ }
+ }
+
+ /* construct Lp index array from m_nonZerosPerCol column counts */
+ Index* Lp = m_matrix.outerIndexPtr();
+ Lp[0] = 0;
+ for(Index k = 0; k < size; ++k)
+ Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
+
+ m_matrix.resizeNonZeros(Lp[size]);
+
+ m_isInitialized = true;
+ m_info = Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+}
+
+
+template<typename Derived>
+template<bool DoLDLT>
+void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)
+{
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ eigen_assert(ap.rows()==ap.cols());
+ const Index size = ap.rows();
+ eigen_assert(m_parent.size()==size);
+ eigen_assert(m_nonZerosPerCol.size()==size);
+
+ const Index* Lp = m_matrix.outerIndexPtr();
+ Index* Li = m_matrix.innerIndexPtr();
+ Scalar* Lx = m_matrix.valuePtr();
+
+ ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
+
+ bool ok = true;
+ m_diag.resize(DoLDLT ? size : 0);
+
+ for(Index k = 0; k < size; ++k)
+ {
+ // compute nonzero pattern of kth row of L, in topological order
+ y[k] = 0.0; // Y(0:k) is now all zero
+ Index top = size; // stack for pattern is empty
+ tags[k] = k; // mark node k as visited
+ m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
+ for(typename MatrixType::InnerIterator it(ap,k); it; ++it)
+ {
+ Index i = it.index();
+ if(i <= k)
+ {
+ y[i] += internal::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
+ Index len;
+ for(len = 0; tags[i] != k; i = m_parent[i])
+ {
+ pattern[len++] = i; /* L(k,i) is nonzero */
+ tags[i] = k; /* mark i as visited */
+ }
+ while(len > 0)
+ pattern[--top] = pattern[--len];
+ }
+ }
+
+ /* compute numerical values kth row of L (a sparse triangular solve) */
+
+ RealScalar d = internal::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k)
+ y[k] = 0.0;
+ for(; top < size; ++top)
+ {
+ Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
+ Scalar yi = y[i]; /* get and clear Y(i) */
+ y[i] = 0.0;
+
+ /* the nonzero entry L(k,i) */
+ Scalar l_ki;
+ if(DoLDLT)
+ l_ki = yi / m_diag[i];
+ else
+ yi = l_ki = yi / Lx[Lp[i]];
+
+ Index p2 = Lp[i] + m_nonZerosPerCol[i];
+ Index p;
+ for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
+ y[Li[p]] -= internal::conj(Lx[p]) * yi;
+ d -= internal::real(l_ki * internal::conj(yi));
+ Li[p] = k; /* store L(k,i) in column form of L */
+ Lx[p] = l_ki;
+ ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
+ }
+ if(DoLDLT)
+ {
+ m_diag[k] = d;
+ if(d == RealScalar(0))
+ {
+ ok = false; /* failure, D(k,k) is zero */
+ break;
+ }
+ }
+ else
+ {
+ Index p = Lp[k] + m_nonZerosPerCol[k]++;
+ Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */
+ if(d <= RealScalar(0)) {
+ ok = false; /* failure, matrix is not positive definite */
+ break;
+ }
+ Lx[p] = internal::sqrt(d) ;
+ }
+ }
+
+ m_info = ok ? Success : NumericalIssue;
+ m_factorizationIsOk = true;
+}
+
+namespace internal {
+
+template<typename Derived, typename Rhs>
+struct solve_retval<SimplicialCholeskyBase<Derived>, Rhs>
+ : solve_retval_base<SimplicialCholeskyBase<Derived>, Rhs>
+{
+ typedef SimplicialCholeskyBase<Derived> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve(rhs(),dst);
+ }
+};
+
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<SimplicialCholeskyBase<Derived>, Rhs>
+ : sparse_solve_retval_base<SimplicialCholeskyBase<Derived>, Rhs>
+{
+ typedef SimplicialCholeskyBase<Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve_sparse(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SIMPLICIAL_CHOLESKY_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
new file mode 100644
index 00000000000..8ec63107a55
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
@@ -0,0 +1,386 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_AMBIVECTOR_H
+#define EIGEN_AMBIVECTOR_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+ * Hybrid sparse/dense vector class designed for intensive read-write operations.
+ *
+ * See BasicSparseLLT and SparseProduct for usage examples.
+ */
+template<typename _Scalar, typename _Index>
+class AmbiVector
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ AmbiVector(Index size)
+ : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+ {
+ resize(size);
+ }
+
+ void init(double estimatedDensity);
+ void init(int mode);
+
+ Index nonZeros() const;
+
+ /** Specifies a sub-vector to work on */
+ void setBounds(Index start, Index end) { m_start = start; m_end = end; }
+
+ void setZero();
+
+ void restart();
+ Scalar& coeffRef(Index i);
+ Scalar& coeff(Index i);
+
+ class Iterator;
+
+ ~AmbiVector() { delete[] m_buffer; }
+
+ void resize(Index size)
+ {
+ if (m_allocatedSize < size)
+ reallocate(size);
+ m_size = size;
+ }
+
+ Index size() const { return m_size; }
+
+ protected:
+
+ void reallocate(Index size)
+ {
+ // if the size of the matrix is not too large, let's allocate a bit more than needed so
+ // that we can handle a dense vector even in sparse mode.
+ delete[] m_buffer;
+ if (size<1000)
+ {
+ Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+ m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[allocSize];
+ }
+ else
+ {
+ m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[size];
+ }
+ m_size = size;
+ m_start = 0;
+ m_end = m_size;
+ }
+
+ void reallocateSparse()
+ {
+ Index copyElements = m_allocatedElements;
+ m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
+ Index allocSize = m_allocatedElements * sizeof(ListEl);
+ allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
+ Scalar* newBuffer = new Scalar[allocSize];
+ memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
+ delete[] m_buffer;
+ m_buffer = newBuffer;
+ }
+
+ protected:
+ // element type of the linked list
+ struct ListEl
+ {
+ Index next;
+ Index index;
+ Scalar value;
+ };
+
+ // used to store data in both modes
+ Scalar* m_buffer;
+ Scalar m_zero;
+ Index m_size;
+ Index m_start;
+ Index m_end;
+ Index m_allocatedSize;
+ Index m_allocatedElements;
+ Index m_mode;
+
+ // linked list mode
+ Index m_llStart;
+ Index m_llCurrent;
+ Index m_llSize;
+};
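+
+// A minimal internal-usage sketch, mirroring how the sparse kernels above
+// drive this class; the size n, the indices and the values are assumptions,
+// and do_something is a hypothetical consumer:
+//
+// AmbiVector<double,int> tmp(n);
+// tmp.setBounds(0,n);
+// tmp.init(0.05); // a low estimated density selects the sparse (linked-list) mode
+// tmp.setZero();
+// tmp.restart();
+// tmp.coeffRef(3) += 1.0; // scattered accumulation
+// for(AmbiVector<double,int>::Iterator it(tmp); it; ++it)
+// do_something(it.index(), it.value());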
+
+/** \returns the number of non-zeros in the current sub-vector */
+template<typename _Scalar,typename _Index>
+_Index AmbiVector<_Scalar,_Index>::nonZeros() const
+{
+ if (m_mode==IsSparse)
+ return m_llSize;
+ else
+ return m_end - m_start;
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
+{
+ if (estimatedDensity>0.1)
+ init(IsDense);
+ else
+ init(IsSparse);
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(int mode)
+{
+ m_mode = mode;
+ if (m_mode==IsSparse)
+ {
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+/** Must be called whenever we might perform a write access
+ * with an index smaller than the previous one.
+ *
+ * Don't worry, this function is extremely cheap.
+ */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::restart()
+{
+ m_llCurrent = m_llStart;
+}
+
+/** Set all coefficients of current subvector to zero */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::setZero()
+{
+ if (m_mode==IsDense)
+ {
+ for (Index i=m_start; i<m_end; ++i)
+ m_buffer[i] = Scalar(0);
+ }
+ else
+ {
+ eigen_assert(m_mode==IsSparse);
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ // TODO factorize the following code to reduce code generation
+ eigen_assert(m_mode==IsSparse);
+ if (m_llSize==0)
+ {
+ // this is the first element
+ m_llStart = 0;
+ m_llCurrent = 0;
+ ++m_llSize;
+ llElements[0].value = Scalar(0);
+ llElements[0].index = i;
+ llElements[0].next = -1;
+ return llElements[0].value;
+ }
+ else if (i<llElements[m_llStart].index)
+ {
+ // this is going to be the new first element of the list
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = m_llStart;
+ m_llStart = m_llSize;
+ ++m_llSize;
+ m_llCurrent = m_llStart;
+ return el.value;
+ }
+ else
+ {
+ Index nextel = llElements[m_llCurrent].next;
+ eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
+ while (nextel >= 0 && llElements[nextel].index<=i)
+ {
+ m_llCurrent = nextel;
+ nextel = llElements[nextel].next;
+ }
+
+ if (llElements[m_llCurrent].index==i)
+ {
+ // the coefficient already exists and we found it!
+ return llElements[m_llCurrent].value;
+ }
+ else
+ {
+ if (m_llSize>=m_allocatedElements)
+ {
+ reallocateSparse();
+ llElements = reinterpret_cast<ListEl*>(m_buffer);
+ }
+ eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
+ // let's insert a new coefficient
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = llElements[m_llCurrent].next;
+ llElements[m_llCurrent].next = m_llSize;
+ ++m_llSize;
+ return el.value;
+ }
+ }
+ }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ eigen_assert(m_mode==IsSparse);
+ if ((m_llSize==0) || (i<llElements[m_llStart].index))
+ {
+ return m_zero;
+ }
+ else
+ {
+ Index elid = m_llStart;
+ while (elid >= 0 && llElements[elid].index<i)
+ elid = llElements[elid].next;
+
+ // guard against elid==-1 (key past the end of the list) and return the
+ // element that was actually found rather than the current cursor
+ if (elid>=0 && llElements[elid].index==i)
+ return llElements[elid].value;
+ else
+ return m_zero;
+ }
+ }
+}
+
+/** Iterator over the nonzero coefficients */
+template<typename _Scalar,typename _Index>
+class AmbiVector<_Scalar,_Index>::Iterator
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** Constructor
+ * \param vec the vector on which we iterate
+ * \param epsilon the minimal value used to prune zero coefficients.
+ * In practice, all coefficients having a magnitude smaller than \a epsilon
+ * are skipped.
+ */
+ Iterator(const AmbiVector& vec, RealScalar epsilon = 0)
+ : m_vector(vec)
+ {
+ m_epsilon = epsilon;
+ m_isDense = m_vector.m_mode==IsDense;
+ if (m_isDense)
+ {
+ m_currentEl = 0; // this is to avoid a compilation warning
+ m_cachedValue = 0; // this is to avoid a compilation warning
+ m_cachedIndex = m_vector.m_start-1;
+ ++(*this);
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ m_currentEl = m_vector.m_llStart;
+ while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<=m_epsilon)
+ m_currentEl = llElements[m_currentEl].next;
+ if (m_currentEl<0)
+ {
+ m_cachedValue = 0; // this is to avoid a compilation warning
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ }
+
+ Index index() const { return m_cachedIndex; }
+ Scalar value() const { return m_cachedValue; }
+
+ operator bool() const { return m_cachedIndex>=0; }
+
+ Iterator& operator++()
+ {
+ if (m_isDense)
+ {
+ do {
+ ++m_cachedIndex;
+ } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+ if (m_cachedIndex<m_vector.m_end)
+ m_cachedValue = m_vector.m_buffer[m_cachedIndex];
+ else
+ m_cachedIndex=-1;
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ do {
+ m_currentEl = llElements[m_currentEl].next;
+ } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon);
+ if (m_currentEl<0)
+ {
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ return *this;
+ }
+
+ protected:
+ const AmbiVector& m_vector; // the target vector
+ Index m_currentEl; // the current element in sparse/linked-list mode
+ RealScalar m_epsilon; // epsilon used to prune zero coefficients
+ Index m_cachedIndex; // current coordinate
+ Scalar m_cachedValue; // current value
+ bool m_isDense; // mode of the vector
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_AMBIVECTOR_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/CMakeLists.txt b/extern/Eigen3/Eigen/src/SparseCore/CMakeLists.txt
new file mode 100644
index 00000000000..d860452a65c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseCore_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SparseCore_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCore COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
new file mode 100644
index 00000000000..fa2bfd763cb
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
@@ -0,0 +1,248 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPRESSED_STORAGE_H
+#define EIGEN_COMPRESSED_STORAGE_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+ * Stores a sparse set of values as a list of values and a list of indices.
+ *
+ */
+template<typename _Scalar,typename _Index>
+class CompressedStorage
+{
+ public:
+
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+
+ protected:
+
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ public:
+
+ CompressedStorage()
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {}
+
+ CompressedStorage(size_t size)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ resize(size);
+ }
+
+ CompressedStorage(const CompressedStorage& other)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ *this = other;
+ }
+
+ CompressedStorage& operator=(const CompressedStorage& other)
+ {
+ resize(other.size());
+ memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
+ memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
+ return *this;
+ }
+
+ void swap(CompressedStorage& other)
+ {
+ std::swap(m_values, other.m_values);
+ std::swap(m_indices, other.m_indices);
+ std::swap(m_size, other.m_size);
+ std::swap(m_allocatedSize, other.m_allocatedSize);
+ }
+
+ ~CompressedStorage()
+ {
+ delete[] m_values;
+ delete[] m_indices;
+ }
+
+ void reserve(size_t size)
+ {
+ size_t newAllocatedSize = m_size + size;
+ if (newAllocatedSize > m_allocatedSize)
+ reallocate(newAllocatedSize);
+ }
+
+ void squeeze()
+ {
+ if (m_allocatedSize>m_size)
+ reallocate(m_size);
+ }
+
+ void resize(size_t size, float reserveSizeFactor = 0)
+ {
+ if (m_allocatedSize<size)
+ reallocate(size + size_t(reserveSizeFactor*size));
+ m_size = size;
+ }
+
+ void append(const Scalar& v, Index i)
+ {
+ Index id = static_cast<Index>(m_size);
+ resize(m_size+1, 1);
+ m_values[id] = v;
+ m_indices[id] = i;
+ }
+
+ inline size_t size() const { return m_size; }
+ inline size_t allocatedSize() const { return m_allocatedSize; }
+ inline void clear() { m_size = 0; }
+
+ inline Scalar& value(size_t i) { return m_values[i]; }
+ inline const Scalar& value(size_t i) const { return m_values[i]; }
+
+ inline Index& index(size_t i) { return m_indices[i]; }
+ inline const Index& index(size_t i) const { return m_indices[i]; }
+
+ static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
+ {
+ CompressedStorage res;
+ res.m_indices = indices;
+ res.m_values = values;
+ res.m_allocatedSize = res.m_size = size;
+ return res;
+ }
+
+ /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+ inline Index searchLowerIndex(Index key) const
+ {
+ return searchLowerIndex(0, m_size, key);
+ }
+
+ /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+ inline Index searchLowerIndex(size_t start, size_t end, Index key) const
+ {
+ while(end>start)
+ {
+ size_t mid = (end+start)>>1;
+ if (m_indices[mid]<key)
+ start = mid+1;
+ else
+ end = mid;
+ }
+ return static_cast<Index>(start);
+ }
+
+ /** \returns the stored value at index \a key
+ * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+ inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (m_size==0)
+ return defaultValue;
+ else if (key==m_indices[m_size-1])
+ return m_values[m_size-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(0,m_size-1,key);
+ return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** Like at(), but the search is performed in the range [start,end) */
+ inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (start>=end)
+ return defaultValue;
+ else if (end>start && key==m_indices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(start,end-1,key);
+ return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** \returns a reference to the value at index \a key
+ * If the value does not exist, then the value \a defaultValue is inserted
+ * such that the keys are sorted. */
+ inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
+ {
+ size_t id = searchLowerIndex(0,m_size,key);
+ if (id>=m_size || m_indices[id]!=key)
+ {
+ resize(m_size+1,1);
+ for (size_t j=m_size-1; j>id; --j)
+ {
+ m_indices[j] = m_indices[j-1];
+ m_values[j] = m_values[j-1];
+ }
+ m_indices[id] = key;
+ m_values[id] = defaultValue;
+ }
+ return m_values[id];
+ }
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ size_t k = 0;
+ size_t n = size();
+ for (size_t i=0; i<n; ++i)
+ {
+ if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
+ {
+ value(k) = value(i);
+ index(k) = index(i);
+ ++k;
+ }
+ }
+ resize(k,0);
+ }
+
+ protected:
+
+ inline void reallocate(size_t size)
+ {
+ Scalar* newValues = new Scalar[size];
+ Index* newIndices = new Index[size];
+ size_t copySize = (std::min)(size, m_size);
+ // copy
+ internal::smart_copy(m_values, m_values+copySize, newValues);
+ internal::smart_copy(m_indices, m_indices+copySize, newIndices);
+ // delete old stuff
+ delete[] m_values;
+ delete[] m_indices;
+ m_values = newValues;
+ m_indices = newIndices;
+ m_allocatedSize = size;
+ }
+
+ protected:
+ Scalar* m_values;
+ Index* m_indices;
+ size_t m_size;
+ size_t m_allocatedSize;
+
+};
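+
+#if 0
+// Illustrative usage sketch (added for documentation only; not part of Eigen).
+// CompressedStorage is the low-level buffer behind SparseMatrix: two parallel
+// arrays of values and indices, kept sorted by index.
+static void compressed_storage_example()
+{
+  CompressedStorage<float,int> data;
+  data.append(1.0f, 2);            // indices must be appended in increasing order
+  data.append(3.5f, 7);
+  float a = data.at(7);            // 3.5f, found by binary search
+  float b = data.at(4);            // 0.0f, the default for a missing index
+  data.atWithInsertion(4) = a + b; // inserts index 4, shifting later entries
+  data.prune(1.0f);                // drops values much smaller than the reference
+}
+#endif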
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPRESSED_STORAGE_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
new file mode 100644
index 00000000000..0fb4c1c97c0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -0,0 +1,260 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
+#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType>
+static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ std::vector<bool> mask(rows,false);
+ Matrix<Scalar,Dynamic,1> values(rows);
+ Matrix<Index,Dynamic,1> indices(rows);
+
+ // Estimate the number of non zero entries:
+ // for a rhs column containing Y non zeros, we assume that the Y corresponding
+ // columns of the lhs differ from each other by about one non zero on average,
+ // so the product of that rhs column with the lhs has roughly X+Y non zeros,
+ // where X is the average number of non zeros per column of the lhs.
+ // Summing over all columns gives nnz(lhs*rhs) = nnz(lhs) + nnz(rhs).
+ Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+
+ res.setZero();
+ res.reserve(Index(estimated_nnz_prod));
+ // we compute each column of the result, one after the other
+ for (Index j=0; j<cols; ++j)
+ {
+
+ res.startVec(j);
+ Index nnz = 0;
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ Scalar y = rhsIt.value();
+ Index k = rhsIt.index();
+ for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
+ {
+ Index i = lhsIt.index();
+ Scalar x = lhsIt.value();
+ if(!mask[i])
+ {
+ mask[i] = true;
+ values[i] = x * y;
+ indices[nnz] = i;
+ ++nnz;
+ }
+ else
+ values[i] += x * y;
+ }
+ }
+
+ // unordered insertion
+ for(int k=0; k<nnz; ++k)
+ {
+ int i = indices[k];
+ res.insertBackByOuterInnerUnordered(j,i) = values[i];
+ mask[i] = false;
+ }
+
+#if 0
+ // alternative ordered insertion code:
+
+ int t200 = rows/(log2(200)*1.39);
+ int t = (rows*100)/139;
+
+ // FIXME reserve nnz non zeros
+ // FIXME implement fast sort algorithms for very small nnz
+ // if the result is sparse enough => use a quick sort
+ // otherwise => loop through the entire vector
+ // In order to avoid to perform an expensive log2 when the
+ // result is clearly very sparse we use a linear bound up to 200.
+ //if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
+ //res.startVec(j);
+ if(true)
+ {
+ if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
+ for(int k=0; k<nnz; ++k)
+ {
+ int i = indices[k];
+ res.insertBackByOuterInner(j,i) = values[i];
+ mask[i] = false;
+ }
+ }
+ else
+ {
+ // dense path
+ for(int i=0; i<rows; ++i)
+ {
+ if(mask[i])
+ {
+ mask[i] = false;
+ res.insertBackByOuterInner(j,i) = values[i];
+ }
+ }
+ }
+#endif
+
+ }
+ res.finalize();
+}
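+
+#if 0
+// Usage sketch (added for documentation only; not part of Eigen). The kernel
+// above is not called directly by users: it is reached through operator* on
+// two sparse operands, via the selector structs defined below.
+static void sparse_product_example()
+{
+  SparseMatrix<double> A(100,100), B(100,100);
+  A.insert(0,0) = 1.0;             // ... fill A and B ...
+  B.insert(0,0) = 2.0;
+  SparseMatrix<double> C = A * B;  // evaluates through the kernel above
+}
+#endif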
+
+
+} // end namespace internal
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct conservative_sparse_sparse_product_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename remove_all<Lhs>::type LhsCleaned;
+ typedef typename LhsCleaned::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix resCol(lhs.rows(),rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+ // sort the non zeros:
+ RowMajorMatrix resRow(resCol);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix rhsRow = rhs;
+ RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix lhsRow = lhs;
+ RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+ res = resRow;
+ }
+};
+
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix lhsCol = lhs;
+ ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix rhsCol = rhs;
+ ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ RowMajorMatrix resRow(lhs.rows(),rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+ // sort the non zeros:
+ ColMajorMatrix resCol(resRow);
+ res = resCol;
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/CoreIterators.h b/extern/Eigen3/Eigen/src/SparseCore/CoreIterators.h
new file mode 100644
index 00000000000..ea51e92312a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/CoreIterators.h
@@ -0,0 +1,76 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COREITERATORS_H
+#define EIGEN_COREITERATORS_H
+
+namespace Eigen {
+
+/* This file contains the InnerIterator definitions for the expressions defined in Eigen/Core
+ */
+
+/** \ingroup SparseCore_Module
+ * \class InnerIterator
+ * \brief An InnerIterator allows looping over the elements of a sparse (or dense) matrix or expression
+ *
+ * todo
+ */
+
+// generic version for dense matrix and expressions
+template<typename Derived> class DenseBase<Derived>::InnerIterator
+{
+ protected:
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+
+ enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer)
+ : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.innerSize())
+ {}
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ {
+ return (IsRowMajor) ? m_expression.coeff(m_outer, m_inner)
+ : m_expression.coeff(m_inner, m_outer);
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+ protected:
+ const Derived& m_expression;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
+};
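+
+#if 0
+// Usage sketch (added for documentation only; not part of Eigen). The iterator
+// above visits all inner coefficients of one outer vector (a column for the
+// default column-major Matrix), mirroring the sparse InnerIterator protocol.
+static double dense_inner_iterator_example(const MatrixXd& m)
+{
+  double sum = 0;
+  for (MatrixXd::Index j = 0; j < m.outerSize(); ++j)
+    for (DenseBase<MatrixXd>::InnerIterator it(m, j); it; ++it)
+      sum += it.value();           // it.row()/it.col() give the coordinates
+  return sum;
+}
+#endif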
+
+} // end namespace Eigen
+
+#endif // EIGEN_COREITERATORS_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
new file mode 100644
index 00000000000..fc7f9d14327
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -0,0 +1,194 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
+#define EIGEN_MAPPED_SPARSEMATRIX_H
+
+namespace Eigen {
+
+/** \class MappedSparseMatrix
+ *
+ * \brief A sparse matrix view mapping existing compressed storage arrays without copying them
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ */
+namespace internal {
+template<typename _Scalar, int _Flags, typename _Index>
+struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
+{};
+}
+
+template<typename _Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix
+ : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ protected:
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index m_nnz;
+ Index* m_outerIndex;
+ Index* m_innerIndices;
+ Scalar* m_values;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+
+ //----------------------------------------
+ // direct access interface
+ inline const Scalar* valuePtr() const { return m_values; }
+ inline Scalar* valuePtr() { return m_values; }
+
+ inline const Index* innerIndexPtr() const { return m_innerIndices; }
+ inline Index* innerIndexPtr() { return m_innerIndices; }
+
+ inline const Index* outerIndexPtr() const { return m_outerIndex; }
+ inline Index* outerIndexPtr() { return m_outerIndex; }
+ //----------------------------------------
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ if (start==end)
+ return Scalar(0);
+ else if (end>0 && inner==m_innerIndices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+
+ const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const Index id = r-&m_innerIndices[0];
+ return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
+ const Index id = r-&m_innerIndices[0];
+ eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ return m_values[id];
+ }
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return m_nnz; }
+
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
+ : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
+ m_innerIndices(innerIndexPtr), m_values(valuePtr)
+ {}
+
+ /** Empty destructor */
+ inline ~MappedSparseMatrix() {}
+};
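+
+#if 0
+// Usage sketch (added for documentation only; not part of Eigen): wrap
+// existing compressed column storage arrays without copying them. The three
+// arrays below describe the 2x2 matrix [1 0; 0 2].
+static void mapped_sparse_matrix_example()
+{
+  int    outer[3]  = {0, 1, 2};    // start of each column, plus the total nnz
+  int    inner[2]  = {0, 1};       // row index of each stored value
+  double values[2] = {1.0, 2.0};
+  MappedSparseMatrix<double,ColMajor,int> map(2, 2, 2, outer, inner, values);
+  double x = map.coeff(1,1);       // 2.0, read through the mapped arrays
+}
+#endif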
+
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const MappedSparseMatrix& mat, Index outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_id(mat.outerIndexPtr()[outer]),
+ m_start(m_id),
+ m_end(mat.outerIndexPtr()[outer+1])
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_matrix.valuePtr()[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id]); }
+
+ inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
+
+ protected:
+ const MappedSparseMatrix& m_matrix;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
+};
+
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::ReverseInnerIterator
+{
+ public:
+ ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_id(mat.outerIndexPtr()[outer+1]),
+ m_start(mat.outerIndexPtr()[outer]),
+ m_end(m_id)
+ {}
+
+ inline ReverseInnerIterator& operator--() { m_id--; return *this; }
+
+ inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id-1]); }
+
+ inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); }
+
+ protected:
+ const MappedSparseMatrix& m_matrix;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAPPED_SPARSEMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseAssign.h b/extern/Eigen3/Eigen/src/SparseCore/SparseAssign.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseAssign.h
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
new file mode 100644
index 00000000000..189538f39cf
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
@@ -0,0 +1,402 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_BLOCK_H
+#define EIGEN_SPARSE_BLOCK_H
+
+namespace Eigen {
+
+namespace internal {
+template<typename MatrixType, int Size>
+struct traits<SparseInnerVectorSet<MatrixType, Size> >
+{
+ typedef typename traits<MatrixType>::Scalar Scalar;
+ typedef typename traits<MatrixType>::Index Index;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
+ Flags = MatrixType::Flags,
+ RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ CoeffReadCost = MatrixType::CoeffReadCost
+ };
+};
+} // end namespace internal
+
+template<typename MatrixType, int Size>
+class SparseInnerVectorSet : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
+{
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+ class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+ {
+ public:
+ inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+// template<typename OtherDerived>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+};
+
+
+/***************************************************************************
+* specialisation for SparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, typename _Index, int Size>
+class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
+ : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size> >
+{
+ typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+ class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+ {
+ public:
+ inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size==1);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _NestedMatrixType;
+ _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);
+ // This assignment is slow if this vector set is not empty
+ // and/or it is not at the end of the nonzeros of the underlying matrix.
+
+ // 1 - eval to a temporary to avoid transposition and/or aliasing issues
+ SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, Index> tmp(other);
+
+ // 2 - let's check whether there is enough allocated memory
+ Index nnz = tmp.nonZeros();
+ Index nnz_previous = nonZeros();
+ Index free_size = Index(matrix.data().allocatedSize()) + nnz_previous;
+ Index nnz_head = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart];
+ Index tail = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];
+ Index nnz_tail = matrix.nonZeros() - tail;
+
+ if(nnz>free_size)
+ {
+ // realloc manually to reduce copies
+ typename MatrixType::Storage newdata(m_matrix.nonZeros() - nnz_previous + nnz);
+
+ std::memcpy(&newdata.value(0), &m_matrix.data().value(0), nnz_head*sizeof(Scalar));
+ std::memcpy(&newdata.index(0), &m_matrix.data().index(0), nnz_head*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+
+ matrix.data().swap(newdata);
+ }
+ else
+ {
+ // no need to realloc, simply copy the tail at its respective position and insert tmp
+ matrix.data().resize(nnz_head + nnz + nnz_tail);
+
+ if(nnz<nnz_previous)
+ {
+ std::memcpy(&matrix.data().value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+ }
+ else
+ {
+ for(Index i=nnz_tail-1; i>=0; --i)
+ {
+ matrix.data().value(nnz_head+nnz+i) = matrix.data().value(tail+i);
+ matrix.data().index(nnz_head+nnz+i) = matrix.data().index(tail+i);
+ }
+ }
+
+ std::memcpy(&matrix.data().value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+ }
+
+ // update outer index pointers
+ Index p = nnz_head;
+ for(Index k=0; k<m_outerSize.value(); ++k)
+ {
+ matrix.outerIndexPtr()[m_outerStart+k] = p;
+ p += tmp.innerVector(k).nonZeros();
+ }
+ std::ptrdiff_t offset = nnz - nnz_previous;
+ for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+ {
+ matrix.outerIndexPtr()[k] += offset;
+ }
+
+ return *this;
+ }
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
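+
+#if 0
+// Usage sketch (added for documentation only; not part of Eigen): the
+// assignment operators above run when writing into a set of inner vectors
+// of a SparseMatrix, e.g. replacing three columns of a column-major matrix.
+static void inner_vector_assignment_example(SparseMatrix<double>& A,
+                                            const SparseMatrix<double>& B)
+{
+  // requires B.rows()==A.rows() and B.cols()==3; cheapest when the target
+  // columns hold the last nonzeros of A, as explained in the operator above
+  A.middleCols(2,3) = B;
+}
+#endif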
+
+ inline const Scalar* valuePtr() const
+ { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ inline Scalar* valuePtr()
+ { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* innerIndexPtr() const
+ { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ inline Index* innerIndexPtr()
+ { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* outerIndexPtr() const
+ { return m_matrix.outerIndexPtr() + m_outerStart; }
+ inline Index* outerIndexPtr()
+ { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
+
+ Index nonZeros() const
+ {
+ if(m_matrix.isCompressed())
+ return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
+ - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
+ else if(m_outerSize.value()==0)
+ return 0;
+ else
+ return Map<const Matrix<Index,Size,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ eigen_assert(nonZeros()>0);
+ if(m_matrix.isCompressed())
+ return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
+ else
+ return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
+ }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+
+};
+
+//----------
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major).
+ */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major). Read-only.
+ */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+/** \returns the \a size consecutive rows of the matrix \c *this starting at row \a start. For row-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive rows of the matrix \c *this starting at row \a start. For row-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive columns of the matrix \c *this starting at column \a start. For column-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive columns of the matrix \c *this starting at column \a start. For column-major matrices only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major).
+ */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major). Read-only.
+ */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
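+
+#if 0
+// Usage sketch (added for documentation only; not part of Eigen): reading
+// through the block expressions defined above. For a column-major matrix,
+// col(j) and innerVector(j) denote the same view; assuming the reduction
+// SparseMatrixBase::sum() is available, both lines compute the same value.
+static double column_sum_example(const SparseMatrix<double>& A, int j)
+{
+  double s1 = A.col(j).sum();             // fixed-size (Size==1) inner vector set
+  double s2 = A.innerVectors(j, 1).sum(); // Dynamic-size set over one vector
+  return s1 + s2;                         // == 2*s1
+}
+#endif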
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_BLOCK_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
new file mode 100644
index 00000000000..28167066a3b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -0,0 +1,339 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
+#define EIGEN_SPARSE_CWISE_BINARY_OP_H
+
+namespace Eigen {
+
+// Here we have to handle 3 cases:
+// 1 - sparse op dense
+// 2 - dense op sparse
+// 3 - sparse op sparse
+// We also need to implement a 4th iterator for:
+// 4 - dense op dense
+// Finally, we also need to distinguish between the product and other operations:
+//   configuration           operation   returned storage
+//   1 - sparse op dense     product     sparse
+//                           generic     dense
+//   2 - dense op sparse     product     sparse
+//                           generic     dense
+//   3 - sparse op sparse    product     sparse
+//                           generic     sparse
+//   4 - dense op dense      product     dense
+//                           generic     dense
+
+namespace internal {
+
+template<> struct promote_storage_type<Dense,Sparse>
+{ typedef Sparse ret; };
+
+template<> struct promote_storage_type<Sparse,Dense>
+{ typedef Sparse ret; };
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
+ typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
+ typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
+class sparse_cwise_binary_op_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
+ : public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+ public:
+ class InnerIterator;
+ class ReverseInnerIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+ CwiseBinaryOpImpl()
+ {
+ typedef typename internal::traits<Lhs>::StorageKind LhsStorageKind;
+ typedef typename internal::traits<Rhs>::StorageKind RhsStorageKind;
+ EIGEN_STATIC_ASSERT((
+ (!internal::is_same<LhsStorageKind,RhsStorageKind>::value)
+ || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))),
+ THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
+ }
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
+ : public internal::sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
+{
+ public:
+ typedef typename Lhs::Index Index;
+ typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
+ BinaryOp,Lhs,Rhs, InnerIterator> Base;
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename CwiseBinaryOpImpl::Index outer)
+ : Base(binOp.derived(),outer)
+ {}
+};
+
+/***************************************************************************
+* Implementation of inner-iterators
+***************************************************************************/
+
+// template<typename T> struct internal::func_is_conjunction { enum { ret = false }; };
+// template<typename T> struct internal::func_is_conjunction<internal::scalar_product_op<T> > { enum { ret = true }; };
+
+// TODO generalize the internal::scalar_product_op specialization to all conjunctions if any !
+
+namespace internal {
+
+// sparse - sparse (generic)
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename traits<CwiseBinaryXpr>::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+ ++m_lhsIter;
+ ++m_rhsIter;
+ }
+ else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), Scalar(0));
+ ++m_lhsIter;
+ }
+ else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
+ {
+ m_id = m_rhsIter.index();
+ m_value = m_functor(Scalar(0), m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ {
+ m_value = 0; // this is to avoid a compilation warning
+ m_id = -1;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
+ EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ Index m_id;
+};
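+
+// note (added for documentation): the generic iterator above walks the union
+// of the two index sequences, e.g. lhs indices {0,2} and rhs indices {2,5}
+// yield {0,2,5}, with the functor receiving Scalar(0) for the missing side.
+// The scalar_product_op specialization below instead walks the intersection
+// ({2} in that example), since a product with an implicit zero is zero.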
+
+// sparse - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ ++m_rhsIter;
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+};
+
+// sparse - dense (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Dense>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::RhsNested RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename Lhs::Index Index;
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_lhsIter.value(),
+ m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
+ protected:
+ RhsNested m_rhs;
+ LhsIterator m_lhsIter;
+ const BinaryFunc m_functor;
+ const Index m_outer;
+};
+
+// dense - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Dense, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_rhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
+ protected:
+ const CwiseBinaryXpr& m_xpr;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+ const Index m_outer;
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of SparseMatrixBase and SparseCwise functions/operators
+***************************************************************************/
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
+{
+ return *this = derived() - other.derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
+{
+ return *this = derived() + other.derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
+}
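+
+#if 0
+// Usage sketch (added for documentation only; not part of Eigen): a
+// coefficient-wise product with a dense operand stays sparse, matching the
+// case table at the top of this file, while the sum of two sparse operands
+// iterates over the union of their patterns.
+static void cwise_example(const SparseMatrix<double>& A,
+                          const SparseMatrix<double>& B,
+                          const MatrixXd& D)
+{
+  SparseMatrix<double> P = A.cwiseProduct(D); // sparse result, dense rhs
+  SparseMatrix<double> S = A + B;             // union of the two patterns
+}
+#endif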
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
new file mode 100644
index 00000000000..2a63cf2af31
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -0,0 +1,178 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
+#define EIGEN_SPARSE_CWISE_UNARY_OP_H
+
+namespace Eigen {
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+
+ protected:
+ typedef typename internal::traits<Derived>::_XprTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator;
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::InnerIterator
+ : public CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeIterator
+{
+ typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+ typedef typename CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer)
+ : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { Base::operator++(); return *this; }
+
+ EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); }
+
+ protected:
+ const UnaryOp m_functor;
+ private:
+ typename CwiseUnaryOpImpl::Scalar& valueRef();
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::ReverseInnerIterator
+ : public CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeReverseIterator
+{
+ typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+ typedef typename CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeReverseIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer)
+ : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+ { Base::operator--(); return *this; }
+
+ EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); }
+
+ protected:
+ const UnaryOp m_functor;
+ private:
+ typename CwiseUnaryOpImpl::Scalar& valueRef();
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryView<ViewOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+
+ protected:
+ typedef typename internal::traits<Derived>::_MatrixTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::InnerIterator
+ : public CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeIterator
+{
+ typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+ typedef typename CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer)
+ : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { Base::operator++(); return *this; }
+
+ EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); }
+ EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); }
+
+ protected:
+ const ViewOp m_functor;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::ReverseInnerIterator
+ : public CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeReverseIterator
+{
+ typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+ typedef typename CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeReverseIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer)
+ : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+ { Base::operator--(); return *this; }
+
+ EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); }
+ EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); }
+
+ protected:
+ const ViewOp m_functor;
+};
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator*=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() *= other;
+ return derived();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator/=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() /= other;
+ return derived();
+}
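+
+// A minimal usage sketch (editorial note, not from the original sources; the
+// matrix A is hypothetical and assumed already filled):
+//
+//   Eigen::SparseMatrix<double> A(4,4);
+//   A *= 2.0;  // scales every stored coefficient in place
+//   A /= 4.0;  // likewise for division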
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
new file mode 100644
index 00000000000..00ba606be5e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -0,0 +1,315 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEDENSEPRODUCT_H
+#define EIGEN_SPARSEDENSEPRODUCT_H
+
+namespace Eigen {
+
+template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
+{
+ typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
+};
+
+template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
+{
+ typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, bool Tr>
+struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ typedef Sparse StorageKind;
+ typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
+ typename traits<Rhs>::Scalar>::ReturnType Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
+ RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
+
+ RowsAtCompileTime = Tr ? int(traits<Rhs>::RowsAtCompileTime) : int(traits<Lhs>::RowsAtCompileTime),
+ ColsAtCompileTime = Tr ? int(traits<Lhs>::ColsAtCompileTime) : int(traits<Rhs>::ColsAtCompileTime),
+ MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime) : int(traits<Lhs>::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime) : int(traits<Rhs>::MaxColsAtCompileTime),
+
+ Flags = Tr ? RowMajorBit : 0,
+
+ CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
+ };
+};
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, bool Tr>
+class SparseDenseOuterProduct
+ : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
+ typedef internal::traits<SparseDenseOuterProduct> Traits;
+
+ private:
+
+ typedef typename Traits::LhsNested LhsNested;
+ typedef typename Traits::RhsNested RhsNested;
+ typedef typename Traits::_LhsNested _LhsNested;
+ typedef typename Traits::_RhsNested _RhsNested;
+
+ public:
+
+ class InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+template<typename Lhs, typename Rhs, bool Transpose>
+class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
+{
+ typedef typename _LhsNested::InnerIterator Base;
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
+ : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
+ {
+ }
+
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return Transpose ? Base::row() : m_outer; }
+ inline Index col() const { return Transpose ? m_outer : Base::row(); }
+
+ inline Scalar value() const { return Base::value() * m_factor; }
+
+ protected:
+    Index m_outer;
+ Scalar m_factor;
+};
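+
+// A minimal sketch of how this expression arises (editorial note; x and y are
+// hypothetical). A sparse column vector times a dense row vector should yield
+// a SparseDenseOuterProduct expression:
+//
+//   Eigen::SparseVector<double> x(3);
+//   x.coeffRef(1) = 2.0;
+//   Eigen::RowVectorXd y = Eigen::RowVectorXd::Ones(4);
+//   Eigen::SparseMatrix<double> R = x * y;  // rank-1 outer product, sparse result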
+
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
+ : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+ typedef MatrixXpr XprKind;
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
+ int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
+ bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
+struct sparse_time_dense_product_impl;
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, true>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::Index Index;
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ {
+ for(Index c=0; c<rhs.cols(); ++c)
+ {
+      Index n = lhs.outerSize();
+ for(Index j=0; j<n; ++j)
+ {
+ typename Res::Scalar tmp(0);
+ for(LhsInnerIterator it(lhs,j); it ;++it)
+ tmp += it.value() * rhs.coeff(it.index(),c);
+ res.coeffRef(j,c) = alpha * tmp;
+ }
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, true>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+ typedef typename Lhs::Index Index;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ {
+ for(Index c=0; c<rhs.cols(); ++c)
+ {
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+ typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
+ for(LhsInnerIterator it(lhs,j); it ;++it)
+ res.coeffRef(it.index(),c) += it.value() * rhs_j;
+ }
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, false>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+ typedef typename Lhs::Index Index;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ {
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+ typename Res::RowXpr res_j(res.row(j));
+ for(LhsInnerIterator it(lhs,j); it ;++it)
+ res_j += (alpha*it.value()) * rhs.row(it.index());
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, false>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+ typedef typename Lhs::Index Index;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ {
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+ typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
+ for(LhsInnerIterator it(lhs,j); it ;++it)
+ res.row(it.index()) += (alpha*it.value()) * rhs_j;
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
+inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
+{
+ sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType>::run(lhs, rhs, res, alpha);
+}
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseTimeDenseProduct
+ : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
+
+ SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
+ }
+
+ private:
+ SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
+};
+
+
+// dense = dense * sparse
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
+ : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class DenseTimeSparseProduct
+ : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
+
+ DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ Transpose<const _LhsNested> lhs_t(m_lhs);
+ Transpose<const _RhsNested> rhs_t(m_rhs);
+ Transpose<Dest> dest_t(dest);
+ internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha);
+ }
+
+ private:
+ DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
+};
+
+// sparse * dense
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
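+
+// A minimal usage sketch (editorial note; A, v and w are hypothetical):
+//
+//   Eigen::SparseMatrix<double> A(3,4);
+//   Eigen::VectorXd v = Eigen::VectorXd::Ones(4);
+//   Eigen::VectorXd w(3);
+//   w = A * v;                                 // sparse * dense (SparseTimeDenseProduct)
+//   Eigen::RowVectorXd u = w.transpose() * A;  // dense * sparse (DenseTimeSparseProduct)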
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
new file mode 100644
index 00000000000..b09c4a7151b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -0,0 +1,199 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+
+namespace Eigen {
+
+// The product of a diagonal matrix with a sparse matrix can be easily
+// implemented using expression templates.
+// We have to consider two very different cases:
+// 1 - diag * row-major sparse
+//     => each inner vector <=> scalar * sparse vector product
+//     => so we can reuse CwiseUnaryOp::InnerIterator
+// 2 - diag * col-major sparse
+//     => each inner vector <=> dense vector * sparse vector cwise product
+//     => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
+//        for that particular case
+// The two other cases are symmetric.
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<SparseDiagonalProduct<Lhs, Rhs> >
+{
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = _Lhs::RowsAtCompileTime,
+ ColsAtCompileTime = _Rhs::ColsAtCompileTime,
+
+ MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
+
+ SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
+ Flags = (SparseFlags&RowMajorBit),
+ CoeffReadCost = Dynamic
+ };
+};
+
+enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
+class sparse_diagonal_product_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseDiagonalProduct
+ : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
+ internal::no_assignment_operator
+{
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+
+ typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+ typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
+ RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
+ };
+
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
+
+ typedef internal::sparse_diagonal_product_inner_iterator_selector
+ <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.rhs().innerVector(outer).cwiseProduct(expr.lhs().diagonal()), 0)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.lhs().innerVector(outer).cwiseProduct(expr.rhs().diagonal().transpose()), 0)
+ {}
+};
+
+} // end namespace internal
+
+// SparseMatrixBase functions
+
+template<typename Derived>
+template<typename OtherDerived>
+const SparseDiagonalProduct<Derived,OtherDerived>
+SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
+{
+ return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
+}
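+
+// A minimal usage sketch (editorial note; A and d are hypothetical). Multiplying
+// by a diagonal matrix scales columns (A * D) or rows (D * A) without changing
+// the sparsity pattern; both orders should be supported:
+//
+//   Eigen::SparseMatrix<double> A(5,5);
+//   Eigen::VectorXd d(5);
+//   d.setConstant(2.0);
+//   Eigen::SparseMatrix<double> B = A * d.asDiagonal();  // scales column j by d(j)
+//   Eigen::SparseMatrix<double> C = d.asDiagonal() * A;  // scales row i by d(i)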
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
new file mode 100644
index 00000000000..ebb63d36b95
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
@@ -0,0 +1,109 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DOT_H
+#define EIGEN_SPARSE_DOT_H
+
+namespace Eigen {
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+  eigen_assert(other.size()>0 && "you are using an uninitialized vector");
+
+ typename Derived::InnerIterator i(derived(),0);
+ Scalar res(0);
+ while (i)
+ {
+ res += internal::conj(i.value()) * other.coeff(i.index());
+ ++i;
+ }
+ return res;
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+
+ typedef typename Derived::Nested Nested;
+ typedef typename OtherDerived::Nested OtherNested;
+ typedef typename internal::remove_all<Nested>::type NestedCleaned;
+ typedef typename internal::remove_all<OtherNested>::type OtherNestedCleaned;
+
+ const Nested nthis(derived());
+ const OtherNested nother(other.derived());
+
+ typename NestedCleaned::InnerIterator i(nthis,0);
+ typename OtherNestedCleaned::InnerIterator j(nother,0);
+ Scalar res(0);
+ while (i && j)
+ {
+ if (i.index()==j.index())
+ {
+ res += internal::conj(i.value()) * j.value();
+ ++i; ++j;
+ }
+ else if (i.index()<j.index())
+ ++i;
+ else
+ ++j;
+ }
+ return res;
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::squaredNorm() const
+{
+ return internal::real((*this).cwiseAbs2().sum());
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::norm() const
+{
+ return internal::sqrt(squaredNorm());
+}
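+
+// A minimal usage sketch (editorial note; x, y and z are hypothetical):
+//
+//   Eigen::SparseVector<double> x(100), y(100);
+//   x.coeffRef(3) = 2.0;
+//   y.coeffRef(3) = 1.5;
+//   Eigen::VectorXd z = Eigen::VectorXd::Ones(100);
+//   double a = x.dot(y);  // sparse-sparse: visits only matching indices
+//   double b = x.dot(z);  // sparse-dense: one pass over the nonzeros of x
+//   double n = x.norm();  // sqrt(squaredNorm()), i.e. the Euclidean norm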
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_DOT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseFuzzy.h b/extern/Eigen3/Eigen/src/SparseCore/SparseFuzzy.h
new file mode 100644
index 00000000000..f00b3d6469b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseFuzzy.h
@@ -0,0 +1,41 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_FUZZY_H
+#define EIGEN_SPARSE_FUZZY_H
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// bool SparseMatrixBase<Derived>::isApprox(
+// const OtherDerived& other,
+// typename NumTraits<Scalar>::Real prec
+// ) const
+// {
+// const typename internal::nested<Derived,2>::type nested(derived());
+// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
+// return (nested - otherNested).cwise().abs2().sum()
+// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
+// }
+
+#endif // EIGEN_SPARSE_FUZZY_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
new file mode 100644
index 00000000000..214f130f5ed
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
@@ -0,0 +1,1127 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIX_H
+#define EIGEN_SPARSEMATRIX_H
+
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ *
+ * \class SparseMatrix
+ *
+ * \brief A versatile sparse matrix representation
+ *
+ * This class implements a more versatile variant of the common \em compressed row/column storage format.
+ * Each column's (resp. row's) non zeros are stored as a pair of a value and its associated row (resp. column) index.
+ * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
+ * space in between the nonzeros of two successive columns (resp. rows) such that insertion of a new non-zero
+ * can be done with limited memory reallocation and copies.
+ *
+ * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
+ * compatible with many libraries.
+ *
+ * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
+ * is RowMajor. The default is 0 which means column-major.
+ * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+
+template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
+struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
+{
+ typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+
+ typedef _Scalar Scalar;
+ typedef Dense StorageKind;
+ typedef _Index Index;
+ typedef MatrixXpr XprKind;
+
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = 1,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = 1,
+ Flags = 0,
+ CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
+ };
+};
+
+} // end namespace internal
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseMatrix
+ : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ typedef internal::CompressedStorage<Scalar,Index> Storage;
+ enum {
+ Options = _Options
+ };
+
+ protected:
+
+ typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index* m_outerIndex;
+ Index* m_innerNonZeros; // optional, if null then the data is compressed
+ Storage m_data;
+
+ Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
+ const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
+
+ public:
+
+ /** \returns whether \c *this is in compressed form. */
+ inline bool isCompressed() const { return m_innerNonZeros==0; }
+
+ /** \returns the number of rows of the matrix */
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ /** \returns the number of columns of the matrix */
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
+    inline Index innerSize() const { return m_innerSize; }
+    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
+ inline Index outerSize() const { return m_outerSize; }
+
+ /** \returns a const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline const Scalar* valuePtr() const { return &m_data.value(0); }
+ /** \returns a non-const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline Scalar* valuePtr() { return &m_data.value(0); }
+
+ /** \returns a const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline const Index* innerIndexPtr() const { return &m_data.index(0); }
+ /** \returns a non-const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline Index* innerIndexPtr() { return &m_data.index(0); }
+
+ /** \returns a const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline const Index* outerIndexPtr() const { return m_outerIndex; }
+ /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline Index* outerIndexPtr() { return m_outerIndex; }
+
+ /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
+ /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
+
+ /** \internal */
+ inline Storage& data() { return m_data; }
+ /** \internal */
+ inline const Storage& data() const { return m_data; }
+
+    /** \returns the value of the matrix at position \a row, \a col.
+ * This function returns Scalar(0) if the element is an explicit \em zero */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+ return m_data.atInRange(m_outerIndex[outer], end, inner);
+ }
+
+    /** \returns a non-const reference to the value of the matrix at position \a row, \a col
+ *
+ * If the element does not exist then it is inserted via the insert(Index,Index) function
+ * which itself turns the matrix into a non compressed form if that was not the case.
+ *
+      * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
+ * function if the element does not already exist.
+ */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ if(end<=start)
+ return insert(row,col);
+ const Index p = m_data.searchLowerIndex(start,end-1,inner);
+ if((p<end) && (m_data.index(p)==inner))
+ return m_data.value(p);
+ else
+ return insert(row,col);
+ }
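+
+    // A minimal usage sketch (editorial note; A is a hypothetical matrix):
+    //
+    //   Eigen::SparseMatrix<double> A(10,10);
+    //   double v = A.coeff(3,4);  // read-only: returns 0 if (3,4) is not stored
+    //   A.coeffRef(3,4) = 1.0;    // read-write: inserts (3,4) first if absent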
+
+    /** \returns a reference to a newly inserted non-zero coefficient with coordinates \a row x \a col.
+ * The non zero coefficient must \b not already exist.
+ *
+ * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
+ * mode while reserving room for 2 non zeros per inner vector. It is strongly recommended to first
+ * call reserve(const SizesType &) to reserve a more appropriate number of elements per
+      * inner vector that better matches your scenario.
+ *
+ * This function performs a sorted insertion in O(1) if the elements of each inner vector are
+ * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
+ *
+ */
+ EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+ {
+ if(isCompressed())
+ {
+ reserve(VectorXi::Constant(outerSize(), 2));
+ }
+ return insertUncompressed(row,col);
+ }
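+
+    // A recommended filling pattern (editorial sketch; the size and the estimate
+    // of 6 nonzeros per column are hypothetical):
+    //
+    //   Eigen::SparseMatrix<double> A(1000,1000);
+    //   A.reserve(Eigen::VectorXi::Constant(1000, 6)); // ~6 nonzeros per column
+    //   // for each nonzero (i,j,v):
+    //   //   A.insert(i,j) = v;                        // (i,j) must not exist yet
+    //   A.makeCompressed();                            // back to compressed form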
+
+ public:
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+    /** Removes all non zeros but keeps the allocated memory */
+ inline void setZero()
+ {
+ m_data.clear();
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ if(m_innerNonZeros)
+ memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
+ }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const
+ {
+ if(m_innerNonZeros)
+ return innerNonZeros().sum();
+ return static_cast<Index>(m_data.size());
+ }
+
+ /** Preallocates \a reserveSize non zeros.
+ *
+ * Precondition: the matrix must be in compressed mode. */
+ inline void reserve(Index reserveSize)
+ {
+ eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
+ m_data.reserve(reserveSize);
+ }
+
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
+ *
+      * This function turns the matrix into non-compressed mode */
+ template<class SizesType>
+ inline void reserve(const SizesType& reserveSizes);
+ #else
+ template<class SizesType>
+ inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
+ {
+ EIGEN_UNUSED_VARIABLE(enableif);
+ reserveInnerVectors(reserveSizes);
+ }
+ template<class SizesType>
+ inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif = typename SizesType::Scalar())
+ {
+ EIGEN_UNUSED_VARIABLE(enableif);
+ reserveInnerVectors(reserveSizes);
+ }
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+ protected:
+ template<class SizesType>
+ inline void reserveInnerVectors(const SizesType& reserveSizes)
+ {
+
+ if(isCompressed())
+ {
+ std::size_t totalReserveSize = 0;
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = new Index[m_outerSize];
+
+        // temporarily use m_innerNonZeros to hold the new starting points.
+ Index* newOuterIndex = m_innerNonZeros;
+
+ Index count = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ newOuterIndex[j] = count;
+ count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
+ totalReserveSize += reserveSizes[j];
+ }
+ m_data.reserve(totalReserveSize);
+ std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
+ for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+ {
+          std::ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
+ for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+ {
+ m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+ m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+ }
+ previousOuterIndex = m_outerIndex[j];
+ m_outerIndex[j] = newOuterIndex[j];
+ m_innerNonZeros[j] = innerNNZ;
+ }
+ m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+
+ m_data.resize(m_outerIndex[m_outerSize]);
+ }
+ else
+ {
+ Index* newOuterIndex = new Index[m_outerSize+1];
+ Index count = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ newOuterIndex[j] = count;
+ Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
+ Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
+ count += toReserve + m_innerNonZeros[j];
+ }
+ newOuterIndex[m_outerSize] = count;
+
+ m_data.resize(count);
+        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+ {
+ std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
+ if(offset>0)
+ {
+ std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
+ for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+ {
+ m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+ m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+ }
+ }
+ }
+
+ std::swap(m_outerIndex, newOuterIndex);
+ delete[] newOuterIndex;
+ }
+
+ }
+ public:
+
+ //--- low level purely coherent filling ---
+
+ /** \internal
+ * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one according to the storage order
+ *
+      * Before filling a given inner vector you must call the startVec(Index) function.
+ *
+ * After an insertion session, you should call the finalize() function.
+ *
+ * \sa insert, insertBackByOuterInner, startVec */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \internal
+ * \sa insertBack, startVec */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+ eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \internal
+ * \warning use it only if you know what you are doing */
+ inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \internal
+ * \sa insertBack, insertBackByOuterInner */
+ inline void startVec(Index outer)
+ {
+ eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ /** \internal
+ * Must be called after inserting a set of non zero entries using the low level compressed API.
+ */
+ inline void finalize()
+ {
+ if(isCompressed())
+ {
+ Index size = static_cast<Index>(m_data.size());
+ Index i = m_outerSize;
+ // find the last filled column
+ while (i>=0 && m_outerIndex[i]==0)
+ --i;
+ ++i;
+ while (i<=m_outerSize)
+ {
+ m_outerIndex[i] = size;
+ ++i;
+ }
+ }
+ }
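+
+    // A sketch of the low-level compressed filling protocol (editorial note;
+    // assumes a column-major matrix filled strictly in order):
+    //
+    //   Eigen::SparseMatrix<double> A(4,4);
+    //   A.reserve(8);               // compressed-mode reserve
+    //   for(int j=0; j<4; ++j)
+    //   {
+    //     A.startVec(j);            // once per inner vector, in order
+    //     A.insertBack(j,j) = 1.0;  // inner indices in increasing order
+    //   }
+    //   A.finalize();               // fixes up the trailing outer indices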
+
+ //---
+
+ template<typename InputIterators>
+ void setFromTriplets(const InputIterators& begin, const InputIterators& end);
+
+ void sumupDuplicates();
+
+ //---
+
+ /** \internal
+ * same as insert(Index,Index) except that the indices are given relative to the storage order */
+ EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
+ {
+ return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
+ }
+
+ /** Turns the matrix into the \em compressed format.
+ */
+ void makeCompressed()
+ {
+ if(isCompressed())
+ return;
+
+ Index oldStart = m_outerIndex[1];
+ m_outerIndex[1] = m_innerNonZeros[0];
+ for(Index j=1; j<m_outerSize; ++j)
+ {
+ Index nextOldStart = m_outerIndex[j+1];
+ std::ptrdiff_t offset = oldStart - m_outerIndex[j];
+ if(offset>0)
+ {
+ for(Index k=0; k<m_innerNonZeros[j]; ++k)
+ {
+ m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
+ m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
+ }
+ }
+ m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
+ oldStart = nextOldStart;
+ }
+ delete[] m_innerNonZeros;
+ m_innerNonZeros = 0;
+ m_data.resize(m_outerIndex[m_outerSize]);
+ m_data.squeeze();
+ }
+
+    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ prune(default_prunning_func(reference,epsilon));
+ }
+
+ /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
+ * The functor type \a KeepFunc must implement the following function:
+ * \code
+ * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
+ * \endcode
+ * \sa prune(Scalar,RealScalar)
+ */
+ template<typename KeepFunc>
+ void prune(const KeepFunc& keep = KeepFunc())
+ {
+ // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
+ // TODO also implement a unit test
+ makeCompressed();
+
+ Index k = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ Index previousStart = m_outerIndex[j];
+ m_outerIndex[j] = k;
+ Index end = m_outerIndex[j+1];
+ for(Index i=previousStart; i<end; ++i)
+ {
+ if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
+ {
+ m_data.value(k) = m_data.value(i);
+ m_data.index(k) = m_data.index(i);
+ ++k;
+ }
+ }
+ }
+ m_outerIndex[m_outerSize] = k;
+ m_data.resize(k,0);
+ }
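+
+    // A sketch of a custom keep-functor matching the signature above (editorial
+    // note; the functor name and threshold are hypothetical):
+    //
+    //   struct KeepAboveThreshold {
+    //     double thresh;
+    //     explicit KeepAboveThreshold(double t) : thresh(t) {}
+    //     bool operator()(const int&, const int&, const double& value) const
+    //     { return std::abs(value) > thresh; }
+    //   };
+    //   A.prune(KeepAboveThreshold(1e-12));  // drops near-zero entries and compresses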
+
+ /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
+ * \sa resizeNonZeros(Index), reserve(), setZero()
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ m_data.clear();
+ if (m_outerSize != outerSize || m_outerSize==0)
+ {
+ delete[] m_outerIndex;
+ m_outerIndex = new Index [outerSize+1];
+ m_outerSize = outerSize;
+ }
+ if(m_innerNonZeros)
+ {
+ delete[] m_innerNonZeros;
+ m_innerNonZeros = 0;
+ }
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ }
+
+ /** \internal
+ * Resize the nonzero vector to \a size */
+ void resizeNonZeros(Index size)
+ {
+ // TODO remove this function
+ m_data.resize(size);
+ }
+
+ /** \returns a const expression of the diagonal coefficients */
+ const Diagonal<const SparseMatrix> diagonal() const { return *this; }
+
+ /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
+ inline SparseMatrix()
+ : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ resize(0, 0);
+ }
+
+ /** Constructs a \a rows \c x \a cols empty matrix */
+ inline SparseMatrix(Index rows, Index cols)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ resize(rows, cols);
+ }
+
+ /** Constructs a sparse matrix from the sparse expression \a other */
+ template<typename OtherDerived>
+ inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ *this = other.derived();
+ }
+
+ /** Copy constructor (it performs a deep copy) */
+ inline SparseMatrix(const SparseMatrix& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ *this = other.derived();
+ }
+
+ /** Swaps the content of two sparse matrices of the same type.
+ * This is a fast operation that simply swaps the underlying pointers and parameters. */
+ inline void swap(SparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_outerIndex, other.m_outerIndex);
+ std::swap(m_innerSize, other.m_innerSize);
+ std::swap(m_outerSize, other.m_outerSize);
+ std::swap(m_innerNonZeros, other.m_innerNonZeros);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseMatrix& operator=(const SparseMatrix& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ initAssignment(other);
+ if(other.isCompressed())
+ {
+ memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+ m_data = other.m_data;
+ }
+ else
+ {
+ Base::operator=(other);
+ }
+ }
+ return *this;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ { return Base::operator=(product); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
+ { return Base::operator=(other.derived()); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
+ { return Base::operator=(other.derived()); }
+ #endif
+
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ initAssignment(other.derived());
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+      // two-pass algorithm:
+ // 1 - compute the number of coeffs per dest inner vector
+ // 2 - do the actual copy/eval
+ // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+ typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ ++m_outerIndex[it.index()];
+
+ // prefix sum
+ Index count = 0;
+ VectorXi positions(outerSize());
+ for (Index j=0; j<outerSize(); ++j)
+ {
+ Index tmp = m_outerIndex[j];
+ m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ m_outerIndex[outerSize()] = count;
+ // alloc
+ m_data.resize(count);
+ // pass 2
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ m_data.index(pos) = j;
+ m_data.value(pos) = it.value();
+ }
+ }
+ return *this;
+ }
+ else
+ {
+ // there is no special optimization
+ return Base::operator=(other.derived());
+ }
+ }
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+ {
+ EIGEN_DBG_SPARSE(
+ s << "Nonzero entries:\n";
+ if(m.isCompressed())
+ for (Index i=0; i<m.nonZeros(); ++i)
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ else
+ for (Index i=0; i<m.outerSize(); ++i)
+ {
+          Index p = m.m_outerIndex[i];
+          Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+ Index k=p;
+ for (; k<pe; ++k)
+ s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
+ for (; k<m.m_outerIndex[i+1]; ++k)
+ s << "(_,_) ";
+ }
+ s << std::endl;
+ s << std::endl;
+ s << "Outer pointers:\n";
+ for (Index i=0; i<m.outerSize(); ++i)
+ s << m.m_outerIndex[i] << " ";
+ s << " $" << std::endl;
+ if(!m.isCompressed())
+ {
+ s << "Inner non zeros:\n";
+ for (Index i=0; i<m.outerSize(); ++i)
+ s << m.m_innerNonZeros[i] << " ";
+ s << " $" << std::endl;
+ }
+ s << std::endl;
+ );
+ s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseMatrix()
+ {
+ delete[] m_outerIndex;
+ delete[] m_innerNonZeros;
+ }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** Overloaded for performance */
+ Scalar sum() const;
+#endif
+
+# ifdef EIGEN_SPARSEMATRIX_PLUGIN
+# include EIGEN_SPARSEMATRIX_PLUGIN
+# endif
+
+protected:
+
+ template<typename Other>
+ void initAssignment(const Other& other)
+ {
+ resize(other.rows(), other.cols());
+ if(m_innerNonZeros)
+ {
+ delete[] m_innerNonZeros;
+ m_innerNonZeros = 0;
+ }
+ }
+
+ /** \internal
+ * \sa insert(Index,Index) */
+ EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
+ {
+ eigen_assert(isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index previousOuter = outer;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+ {
+ m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ --previousOuter;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ // here we have to handle the tricky case where the outerIndex array
+      // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
+ // the 2nd inner vector...
+ bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+ && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+ size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+ size_t p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ float reallocRatio = 1;
+ if (m_data.allocatedSize()<=m_data.size())
+ {
+ // if there is no preallocated memory, let's reserve a minimum of 32 elements
+ if (m_data.size()==0)
+ {
+ m_data.reserve(32);
+ }
+ else
+ {
+ // we need to reallocate the data, to reduce multiple reallocations
+ // we use a smart resize algorithm based on the current filling ratio
+          // in addition, we use float to avoid integer overflows
+ float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
+ reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+ // furthermore we bound the realloc ratio to:
+ // 1) reduce multiple minor realloc when the matrix is almost filled
+          // 2) avoid allocating too much memory when the matrix is almost empty
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+ }
+ }
+ m_data.resize(m_data.size()+1,reallocRatio);
+
+ if (!isLastVec)
+ {
+ if (previousOuter==-1)
+ {
+ // oops wrong guess.
+ // let's correct the outer offsets
+ for (Index k=0; k<=(outer+1); ++k)
+ m_outerIndex[k] = 0;
+ Index k=outer+1;
+ while(m_outerIndex[k]==0)
+ m_outerIndex[k++] = 1;
+ while (k<=m_outerSize && m_outerIndex[k]!=0)
+ m_outerIndex[k++]++;
+ p = 0;
+ --k;
+ k = m_outerIndex[k]-1;
+ while (k>0)
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ else
+ {
+ // we are not inserting into the last inner vec
+ // update outer indices:
+ Index j = outer+2;
+ while (j<=m_outerSize && m_outerIndex[j]!=0)
+ m_outerIndex[j++]++;
+ --j;
+ // shift data of last vecs:
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(p))
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ }
+
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
+
+ /** \internal
+    * A vector object that is equal to 0 everywhere except at position \a i, where it equals \a v */
+ class SingletonVector
+ {
+ Index m_index;
+ Index m_value;
+ public:
+ typedef Index value_type;
+ SingletonVector(Index i, Index v)
+ : m_index(i), m_value(v)
+ {}
+
+ Index operator[](Index i) const { return i==m_index ? m_value : 0; }
+ };
+
+ /** \internal
+ * \sa insert(Index,Index) */
+ EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
+ {
+ eigen_assert(!isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
+ std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
+ if(innerNNZ>=room)
+ {
+ // this inner vector is full, we need to reallocate the whole buffer :(
+ reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
+ }
+
+ Index startId = m_outerIndex[outer];
+ Index p = startId + m_innerNonZeros[outer];
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_innerNonZeros[outer]++;
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
+
+public:
+ /** \internal
+ * \sa insert(Index,Index) */
+ inline Scalar& insertBackUncompressed(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(!isCompressed());
+ eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
+
+ Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
+ m_innerNonZeros[outer]++;
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
+
+private:
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ }
+
+ struct default_prunning_func {
+ default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+ inline bool operator() (const Index&, const Index&, const Scalar& value) const
+ {
+ return !internal::isMuchSmallerThan(value, reference, epsilon);
+ }
+ Scalar reference;
+ RealScalar epsilon;
+ };
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseMatrix& mat, Index outer)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
+ {
+ if(mat.isCompressed())
+ m_end = mat.m_outerIndex[outer+1];
+ else
+ m_end = m_id + mat.m_innerNonZeros[outer];
+ }
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+ inline Index index() const { return m_indices[m_id]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ Index m_end;
+};
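+// Typical traversal sketch (the names mat, j and sum are illustrative only):
+//   for (SparseMatrix<double>::InnerIterator it(mat,j); it; ++it)
+//     sum += it.value(); // it.index(), it.row() and it.col() are also available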
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+ public:
+ ReverseInnerIterator(const SparseMatrix& mat, Index outer)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
+ {
+ if(mat.isCompressed())
+ m_id = mat.m_outerIndex[outer+1];
+ else
+ m_id = m_start + mat.m_innerNonZeros[outer];
+ }
+
+ inline ReverseInnerIterator& operator--() { --m_id; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id-1]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
+
+ inline Index index() const { return m_indices[m_id-1]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id > m_start); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+};
+
+namespace internal {
+
+template<typename InputIterator, typename SparseMatrixType>
+void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
+{
+ EIGEN_UNUSED_VARIABLE(Options);
+ enum { IsRowMajor = SparseMatrixType::IsRowMajor };
+ typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::Index Index;
+ SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
+
+ // pass 1: count the nnz per inner-vector
+ VectorXi wi(trMat.outerSize());
+ wi.setZero();
+ for(InputIterator it(begin); it!=end; ++it)
+ wi(IsRowMajor ? it->col() : it->row())++;
+
+ // pass 2: insert all the elements into trMat
+ trMat.reserve(wi);
+ for(InputIterator it(begin); it!=end; ++it)
+ trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+
+  // pass 3: sum up the duplicated entries
+ trMat.sumupDuplicates();
+
+ // pass 4: transposed copy -> implicit sorting
+ mat = trMat;
+}
+
+}
+
+
+/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
+ *
+ * A \em triplet is a tuple (i,j,value) defining a non-zero element.
+  * The input list of triplets does not have to be sorted, and may contain duplicated elements.
+  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
+  * This is an \em O(n) operation, with \em n the number of triplet elements.
+  * The initial contents of \c *this are destroyed.
+ * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
+ * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
+ *
+  * The \a InputIterators' value_type must provide the following interface:
+  * \code
+  * Scalar value() const; // the value
+  * Index row() const; // the row index i
+  * Index col() const; // the column index j
+ * \endcode
+ * See for instance the Eigen::Triplet template class.
+ *
+ * Here is a typical usage example:
+ * \code
+ typedef Triplet<double> T;
+ std::vector<T> tripletList;
+  tripletList.reserve(estimation_of_entries);
+ for(...)
+ {
+ // ...
+ tripletList.push_back(T(i,j,v_ij));
+ }
+ SparseMatrixType m(rows,cols);
+ m.setFromTriplets(tripletList.begin(), tripletList.end());
+ // m is ready to go!
+ * \endcode
+ *
+  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
+  * an abstract iterator over a complex data structure that would be expensive to evaluate. The triplets should instead
+  * be explicitly stored in a container such as a std::vector.
+ */
+template<typename Scalar, int _Options, typename _Index>
+template<typename InputIterators>
+void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+{
+ internal::set_from_triplets(begin, end, *this);
+}
+
+/** \internal */
+template<typename Scalar, int _Options, typename _Index>
+void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
+{
+ eigen_assert(!isCompressed());
+ // TODO, in practice we should be able to use m_innerNonZeros for that task
+ VectorXi wi(innerSize());
+ wi.fill(-1);
+ Index count = 0;
+ // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
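+  // e.g. if one inner vector stores the inner indices (2,5,2), the second
+  // occurrence of 2 satisfies wi(2)>=start and is accumulated into the first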
+ for(int j=0; j<outerSize(); ++j)
+ {
+ Index start = count;
+ Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
+ for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
+ {
+ Index i = m_data.index(k);
+ if(wi(i)>=start)
+ {
+        // we have already seen this entry => accumulate it
+ m_data.value(wi(i)) += m_data.value(k);
+ }
+ else
+ {
+ m_data.value(count) = m_data.value(k);
+ m_data.index(count) = m_data.index(k);
+ wi(i) = count;
+ ++count;
+ }
+ }
+ m_outerIndex[j] = start;
+ }
+ m_outerIndex[m_outerSize] = count;
+
+ // turn the matrix into compressed form
+ delete[] m_innerNonZeros;
+ m_innerNonZeros = 0;
+ m_data.resize(m_outerIndex[m_outerSize]);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
new file mode 100644
index 00000000000..ed7da68928d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -0,0 +1,473 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIXBASE_H
+#define EIGEN_SPARSEMATRIXBASE_H
+
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ *
+ * \class SparseMatrixBase
+ *
+ * \brief Base class of any sparse matrices or sparse expressions
+ *
+ * \tparam Derived
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
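+ *
+ * For instance, a plugin header (the file name and member below are purely
+ * illustrative) could add a small convenience accessor to every sparse expression:
+ * \code
+ * // contents of "my_sparsematrixbase_plugin.h"
+ * inline Index storedCoeffs() const { return derived().nonZeros(); }
+ * \endcode
+ * enabled by defining \c EIGEN_SPARSEMATRIXBASE_PLUGIN to that file name before
+ * including any Eigen header.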
+ */
+template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
+{
+ public:
+
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::add_const_on_value_type_if_arithmetic<
+ typename internal::packet_traits<Scalar>::type
+ >::type PacketReturnType;
+
+ typedef SparseMatrixBase StorageBaseType;
+ typedef EigenBase<Derived> Base;
+
+ template<typename OtherDerived>
+ Derived& operator=(const EigenBase<OtherDerived> &other)
+ {
+ other.derived().evalTo(derived());
+ return derived();
+ }
+
+ enum {
+
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ /**< The number of rows at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ /**< The number of columns at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime>::ret),
+ /**< This is equal to the number of coefficients, i.e. the number of
+ * rows times the number of columns, or to \a Dynamic if this is not
+ * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+
+ MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
+ MaxColsAtCompileTime>::ret),
+
+ IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
+ /**< This is set to true if either the number of rows or the number of
+ * columns is known at compile-time to be equal to 1. Indeed, in that case,
+ * we are dealing with a column-vector (if there is only one column) or with
+ * a row-vector (if there is only one row). */
+
+ Flags = internal::traits<Derived>::Flags,
+ /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+ * constructed from this one. See the \ref flags "list of flags".
+ */
+
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+ /**< This is a rough measure of how expensive it is to read one coefficient from
+ * this expression.
+ */
+
+ IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
+ #endif
+ };
+
+ /** \internal the return type of MatrixBase::adjoint() */
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
+ Transpose<const Derived>
+ >::type AdjointReturnType;
+
+
+ typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is the "real scalar" type; if the \a Scalar type is already real numbers
+ * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
+ * \a Scalar is \a std::complex<T> then RealScalar is \a T.
+ *
+ * \sa class NumTraits
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** \internal the return type of coeff()
+ */
+ typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
+
+    /** \internal Represents a matrix with all coefficients equal to one another */
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
+
+ /** type of the equivalent square matrix */
+ typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+ EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+
+ inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+ inline Derived& derived() { return *static_cast<Derived*>(this); }
+ inline Derived& const_cast_derived() const
+ { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+# include "../plugins/CommonCwiseUnaryOps.h"
+# include "../plugins/CommonCwiseBinaryOps.h"
+# include "../plugins/MatrixCwiseUnaryOps.h"
+# include "../plugins/MatrixCwiseBinaryOps.h"
+# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
+# include EIGEN_SPARSEMATRIXBASE_PLUGIN
+# endif
+# undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+
+ /** \returns the number of rows. \sa cols() */
+ inline Index rows() const { return derived().rows(); }
+ /** \returns the number of columns. \sa rows() */
+ inline Index cols() const { return derived().cols(); }
+ /** \returns the number of coefficients, which is \a rows()*cols().
+ * \sa rows(), cols(). */
+ inline Index size() const { return rows() * cols(); }
+    /** \returns the number of nonzero coefficients, which is in practice the number
+ * of stored coefficients. */
+ inline Index nonZeros() const { return derived().nonZeros(); }
+ /** \returns true if either the number of rows or the number of columns is equal to 1.
+ * In other words, this function returns
+ * \code rows()==1 || cols()==1 \endcode
+ * \sa rows(), cols(), IsVectorAtCompileTime. */
+ inline bool isVector() const { return rows()==1 || cols()==1; }
+ /** \returns the size of the storage major dimension,
+      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
+ Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+ /** \returns the size of the inner dimension according to the storage order,
+      * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
+ Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
+
+ bool isRValue() const { return m_isRValue; }
+ Derived& markAsRValue() { m_isRValue = true; return derived(); }
+
+ SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
+
+
+ template<typename OtherDerived>
+ Derived& operator=(const ReturnByValue<OtherDerived>& other)
+ {
+ other.evalTo(derived());
+ return derived();
+ }
+
+
+ template<typename OtherDerived>
+ inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ return assign(other.derived());
+ }
+
+ inline Derived& operator=(const Derived& other)
+ {
+// if (other.isRValue())
+// derived().swap(other.const_cast_derived());
+// else
+ return assign(other.derived());
+ }
+
+ protected:
+
+ template<typename OtherDerived>
+ inline Derived& assign(const OtherDerived& other)
+ {
+ const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+ if ((!transpose) && other.isRValue())
+ {
+ // eval without temporary
+ derived().resize(other.rows(), other.cols());
+ derived().setZero();
+ derived().reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ derived().startVec(j);
+ for (typename OtherDerived::InnerIterator it(other, j); it; ++it)
+ {
+ Scalar v = it.value();
+ derived().insertBackByOuterInner(j,it.index()) = v;
+ }
+ }
+ derived().finalize();
+ }
+ else
+ {
+ assignGeneric(other);
+ }
+ return derived();
+ }
+
+ template<typename OtherDerived>
+ inline void assignGeneric(const OtherDerived& other)
+ {
+ //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
+
+ const Index outerSize = other.outerSize();
+ //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
+      // thanks to shallow copies, we always eval to a temporary
+ Derived temp(other.rows(), other.cols());
+
+ temp.reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ temp.startVec(j);
+ for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+ {
+ Scalar v = it.value();
+ temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+ }
+ }
+ temp.finalize();
+
+ derived() = temp.markAsRValue();
+ }
+
+ public:
+
+ template<typename Lhs, typename Rhs>
+ inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
+ {
+ typedef typename Derived::Nested Nested;
+ typedef typename internal::remove_all<Nested>::type NestedCleaned;
+
+ if (Flags&RowMajorBit)
+ {
+ const Nested nm(m.derived());
+ for (Index row=0; row<nm.outerSize(); ++row)
+ {
+ Index col = 0;
+ for (typename NestedCleaned::InnerIterator it(nm.derived(), row); it; ++it)
+ {
+ for ( ; col<it.index(); ++col)
+ s << "0 ";
+ s << it.value() << " ";
+ ++col;
+ }
+ for ( ; col<m.cols(); ++col)
+ s << "0 ";
+ s << std::endl;
+ }
+ }
+ else
+ {
+ const Nested nm(m.derived());
+ if (m.cols() == 1) {
+ Index row = 0;
+ for (typename NestedCleaned::InnerIterator it(nm.derived(), 0); it; ++it)
+ {
+ for ( ; row<it.index(); ++row)
+ s << "0" << std::endl;
+ s << it.value() << std::endl;
+ ++row;
+ }
+ for ( ; row<m.rows(); ++row)
+ s << "0" << std::endl;
+ }
+ else
+ {
+ SparseMatrix<Scalar, RowMajorBit> trans = m;
+ s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit> >&>(trans);
+ }
+ }
+ return s;
+ }
+
+ template<typename OtherDerived>
+ Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
+ template<typename OtherDerived>
+ Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+
+ Derived& operator*=(const Scalar& other);
+ Derived& operator/=(const Scalar& other);
+
+ #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
+ CwiseBinaryOp< \
+ internal::scalar_product_op< \
+ typename internal::scalar_product_traits< \
+ typename internal::traits<Derived>::Scalar, \
+ typename internal::traits<OtherDerived>::Scalar \
+ >::ReturnType \
+ >, \
+ Derived, \
+ OtherDerived \
+ >
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+ cwiseProduct(const MatrixBase<OtherDerived> &other) const;
+
+ // sparse * sparse
+ template<typename OtherDerived>
+ const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+ // sparse * diagonal
+ template<typename OtherDerived>
+ const SparseDiagonalProduct<Derived,OtherDerived>
+ operator*(const DiagonalBase<OtherDerived> &other) const;
+
+ // diagonal * sparse
+ template<typename OtherDerived> friend
+ const SparseDiagonalProduct<OtherDerived,Derived>
+ operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+ { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+    /** dense * sparse (returns a dense object unless it is an outer product) */
+ template<typename OtherDerived> friend
+ const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
+ operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
+ { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
+
+ /** sparse * dense (returns a dense object unless it is an outer product) */
+ template<typename OtherDerived>
+ const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const MatrixBase<OtherDerived> &other) const;
+
+    /** \returns an expression of P H P^-1 */
+ SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+ {
+ return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
+ }
+
+ template<typename OtherDerived>
+ Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+
+ #ifdef EIGEN2_SUPPORT
+ // deprecated
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+ // deprecated
+ template<typename OtherDerived>
+ void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
+ #endif // EIGEN2_SUPPORT
+
+ template<int Mode>
+ inline const SparseTriangularView<Derived, Mode> triangularView() const;
+
+ template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
+ template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
+
+ template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
+ RealScalar squaredNorm() const;
+ RealScalar norm() const;
+
+ Transpose<Derived> transpose() { return derived(); }
+ const Transpose<const Derived> transpose() const { return derived(); }
+ const AdjointReturnType adjoint() const { return transpose(); }
+
+ // sub-vector
+ SparseInnerVectorSet<Derived,1> row(Index i);
+ const SparseInnerVectorSet<Derived,1> row(Index i) const;
+ SparseInnerVectorSet<Derived,1> col(Index j);
+ const SparseInnerVectorSet<Derived,1> col(Index j) const;
+ SparseInnerVectorSet<Derived,1> innerVector(Index outer);
+ const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
+
+ // set of sub-vectors
+ SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
+
+ SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
+ const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
+
+ /** \internal use operator= */
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& dst) const
+ {
+ dst.setZero();
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ dst.coeffRef(i.row(),i.col()) = i.value();
+ }
+
+ Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ {
+ return derived();
+ }
+
+ template<typename OtherDerived>
+ bool isApprox(const SparseMatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return toDense().isApprox(other.toDense(),prec); }
+
+ template<typename OtherDerived>
+ bool isApprox(const MatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return toDense().isApprox(other,prec); }
+
+ /** \returns the matrix or vector obtained by evaluating this expression.
+ *
+ * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+ * a const reference, in order to avoid a useless copy.
+ */
+ inline const typename internal::eval<Derived>::type eval() const
+ { return typename internal::eval<Derived>::type(derived()); }
+
+ Scalar sum() const;
+
+ protected:
+
+ bool m_isRValue;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEMATRIXBASE_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
new file mode 100644
index 00000000000..b1f2a283fb2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
@@ -0,0 +1,163 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_PERMUTATION_H
+#define EIGEN_SPARSE_PERMUTATION_H
+
+// This file implements sparse * permutation products
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct traits<permut_sparsematrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+ typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+ typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
+ typedef typename MatrixTypeNestedCleaned::Index Index;
+ enum {
+ SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
+ MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
+ };
+
+ typedef typename internal::conditional<MoveOuter,
+ SparseMatrix<Scalar,SrcStorageOrder,Index>,
+ SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> >::type ReturnType;
+};
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct permut_sparsematrix_product_retval
+ : public ReturnByValue<permut_sparsematrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+ typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+ typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
+ typedef typename MatrixTypeNestedCleaned::Index Index;
+
+ enum {
+ SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
+ MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
+ };
+
+ permut_sparsematrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
+ : m_permutation(perm), m_matrix(matrix)
+ {}
+
+ inline int rows() const { return m_matrix.rows(); }
+ inline int cols() const { return m_matrix.cols(); }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ if(MoveOuter)
+ {
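+      // the permutation applies to the outer dimension: whole inner vectors
+      // are copied to their permuted destination positions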
+ SparseMatrix<Scalar,SrcStorageOrder,Index> tmp(m_matrix.rows(), m_matrix.cols());
+ VectorXi sizes(m_matrix.outerSize());
+ for(Index j=0; j<m_matrix.outerSize(); ++j)
+ {
+ Index jp = m_permutation.indices().coeff(j);
+ sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).size();
+ }
+ tmp.reserve(sizes);
+ for(Index j=0; j<m_matrix.outerSize(); ++j)
+ {
+ Index jp = m_permutation.indices().coeff(j);
+ Index jsrc = ((Side==OnTheRight) ^ Transposed) ? jp : j;
+ Index jdst = ((Side==OnTheLeft) ^ Transposed) ? jp : j;
+ for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,jsrc); it; ++it)
+ tmp.insertByOuterInner(jdst,it.index()) = it.value();
+ }
+ dst = tmp;
+ }
+ else
+ {
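+      // the permutation applies to the inner dimension: build the result in
+      // the opposite storage order and redistribute entries by permuted inner index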
+ SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> tmp(m_matrix.rows(), m_matrix.cols());
+ VectorXi sizes(tmp.outerSize());
+ sizes.setZero();
+ PermutationMatrix<Dynamic,Dynamic,Index> perm;
+ if((Side==OnTheLeft) ^ Transposed)
+ perm = m_permutation;
+ else
+ perm = m_permutation.transpose();
+
+ for(Index j=0; j<m_matrix.outerSize(); ++j)
+ for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,j); it; ++it)
+ sizes[perm.indices().coeff(it.index())]++;
+ tmp.reserve(sizes);
+ for(Index j=0; j<m_matrix.outerSize(); ++j)
+ for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,j); it; ++it)
+ tmp.insertByOuterInner(perm.indices().coeff(it.index()),j) = it.value();
+ dst = tmp;
+ }
+ }
+
+ protected:
+ const PermutationType& m_permutation;
+ typename MatrixType::Nested m_matrix;
+};
+
+}
+
+
+
+/** \returns the matrix with the permutation applied to the columns
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, false>
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const PermutationBase<PermDerived>& perm)
+{
+ return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, false>(perm, matrix.derived());
+}
+
+/** \returns the matrix with the permutation applied to the rows
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, false>
+operator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)
+{
+ return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, false>(perm, matrix.derived());
+}
+
+
+
+/** \returns the matrix with the inverse permutation applied to the columns.
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, true>
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const Transpose<PermutationBase<PermDerived> >& tperm)
+{
+ return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, true>(tperm.nestedPermutation(), matrix.derived());
+}
+
+/** \returns the matrix with the inverse permutation applied to the rows.
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>
+operator*(const Transpose<PermutationBase<PermDerived> >& tperm, const SparseMatrixBase<SparseDerived>& matrix)
+{
+ return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_PERMUTATION_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h
new file mode 100644
index 00000000000..813dbf62427
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h
@@ -0,0 +1,201 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEPRODUCT_H
+#define EIGEN_SPARSEPRODUCT_H
+
+namespace Eigen {
+
+template<typename Lhs, typename Rhs>
+struct SparseSparseProductReturnType
+{
+ typedef typename internal::traits<Lhs>::Scalar Scalar;
+ enum {
+ LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
+ RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
+ TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
+ TransposeLhs = LhsRowMajor && (!RhsRowMajor)
+ };
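+  // note: when the operands' storage orders differ, the mismatched side is
+  // evaluated into a column-major SparseMatrix so that the product kernel
+  // always sees operands with a consistent storage order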
+
+ typedef typename internal::conditional<TransposeLhs,
+ SparseMatrix<Scalar,0>,
+ typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
+
+ typedef typename internal::conditional<TransposeRhs,
+ SparseMatrix<Scalar,0>,
+ typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
+
+ typedef SparseSparseProduct<LhsNested, RhsNested> Type;
+};
+
+namespace internal {
+template<typename LhsNested, typename RhsNested>
+struct traits<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ typedef MatrixXpr XprKind;
+ // clean the nested types:
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+ typedef typename _LhsNested::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+ typename traits<_RhsNested>::Index>::type Index;
+
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+
+ RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+ ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+ MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+ InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+ EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+
+ RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+ Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeAssigningBit
+ | EvalBeforeNestingBit,
+
+ CoeffReadCost = Dynamic
+ };
+
+ typedef Sparse StorageKind;
+};
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested>
+class SparseSparseProduct : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseSparseProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
+
+ private:
+
+ typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
+ typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
+
+ public:
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true)
+ {
+ init();
+ }
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, RealScalar tolerance)
+ : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
+ {
+ init();
+ }
+
+ SparseSparseProduct pruned(Scalar reference = 0, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision()) const
+ {
+ return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon);
+ }
+
+ template<typename Dest>
+ void evalTo(Dest& result) const
+ {
+ if(m_conservative)
+ internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result);
+ else
+ internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance);
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ void init()
+ {
+ eigen_assert(m_lhs.cols() == m_rhs.rows());
+
+ enum {
+ ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
+ || _RhsNested::RowsAtCompileTime==Dynamic
+ || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
+ AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwise()*v2
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+ }
+
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+ RealScalar m_tolerance;
+ bool m_conservative;
+};
+
+// sparse = sparse * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+{
+ product.evalTo(derived());
+ return derived();
+}
+
+/** \returns an expression of the product of two sparse matrices.
+  * By default a conservative product preserving the symbolic non-zeros is performed.
+  * The automatic pruning of the small values can be achieved by calling the pruned() function,
+  * in which case a different product algorithm is employed:
+  * \code
+  * C = (A*B).pruned(); // suppress numerical zeros (exact)
+  * C = (A*B).pruned(ref);
+  * C = (A*B).pruned(ref,epsilon);
+  * \endcode
+  * where \c ref is a meaningful non-zero reference value.
+ * */
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseRedux.h b/extern/Eigen3/Eigen/src/SparseCore/SparseRedux.h
new file mode 100644
index 00000000000..73fb9a318fd
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseRedux.h
@@ -0,0 +1,60 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEREDUX_H
+#define EIGEN_SPARSEREDUX_H
+
+namespace Eigen {
+
+template<typename Derived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::sum() const
+{
+  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ Scalar res(0);
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+ res += iter.value();
+ return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
+SparseMatrix<_Scalar,_Options,_Index>::sum() const
+{
+  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
+SparseVector<_Scalar,_Options,_Index>::sum() const
+{
+  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEREDUX_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
new file mode 100644
index 00000000000..fc23b24d652
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -0,0 +1,495 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
+#define EIGEN_SPARSE_SELFADJOINTVIEW_H
+
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ * \class SparseSelfAdjointView
+ *
+ * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
+ *
+  * \param MatrixType the type of the sparse matrix storing the coefficients
+ * \param UpLo can be either \c #Lower or \c #Upper
+ *
+  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
+  * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
+  * and most of the time this is the only way that it is used.
+ *
+ * \sa SparseMatrixBase::selfadjointView()
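+  *
+  * A minimal usage sketch (the names \c A, \c x, \c y and \c n are illustrative only):
+  * \code
+  * SparseMatrix<double> A(n,n); // assume only the lower triangle is stored
+  * VectorXd y = A.selfadjointView<Lower>() * x;
+  * \endcode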
+ */
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct;
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct;
+
+namespace internal {
+
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+}
+
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
+ : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+{
+ public:
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+ {
+      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
+ }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ /** \internal \returns a reference to the nested matrix */
+ const _MatrixTypeNested& matrix() const { return m_matrix; }
+ _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+
+ /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
+ template<typename OtherDerived>
+ SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+ operator*(const MatrixBase<OtherDerived>& rhs) const
+ {
+ return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+ }
+
+ /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
+ template<typename OtherDerived> friend
+ DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+ operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+ {
+ return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+ }
+
+ /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+ * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+ *
+ * \returns a reference to \c *this
+ *
+ * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+ * call this function with u.adjoint().
+ */
+ template<typename DerivedU>
+ SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+
+    /** \internal triggered by sparse_matrix = SparseSelfAdjointView; */
+ template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
+ }
+
+ template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
+ {
+ // TODO directly evaluate into _dest;
+ SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
+ _dest = tmp;
+ }
+
+    /** \returns an expression of P H P^-1 */
+ SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+ {
+ return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+ }
+
+ template<typename SrcMatrixType,int SrcUpLo>
+ SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+ {
+ permutedMatrix.evalTo(*this);
+ return *this;
+ }
+
+
+ SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
+ {
+ PermutationMatrix<Dynamic> pnull;
+ return *this = src.twistedBy(pnull);
+ }
+
+ template<typename SrcMatrixType,unsigned int SrcUpLo>
+ SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
+ {
+ PermutationMatrix<Dynamic> pnull;
+ return *this = src.twistedBy(pnull);
+ }
+
+
+ // const SparseLLT<PlainObject, UpLo> llt() const;
+ // const SparseLDLT<PlainObject, UpLo> ldlt() const;
+
+ protected:
+
+ typename MatrixType::Nested m_matrix;
+ mutable VectorI m_countPerRow;
+ mutable VectorI m_countPerCol;
+};
+
+/***************************************************************************
+* Implementation of SparseMatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+{
+ return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+{
+ return derived();
+}
+
+/***************************************************************************
+* Implementation of SparseSelfAdjointView methods
+***************************************************************************/
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SparseSelfAdjointView<MatrixType,UpLo>&
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+{
+ SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
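+  // only the UpLo triangular half of tmp is kept below, matching the
+  // triangular part wrapped by this selfadjoint view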
+ if(alpha==Scalar(0))
+ m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+ else
+ m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+
+ return *this;
+}
+
+/***************************************************************************
+* Implementation of sparse self-adjoint time dense matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct
+ : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
+
+ SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ // TODO use alpha
+ eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
+ typedef typename internal::remove_all<Lhs>::type _Lhs;
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum {
+ LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
+ ProcessFirstHalf =
+ ((UpLo&(Upper|Lower))==(Upper|Lower))
+ || ( (UpLo&Upper) && !LhsIsRowMajor)
+ || ( (UpLo&Lower) && LhsIsRowMajor),
+ ProcessSecondHalf = !ProcessFirstHalf
+ };
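+      // ProcessFirstHalf is set when the stored strict triangular part precedes
+      // the diagonal in iteration order; each stored coefficient also contributes
+      // its conjugate mirror, and the diagonal entry is applied once per outer vector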
+ for (Index j=0; j<m_lhs.outerSize(); ++j)
+ {
+ LhsInnerIterator i(m_lhs,j);
+ if (ProcessSecondHalf)
+ {
+ while (i && i.index()<j) ++i;
+ if(i && i.index()==j)
+ {
+ dest.row(j) += i.value() * m_rhs.row(j);
+ ++i;
+ }
+ }
+ for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ Index a = LhsIsRowMajor ? j : i.index();
+ Index b = LhsIsRowMajor ? i.index() : j;
+ typename Lhs::Scalar v = i.value();
+ dest.row(a) += (v) * m_rhs.row(b);
+ dest.row(b) += internal::conj(v) * m_rhs.row(a);
+ }
+ if (ProcessFirstHalf && i && (i.index()==j))
+ dest.row(j) += i.value() * m_rhs.row(j);
+ }
+ }
+
+ private:
+ SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct
+ : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
+
+ DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+ {
+ // TODO
+ }
+
+ private:
+ DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
+};
+
+/***************************************************************************
+* Implementation of symmetric copies and permutations
+***************************************************************************/
+namespace internal {
+
+template<typename MatrixType, int UpLo>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+
+ Dest& dest(_dest.derived());
+ enum {
+ StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
+ };
+
+ Index size = mat.rows();
+ VectorI count;
+ count.resize(size);
+ count.setZero();
+ dest.resize(size,size);
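+  // pass 1: count the number of entries per destination inner vector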
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index r = it.row();
+ Index c = it.col();
+ Index ip = perm ? perm[i] : i;
+ if(UpLo==(Upper|Lower))
+ count[StorageOrderMatch ? jp : ip]++;
+ else if(r==c)
+ count[ip]++;
+ else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
+ {
+ count[ip]++;
+ count[jp]++;
+ }
+ }
+ }
+ Index nnz = count.sum();
+
+ // reserve space
+ dest.resizeNonZeros(nnz);
+ dest.outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
+ for(Index j=0; j<size; ++j)
+ count[j] = dest.outerIndexPtr()[j];
+
+ // copy data
+ for(Index j = 0; j<size; ++j)
+ {
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index r = it.row();
+ Index c = it.col();
+
+ Index jp = perm ? perm[j] : j;
+ Index ip = perm ? perm[i] : i;
+
+ if(UpLo==(Upper|Lower))
+ {
+ Index k = count[StorageOrderMatch ? jp : ip]++;
+ dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
+ dest.valuePtr()[k] = it.value();
+ }
+ else if(r==c)
+ {
+ Index k = count[ip]++;
+ dest.innerIndexPtr()[k] = ip;
+ dest.valuePtr()[k] = it.value();
+ }
+ else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
+ {
+ if(!StorageOrderMatch)
+ std::swap(ip,jp);
+ Index k = count[jp]++;
+ dest.innerIndexPtr()[k] = ip;
+ dest.valuePtr()[k] = it.value();
+ k = count[ip]++;
+ dest.innerIndexPtr()[k] = jp;
+ dest.valuePtr()[k] = internal::conj(it.value());
+ }
+ }
+ }
+}
+
+template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ enum {
+ SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
+ StorageOrderMatch = int(SrcOrder) == int(DstOrder),
+ DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
+ SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
+ };
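+  // note: a row-major matrix stores the transpose pattern, hence the effective
+  // triangular part (Upper/Lower) is flipped for a row-major source/destination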
+
+ Index size = mat.rows();
+ VectorI count(size);
+ count.setZero();
+ dest.resize(size,size);
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+ continue;
+
+ Index ip = perm ? perm[i] : i;
+ count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ }
+ }
+ dest.outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
+ dest.resizeNonZeros(dest.outerIndexPtr()[size]);
+ for(Index j=0; j<size; ++j)
+ count[j] = dest.outerIndexPtr()[j];
+
+ for(Index j = 0; j<size; ++j)
+ {
+
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+ continue;
+
+ Index jp = perm ? perm[j] : j;
+ Index ip = perm? perm[i] : i;
+
+ Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
+
+ if(!StorageOrderMatch) std::swap(ip,jp);
+ if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
+ dest.valuePtr()[k] = conj(it.value());
+ else
+ dest.valuePtr()[k] = it.value();
+ }
+ }
+}
+
+}
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct
+ : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+{
+ public:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ protected:
+ typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
+ public:
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
+ : m_matrix(mat), m_perm(perm)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ template<typename DestScalar, int Options, typename DstIndex>
+ void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+ }
+
+ template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
+ {
+ internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
+ }
+
+ protected:
+ MatrixTypeNested m_matrix;
+ const Perm& m_perm;
+
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
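The permute_symm_to_fullsymm() and permute_symm_to_symm() kernels above are internal. In this Eigen version they are typically reached by twisting a self-adjoint view by a permutation and assigning the result; the sketch below assumes SparseSelfAdjointView::twistedBy() as declared earlier in this header, with placeholder matrix and permutation names.

    #include <Eigen/SparseCore>

    // Form P*A*P^T for a symmetric A of which only the lower triangle is
    // stored. Assigning to a plain SparseMatrix runs
    // permute_symm_to_fullsymm(); assigning to another selfadjointView()
    // would run permute_symm_to_symm() instead.
    Eigen::SparseMatrix<double> permuteSymmetric(
        const Eigen::SparseMatrix<double>& A,  // lower triangle filled
        const Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int>& perm)
    {
      Eigen::SparseMatrix<double> PAPt;
      PAPt = A.selfadjointView<Eigen::Lower>().twistedBy(perm);
      return PAPt;
    }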
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
new file mode 100644
index 00000000000..abd4fda829c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -0,0 +1,164 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
+#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
+
+namespace Eigen {
+
+namespace internal {
+
+
+// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, typename ResultType::RealScalar tolerance)
+{
+ // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
+
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ //int size = lhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ // allocate a temporary buffer
+ AmbiVector<Scalar,Index> tempVector(rows);
+
+ // estimate the number of non zero entries
+  // given a rhs column containing Y non zeros, we assume that the respective Y columns
+  // of the lhs differ on average by one non zero, thus the number of non zeros of
+  // the product of a rhs column with the lhs is X+Y, where X is the average number of
+  // non zeros per column of the lhs.
+ // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
+ Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+
+ // mimics a resizeByInnerOuter:
+ if(ResultType::IsRowMajor)
+ res.resize(cols, rows);
+ else
+ res.resize(rows, cols);
+
+ res.reserve(estimated_nnz_prod);
+ double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
+ for (Index j=0; j<cols; ++j)
+ {
+ // FIXME:
+ //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
+ // let's do a more accurate determination of the nnz ratio for the current column j of res
+ tempVector.init(ratioColRes);
+ tempVector.setZero();
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
+ tempVector.restart();
+ Scalar x = rhsIt.value();
+ for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+ {
+ tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
+ }
+ }
+ res.startVec(j);
+ for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector,tolerance); it; ++it)
+ res.insertBackByOuterInner(j,it.index()) = it.value();
+ }
+ res.finalize();
+}
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_sparse_product_with_pruning_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+ typedef typename ResultType::RealScalar RealScalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ {
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ {
+ // we need a col-major matrix to hold the result
+ typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ SparseTemporaryType _res(res.rows(), res.cols());
+ internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
+ res = _res;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ {
+ // let's transpose the product to get a column x column product
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix colLhs(lhs);
+ ColMajorMatrix colRhs(rhs);
+ internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
+
+ // let's transpose the product to get a column x column product
+// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+// SparseTemporaryType _res(res.cols(), res.rows());
+// sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
+// res = _res.transpose();
+ }
+};
+
+// NOTE the two other cases (col,row,*) must never occur since they are caught
+// by ProductReturnType, which transforms them into (col,col,*) by evaluating the rhs.
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
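The tolerance parameter above controls which result coefficients survive the product. A conservative usage sketch that does not assume any expression-level pruning API and simply prunes the evaluated product (the thresholds are arbitrary):

    #include <Eigen/SparseCore>

    // Multiply two sparse matrices, then drop tiny fill-in entries:
    // prune(ref, eps) keeps only coefficients c with |c| > ref*eps.
    Eigen::SparseMatrix<double> prunedProduct(const Eigen::SparseMatrix<double>& A,
                                              const Eigen::SparseMatrix<double>& B)
    {
      Eigen::SparseMatrix<double> C = A * B;
      C.prune(1.0, 1e-12);  // removes entries with magnitude <= 1e-12
      return C;
    }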
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
new file mode 100644
index 00000000000..07d9e0bbd08
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
@@ -0,0 +1,76 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRANSPOSE_H
+#define EIGEN_SPARSETRANSPOSE_H
+
+namespace Eigen {
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+ : public SparseMatrixBase<Transpose<MatrixType> >
+{
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
+};
+
+// NOTE: VC10 triggers an ICE if we don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index;
+// a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
+// does not fix the issue.
+// An alternative is to define the nested class in the parent class itself.
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
+ : public _MatrixTypeNested::InnerIterator
+{
+ typedef typename _MatrixTypeNested::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
+ : Base(trans.derived().nestedExpression(), outer)
+ {}
+ inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+ inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
+ : public _MatrixTypeNested::ReverseInnerIterator
+{
+ typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
+ : Base(xpr.derived().nestedExpression(), outer)
+ {}
+ inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+ inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSETRANSPOSE_H
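The iterators above forward to the nested expression with row() and col() exchanged, which is what lets a transpose expression be consumed lazily or evaluated into a plain matrix. A trivial sketch:

    #include <Eigen/SparseCore>

    // Evaluate the transpose of a sparse matrix; the copy is driven by the
    // InnerIterator defined above, swapping row()/col() on the fly.
    Eigen::SparseMatrix<double> transposed(const Eigen::SparseMatrix<double>& A)
    {
      Eigen::SparseMatrix<double> At = A.transpose();
      return At;
    }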
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h
new file mode 100644
index 00000000000..59aab575684
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h
@@ -0,0 +1,179 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
+#define EIGEN_SPARSE_TRIANGULARVIEW_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatrixType, int Mode>
+struct traits<SparseTriangularView<MatrixType,Mode> >
+: public traits<MatrixType>
+{};
+
+} // namespace internal
+
+template<typename MatrixType, int Mode> class SparseTriangularView
+ : public SparseMatrixBase<SparseTriangularView<MatrixType,Mode> >
+{
+ enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
+ || ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)),
+ SkipLast = !SkipFirst,
+ HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
+ };
+
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView)
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
+ typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+
+ inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {}
+
+ /** \internal */
+ inline const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
+
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solve(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
+
+ protected:
+ MatrixTypeNested m_matrix;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
+{
+ typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
+ : Base(view.nestedExpression(), outer), m_returnOne(false)
+ {
+ if(SkipFirst)
+ {
+ while((*this) && (HasUnitDiag ? this->index()<=outer : this->index()<outer))
+ Base::operator++();
+ if(HasUnitDiag)
+ m_returnOne = true;
+ }
+ else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ {
+ if((!SkipFirst) && Base::operator bool())
+ Base::operator++();
+ m_returnOne = true;
+ }
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ if(HasUnitDiag && m_returnOne)
+ m_returnOne = false;
+ else
+ {
+ Base::operator++();
+ if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ {
+ if((!SkipFirst) && Base::operator bool())
+ Base::operator++();
+ m_returnOne = true;
+ }
+ }
+ return *this;
+ }
+
+ inline Index row() const { return Base::row(); }
+ inline Index col() const { return Base::col(); }
+ inline Index index() const
+ {
+ if(HasUnitDiag && m_returnOne) return Base::outer();
+ else return Base::index();
+ }
+ inline Scalar value() const
+ {
+ if(HasUnitDiag && m_returnOne) return Scalar(1);
+ else return Base::value();
+ }
+
+ EIGEN_STRONG_INLINE operator bool() const
+ {
+ if(HasUnitDiag && m_returnOne)
+ return true;
+ return (SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer()));
+ }
+ protected:
+ bool m_returnOne;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
+{
+ typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer)
+ : Base(view.nestedExpression(), outer)
+ {
+      eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not yet support triangular views with a unit diagonal");
+ if(SkipLast)
+ while((*this) && this->index()>outer)
+ --(*this);
+ }
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+    { Base::operator--(); return *this; }
+
+ inline Index row() const { return Base::row(); }
+ inline Index col() const { return Base::col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const
+ {
+ return SkipLast ? Base::operator bool() : (Base::operator bool() && this->index() >= this->outer());
+ }
+};
+
+template<typename Derived>
+template<int Mode>
+inline const SparseTriangularView<Derived, Mode>
+SparseMatrixBase<Derived>::triangularView() const
+{
+ return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
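A short sketch of the view in use. Extracting a triangular part evaluates through the InnerIterator defined above; with Eigen::UnitLower the iterator would additionally synthesize the implicit unit diagonal:

    #include <Eigen/SparseCore>

    // Keep only the lower triangular part of a square sparse matrix.
    Eigen::SparseMatrix<double> lowerPart(const Eigen::SparseMatrix<double>& A)
    {
      Eigen::SparseMatrix<double> L = A.triangularView<Eigen::Lower>();
      return L;
    }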
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
new file mode 100644
index 00000000000..050b65253a8
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
@@ -0,0 +1,188 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEUTIL_H
+#define EIGEN_SPARSEUTIL_H
+
+namespace Eigen {
+
+#ifdef NDEBUG
+#define EIGEN_DBG_SPARSE(X)
+#else
+#define EIGEN_DBG_SPARSE(X) X
+#endif
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
+{ \
+ return Base::operator Op(other.derived()); \
+} \
+EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
+{ \
+ return Base::operator Op(other); \
+}
+
+#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename Other> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
+{ \
+ return Base::operator Op(scalar); \
+}
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
+
+#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
+ typedef BaseClass Base; \
+ typedef typename Eigen::internal::traits<Derived >::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::internal::nested<Derived >::type Nested; \
+ typedef typename Eigen::internal::traits<Derived >::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived >::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived >::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived >::ColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived >::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived >::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+ using Base::derived; \
+ using Base::const_cast_derived;
+
+#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived >)
+
+const int CoherentAccessPattern = 0x1;
+const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
+
+template<typename Derived> class SparseMatrixBase;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
+
+template<typename MatrixType, int Size> class SparseInnerVectorSet;
+template<typename MatrixType, int Mode> class SparseTriangularView;
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
+template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
+template<typename MatrixType> class SparseView;
+
+template<typename Lhs, typename Rhs> class SparseSparseProduct;
+template<typename Lhs, typename Rhs> class SparseTimeDenseProduct;
+template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
+template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
+
+template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType;
+template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
+
+namespace internal {
+
+template<typename T,int Rows,int Cols> struct sparse_eval;
+
+template<typename T> struct eval<T,Sparse>
+ : public sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime>
+{};
+
+template<typename T,int Cols> struct sparse_eval<T,1,Cols> {
+ typedef typename traits<T>::Scalar _Scalar;
+ enum { _Flags = traits<T>::Flags| RowMajorBit };
+ public:
+ typedef SparseVector<_Scalar, _Flags> type;
+};
+
+template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
+ typedef typename traits<T>::Scalar _Scalar;
+ enum { _Flags = traits<T>::Flags & (~RowMajorBit) };
+ public:
+ typedef SparseVector<_Scalar, _Flags> type;
+};
+
+template<typename T,int Rows,int Cols> struct sparse_eval {
+ typedef typename traits<T>::Scalar _Scalar;
+ enum { _Flags = traits<T>::Flags };
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+template<typename T> struct sparse_eval<T,1,1> {
+ typedef typename traits<T>::Scalar _Scalar;
+ public:
+ typedef Matrix<_Scalar, 1, 1> type;
+};
+
+template<typename T> struct plain_matrix_type<T,Sparse>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+} // end namespace internal
+
+/** \ingroup SparseCore_Module
+ *
+ * \class Triplet
+ *
+ * \brief A small structure to hold a non zero as a triplet (i,j,value).
+ *
+ * \sa SparseMatrix::setFromTriplets()
+ */
+template<typename Scalar, typename Index=unsigned int>
+class Triplet
+{
+public:
+ Triplet() : m_row(0), m_col(0), m_value(0) {}
+
+ Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0))
+ : m_row(i), m_col(j), m_value(v)
+ {}
+
+ /** \returns the row index of the element */
+ const Index& row() const { return m_row; }
+
+ /** \returns the column index of the element */
+ const Index& col() const { return m_col; }
+
+ /** \returns the value of the element */
+ const Scalar& value() const { return m_value; }
+protected:
+ Index m_row, m_col;
+ Scalar m_value;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEUTIL_H
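As the \sa above hints, Triplet is the input format of SparseMatrix::setFromTriplets(). A minimal sketch; the 3x3 values are arbitrary:

    #include <vector>
    #include <Eigen/SparseCore>

    // Build a small sparse matrix from (row, col, value) triplets.
    Eigen::SparseMatrix<double> buildExample()
    {
      std::vector<Eigen::Triplet<double> > triplets;
      triplets.push_back(Eigen::Triplet<double>(0, 0, 4.0));
      triplets.push_back(Eigen::Triplet<double>(1, 2, -1.5));
      triplets.push_back(Eigen::Triplet<double>(2, 1, 3.0));

      Eigen::SparseMatrix<double> A(3, 3);
      A.setFromTriplets(triplets.begin(), triplets.end());  // duplicates are summed
      return A;
    }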
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
new file mode 100644
index 00000000000..e81347705b2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
@@ -0,0 +1,413 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEVECTOR_H
+#define EIGEN_SPARSEVECTOR_H
+
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ * \class SparseVector
+ *
+ * \brief a sparse vector class
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
+ */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseVector<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ IsColVector = (_Options & RowMajorBit) ? 0 : 1,
+
+ RowsAtCompileTime = IsColVector ? Dynamic : 1,
+ ColsAtCompileTime = IsColVector ? 1 : Dynamic,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit),
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseVector
+ : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
+
+ protected:
+ public:
+
+ typedef SparseMatrixBase<SparseVector> SparseBase;
+ enum { IsColVector = internal::traits<SparseVector>::IsColVector };
+
+ enum {
+ Options = _Options
+ };
+
+ internal::CompressedStorage<Scalar,Index> m_data;
+ Index m_size;
+
+ internal::CompressedStorage<Scalar,Index>& _data() { return m_data; }
+    const internal::CompressedStorage<Scalar,Index>& _data() const { return m_data; }
+
+ public:
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
+ EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
+ EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
+ EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
+
+ EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); }
+ EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); }
+
+ EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ eigen_assert((IsColVector ? col : row)==0);
+ return coeff(IsColVector ? row : col);
+ }
+ inline Scalar coeff(Index i) const { return m_data.at(i); }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ eigen_assert((IsColVector ? col : row)==0);
+      return coeffRef(IsColVector ? row : col);
+ }
+
+ /** \returns a reference to the coefficient value at given index \a i
+      * This operation involves a log(rho*size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ *
+ * This insertion might be very costly if the number of nonzeros above \a i is large.
+ */
+ inline Scalar& coeffRef(Index i)
+ {
+ return m_data.atWithInsertion(i);
+ }
+
+ public:
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline void setZero() { m_data.clear(); }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
+
+ inline void startVec(Index outer)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ }
+
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ return insertBack(inner);
+ }
+ inline Scalar& insertBack(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ inline Scalar& insert(Index row, Index col)
+ {
+ Index inner = IsColVector ? row : col;
+ Index outer = IsColVector ? col : row;
+ eigen_assert(outer==0);
+ return insert(inner);
+ }
+ Scalar& insert(Index i)
+ {
+ Index startId = 0;
+ Index p = Index(m_data.size()) - 1;
+ // TODO smart realloc
+ m_data.resize(p+2,1);
+
+ while ( (p >= startId) && (m_data.index(p) > i) )
+ {
+ m_data.index(p+1) = m_data.index(p);
+ m_data.value(p+1) = m_data.value(p);
+ --p;
+ }
+ m_data.index(p+1) = i;
+ m_data.value(p+1) = 0;
+ return m_data.value(p+1);
+ }
+
+    /** Preallocates room for \a reserveSize non zeros */
+ inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
+
+
+ inline void finalize() {}
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ m_data.prune(reference,epsilon);
+ }
+
+ void resize(Index rows, Index cols)
+ {
+ eigen_assert(rows==1 || cols==1);
+ resize(IsColVector ? rows : cols);
+ }
+
+ void resize(Index newSize)
+ {
+ m_size = newSize;
+ m_data.clear();
+ }
+
+ void resizeNonZeros(Index size) { m_data.resize(size); }
+
+ inline SparseVector() : m_size(0) { resize(0); }
+
+ inline SparseVector(Index size) : m_size(0) { resize(size); }
+
+ inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
+
+ template<typename OtherDerived>
+ inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline SparseVector(const SparseVector& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(SparseVector& other)
+ {
+ std::swap(m_size, other.m_size);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseVector& operator=(const SparseVector& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.size());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
+ return assign(other.transpose());
+ else
+ return assign(other);
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ {
+ return Base::operator=(product);
+ }
+ #endif
+
+ friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
+ {
+ for (Index i=0; i<m.nonZeros(); ++i)
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ s << std::endl;
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseVector() {}
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ public:
+
+ /** \deprecated use setZero() and reserve() */
+ EIGEN_DEPRECATED void startFill(Index reserve)
+ {
+ setZero();
+ m_data.reserve(reserve);
+ }
+
+ /** \deprecated use insertBack(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fill(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insertBack(Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ /** \deprecated use insert(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fillrand(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insert(Index) */
+      Index ip = perm ? perm[i] : i;
+ {
+ return insert(i);
+ }
+
+ /** \deprecated use finalize() */
+ EIGEN_DEPRECATED void endFill() {}
+
+# ifdef EIGEN_SPARSEVECTOR_PLUGIN
+# include EIGEN_SPARSEVECTOR_PLUGIN
+# endif
+
+protected:
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase<OtherDerived>& _other)
+ {
+ const OtherDerived& other(_other.derived());
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if(needToTranspose)
+ {
+ Index size = other.size();
+ Index nnz = other.nonZeros();
+ resize(size);
+ reserve(nnz);
+ for(Index i=0; i<size; ++i)
+ {
+ typename OtherDerived::InnerIterator it(other, i);
+ if(it)
+ insert(i) = it.value();
+ }
+ return *this;
+ }
+ else
+ {
+ // there is no special optimization
+ return Base::operator=(other);
+ }
+ }
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseVector& vec, Index outer=0)
+ : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ }
+
+ InnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
+ : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_data.value(m_id); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
+
+ inline Index index() const { return m_data.index(m_id); }
+ inline Index row() const { return IsColVector ? index() : 0; }
+ inline Index col() const { return IsColVector ? 0 : index(); }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const internal::CompressedStorage<Scalar,Index>& m_data;
+ Index m_id;
+ const Index m_end;
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+ public:
+ ReverseInnerIterator(const SparseVector& vec, Index outer=0)
+ : m_data(vec.m_data), m_id(static_cast<Index>(m_data.size())), m_start(0)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ }
+
+ ReverseInnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
+ : m_data(data), m_id(static_cast<Index>(m_data.size())), m_start(0)
+ {}
+
+ inline ReverseInnerIterator& operator--() { m_id--; return *this; }
+
+ inline Scalar value() const { return m_data.value(m_id-1); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id-1)); }
+
+ inline Index index() const { return m_data.index(m_id-1); }
+ inline Index row() const { return IsColVector ? index() : 0; }
+ inline Index col() const { return IsColVector ? 0 : index(); }
+
+ inline operator bool() const { return (m_id > m_start); }
+
+ protected:
+ const internal::CompressedStorage<Scalar,Index>& m_data;
+ Index m_id;
+ const Index m_start;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEVECTOR_H
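As documented on coeffRef() above, random insertion costs a binary search plus a shifted insertion, so it pays to reserve() first and to insert roughly in order. A minimal sketch; indices and values are arbitrary, and size is assumed to be greater than 7:

    #include <Eigen/SparseCore>

    // Fill a sparse vector via coeffRef() (sorted insertion), then sum the
    // stored entries with the InnerIterator.
    double sparseVectorDemo(int size)
    {
      Eigen::SparseVector<double> v(size);
      v.reserve(3);          // avoid repeated reallocation during insertion
      v.coeffRef(2) = 1.0;   // binary search + sorted insertion
      v.coeffRef(7) = -2.0;
      v.coeffRef(5) = 0.5;   // inserting in the middle shifts later entries

      double sum = 0;
      for (Eigen::SparseVector<double>::InnerIterator it(v); it; ++it)
        sum += it.value();   // visits non zeros in increasing index order
      return sum;
    }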
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
new file mode 100644
index 00000000000..43a3adb240e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
@@ -0,0 +1,113 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEVIEW_H
+#define EIGEN_SPARSEVIEW_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatrixType>
+struct traits<SparseView<MatrixType> > : traits<MatrixType>
+{
+ typedef int Index;
+ typedef Sparse StorageKind;
+ enum {
+ Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
+ };
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
+{
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+
+  SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
+             typename NumTraits<Scalar>::Real epsilon = NumTraits<Scalar>::dummy_precision()) :
+    m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
+
+ class InnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ inline Index innerSize() const { return m_matrix.innerSize(); }
+ inline Index outerSize() const { return m_matrix.outerSize(); }
+
+protected:
+ MatrixTypeNested m_matrix;
+ Scalar m_reference;
+ typename NumTraits<Scalar>::Real m_epsilon;
+};
+
+template<typename MatrixType>
+class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
+{
+public:
+ typedef typename _MatrixTypeNested::InnerIterator IterBase;
+ InnerIterator(const SparseView& view, Index outer) :
+ IterBase(view.m_matrix, outer), m_view(view)
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ IterBase::operator++();
+ incrementToNonZero();
+ return *this;
+ }
+
+ using IterBase::value;
+
+protected:
+ const SparseView& m_view;
+
+private:
+ void incrementToNonZero()
+ {
+ while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
+ {
+ IterBase::operator++();
+ }
+ }
+};
+
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
+                                                          typename NumTraits<Scalar>::Real epsilon) const
+{
+  return SparseView<Derived>(derived(), reference, epsilon);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEVIEW_H
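sparseView() is the dense-to-sparse entry point for the class above: the iterator skips every coefficient that is much smaller than the reference value, as measured by epsilon. A small sketch with arbitrary thresholds:

    #include <Eigen/Dense>
    #include <Eigen/SparseCore>

    // Convert a mostly-zero dense matrix into a sparse one, treating
    // entries with magnitude <= reference*epsilon as zeros.
    Eigen::SparseMatrix<double> toSparse(const Eigen::MatrixXd& D)
    {
      Eigen::SparseMatrix<double> S;
      S = D.sparseView(1.0, 1e-12);  // keeps entries with |d| > 1e-12
      return S;
    }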
diff --git a/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h b/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h
new file mode 100644
index 00000000000..9a45e8f417f
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h
@@ -0,0 +1,349 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
+#define EIGEN_SPARSETRIANGULARSOLVER_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
+struct sparse_solve_triangular_selector;
+
+// forward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.rows(); ++i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar lastVal(0);
+ int lastIndex = 0;
+ for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
+ {
+ lastVal = it.value();
+ lastIndex = it.index();
+ if(lastIndex==i)
+ break;
+ tmp -= lastVal * other.coeff(lastIndex,col);
+ }
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ eigen_assert(lastIndex==i);
+ other.coeffRef(i,col) = tmp/lastVal;
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.rows()-1 ; i>=0 ; --i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar l_ii = 0;
+ typename Lhs::InnerIterator it(lhs, i);
+ while(it && it.index()<i)
+ ++it;
+ if(!(Mode & UnitDiag))
+ {
+ eigen_assert(it && it.index()==i);
+ l_ii = it.value();
+ ++it;
+ }
+ else if (it && it.index() == i)
+ ++it;
+ for(; it; ++it)
+ {
+ tmp -= it.value() * other.coeff(it.index(),col);
+ }
+
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ other.coeffRef(i,col) = tmp/l_ii;
+ }
+ }
+ }
+};
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.cols(); ++i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ while(it && it.index()<i)
+ ++it;
+ if(!(Mode & UnitDiag))
+ {
+ eigen_assert(it && it.index()==i);
+ tmp /= it.value();
+ }
+ if (it && it.index()==i)
+ ++it;
+ for(; it; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.cols()-1; i>=0; --i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ if(!(Mode & UnitDiag))
+ {
+ // TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements
+ typename Lhs::ReverseInnerIterator it(lhs, i);
+ while(it && it.index()!=i)
+ --it;
+ eigen_assert(it && it.index()==i);
+ other.coeffRef(i,col) /= it.value();
+ }
+ typename Lhs::InnerIterator it(lhs, i);
+ for(; it && it.index()<i; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+ eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
+
+ enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+ typedef typename internal::conditional<copy,
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(m_matrix, otherCopy);
+
+ if (copy)
+ other = otherCopy;
+}
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseTriangularView<ExpressionType,Mode>::solve(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ solveInPlace(res);
+ return res;
+}
+
+// pure sparse path
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
+struct sparse_solve_triangular_sparse_selector;
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode, int UpLo>
+struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ const bool IsLower = (UpLo==Lower);
+ AmbiVector<Scalar,Index> tempVector(other.rows()*2);
+ tempVector.setBounds(0,other.rows());
+
+ Rhs res(other.rows(), other.cols());
+ res.reserve(other.nonZeros());
+
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ // FIXME estimate number of non zeros
+ tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
+ tempVector.setZero();
+ tempVector.restart();
+ for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
+ {
+ tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
+ }
+
+ for(int i=IsLower?0:lhs.cols()-1;
+ IsLower?i<lhs.cols():i>=0;
+ i+=IsLower?1:-1)
+ {
+ tempVector.restart();
+ Scalar& ci = tempVector.coeffRef(i);
+ if (ci!=Scalar(0))
+ {
+ // find
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Mode & UnitDiag))
+ {
+ if (IsLower)
+ {
+ eigen_assert(it.index()==i);
+ ci /= it.value();
+ }
+ else
+ ci /= lhs.coeff(i,i);
+ }
+ tempVector.restart();
+ if (IsLower)
+ {
+ if (it.index()==i)
+ ++it;
+ for(; it; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ else
+ {
+ for(; it && it.index()<i; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ }
+ }
+
+
+ int count = 0;
+ // FIXME compute a reference value to filter zeros
+ for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
+ {
+ ++ count;
+// std::cerr << "fill " << it.index() << ", " << col << "\n";
+// std::cout << it.value() << " ";
+ // FIXME use insertBack
+ res.insert(it.index(), col) = it.value();
+ }
+// std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n";
+ }
+ res.finalize();
+ other = res.markAsRValue();
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+ eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
+
+// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+// typedef typename internal::conditional<copy,
+// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+// OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(m_matrix, other.derived());
+
+// if (copy)
+// other = otherCopy;
+}
+
+#ifdef EIGEN2_SUPPORT
+
+// deprecated stuff:
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
+{
+ this->template triangular<Flags&(Upper|Lower)>().solveInPlace(other);
+}
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ derived().solveTriangularInPlace(res);
+ return res;
+}
+#endif // EIGEN2_SUPPORT
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSETRIANGULARSOLVER_H
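The four selector specializations above implement forward and backward substitution for each storage order; user code reaches them through solve() and solveInPlace() on a triangular view. A minimal sketch with a dense right-hand side:

    #include <Eigen/Dense>
    #include <Eigen/SparseCore>

    // Solve L*x = b where L is the lower triangular part of a sparse matrix;
    // a column-major L dispatches to the forward-substitution selector.
    Eigen::VectorXd solveLower(const Eigen::SparseMatrix<double>& L,
                               const Eigen::VectorXd& b)
    {
      Eigen::VectorXd x = b;
      L.triangularView<Eigen::Lower>().solveInPlace(x);
      return x;
    }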
diff --git a/extern/Eigen3/Eigen/src/StlSupport/CMakeLists.txt b/extern/Eigen3/Eigen/src/StlSupport/CMakeLists.txt
new file mode 100644
index 00000000000..0f094f637ac
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/StlSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_StlSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_StlSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/StlSupport COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/SuperLUSupport/CMakeLists.txt b/extern/Eigen3/Eigen/src/SuperLUSupport/CMakeLists.txt
new file mode 100644
index 00000000000..b28ebe5839c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SuperLUSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SuperLUSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SuperLUSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SuperLUSupport COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h b/extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h
new file mode 100644
index 00000000000..6c3eb685849
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -0,0 +1,1040 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SUPERLUSUPPORT_H
+#define EIGEN_SUPERLUSUPPORT_H
+
+namespace Eigen {
+
+#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE) \
+ extern "C" { \
+ typedef struct { FLOATTYPE for_lu; FLOATTYPE total_needed; int expansions; } PREFIX##mem_usage_t; \
+ extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \
+ char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \
+ void *, int, SuperMatrix *, SuperMatrix *, \
+ FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, \
+ PREFIX##mem_usage_t *, SuperLUStat_t *, int *); \
+ } \
+ inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \
+ int *perm_c, int *perm_r, int *etree, char *equed, \
+ FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
+ SuperMatrix *U, void *work, int lwork, \
+ SuperMatrix *B, SuperMatrix *X, \
+ FLOATTYPE *recip_pivot_growth, \
+ FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \
+ SuperLUStat_t *stats, int *info, KEYTYPE) { \
+ PREFIX##mem_usage_t mem_usage; \
+ PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L, \
+ U, work, lwork, B, X, recip_pivot_growth, rcond, \
+ ferr, berr, &mem_usage, stats, info); \
+ return mem_usage.for_lu; /* bytes used by the factor storage */ \
+ }
+
+DECL_GSSVX(s,float,float)
+DECL_GSSVX(c,float,std::complex<float>)
+DECL_GSSVX(d,double,double)
+DECL_GSSVX(z,double,std::complex<double>)
+
+#ifdef MILU_ALPHA
+#define EIGEN_SUPERLU_HAS_ILU
+#endif
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+
+// similarly for the incomplete factorization using gsisx
+#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE) \
+ extern "C" { \
+ extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *, \
+ char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *, \
+ void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *, \
+ PREFIX##mem_usage_t *, SuperLUStat_t *, int *); \
+ } \
+ inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A, \
+ int *perm_c, int *perm_r, int *etree, char *equed, \
+ FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
+ SuperMatrix *U, void *work, int lwork, \
+ SuperMatrix *B, SuperMatrix *X, \
+ FLOATTYPE *recip_pivot_growth, \
+ FLOATTYPE *rcond, \
+ SuperLUStat_t *stats, int *info, KEYTYPE) { \
+ PREFIX##mem_usage_t mem_usage; \
+ PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L, \
+ U, work, lwork, B, X, recip_pivot_growth, rcond, \
+ &mem_usage, stats, info); \
+ return mem_usage.for_lu; /* bytes used by the factor storage */ \
+ }
+
+DECL_GSISX(s,float,float)
+DECL_GSISX(c,float,std::complex<float>)
+DECL_GSISX(d,double,double)
+DECL_GSISX(z,double,std::complex<double>)
+
+#endif
+
+template<typename MatrixType>
+struct SluMatrixMapHelper;
+
+/** \internal
+ *
+ * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
+ * and dense matrices. Supernodal and other fancy formats are not supported by this wrapper.
+ *
+ * This wrapper class mainly aims to avoid the need for dynamic allocation of the storage structure.
+ */
+struct SluMatrix : SuperMatrix
+{
+ SluMatrix()
+ {
+ Store = &storage;
+ }
+
+ SluMatrix(const SluMatrix& other)
+ : SuperMatrix(other)
+ {
+ Store = &storage;
+ storage = other.storage;
+ }
+
+ SluMatrix& operator=(const SluMatrix& other)
+ {
+ SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
+ Store = &storage;
+ storage = other.storage;
+ return *this;
+ }
+
+ struct
+ {
+ union {int nnz;int lda;};
+ void *values;
+ int *innerInd;
+ int *outerInd;
+ } storage;
+
+ void setStorageType(Stype_t t)
+ {
+ Stype = t;
+ if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
+ Store = &storage;
+ else
+ {
+ eigen_assert(false && "storage type not supported");
+ Store = 0;
+ }
+ }
+
+ template<typename Scalar>
+ void setScalarType()
+ {
+ if (internal::is_same<Scalar,float>::value)
+ Dtype = SLU_S;
+ else if (internal::is_same<Scalar,double>::value)
+ Dtype = SLU_D;
+ else if (internal::is_same<Scalar,std::complex<float> >::value)
+ Dtype = SLU_C;
+ else if (internal::is_same<Scalar,std::complex<double> >::value)
+ Dtype = SLU_Z;
+ else
+ {
+ eigen_assert(false && "Scalar type not supported by SuperLU");
+ }
+ }
+
+ template<typename MatrixType>
+ static SluMatrix Map(MatrixBase<MatrixType>& _mat)
+ {
+ MatrixType& mat(_mat.derived());
+ eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && "row-major dense matrices are not supported by SuperLU");
+ SluMatrix res;
+ res.setStorageType(SLU_DN);
+ res.setScalarType<typename MatrixType::Scalar>();
+ res.Mtype = SLU_GE;
+
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+
+ res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
+ res.storage.values = mat.data();
+ return res;
+ }
+
+ template<typename MatrixType>
+ static SluMatrix Map(SparseMatrixBase<MatrixType>& mat)
+ {
+ SluMatrix res;
+ if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+ {
+ res.setStorageType(SLU_NR);
+ res.nrow = mat.cols();
+ res.ncol = mat.rows();
+ }
+ else
+ {
+ res.setStorageType(SLU_NC);
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ }
+
+ res.Mtype = SLU_GE;
+
+ res.storage.nnz = mat.nonZeros();
+ res.storage.values = mat.derived().valuePtr();
+ res.storage.innerInd = mat.derived().innerIndexPtr();
+ res.storage.outerInd = mat.derived().outerIndexPtr();
+
+ res.setScalarType<typename MatrixType::Scalar>();
+
+ // FIXME the following is not very accurate
+ if (MatrixType::Flags & Upper)
+ res.Mtype = SLU_TRU;
+ if (MatrixType::Flags & Lower)
+ res.Mtype = SLU_TRL;
+
+ eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+
+ return res;
+ }
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
+{
+ typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+ static void run(MatrixType& mat, SluMatrix& res)
+ {
+ eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
+ res.setStorageType(SLU_DN);
+ res.setScalarType<Scalar>();
+ res.Mtype = SLU_GE;
+
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+
+ res.storage.lda = mat.outerStride();
+ res.storage.values = mat.data();
+ }
+};
+
+template<typename Derived>
+struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
+{
+ typedef Derived MatrixType;
+ static void run(MatrixType& mat, SluMatrix& res)
+ {
+ if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+ {
+ res.setStorageType(SLU_NR);
+ res.nrow = mat.cols();
+ res.ncol = mat.rows();
+ }
+ else
+ {
+ res.setStorageType(SLU_NC);
+ res.nrow = mat.rows();
+ res.ncol = mat.cols();
+ }
+
+ res.Mtype = SLU_GE;
+
+ res.storage.nnz = mat.nonZeros();
+ res.storage.values = mat.valuePtr();
+ res.storage.innerInd = mat.innerIndexPtr();
+ res.storage.outerInd = mat.outerIndexPtr();
+
+ res.setScalarType<typename MatrixType::Scalar>();
+
+ // FIXME the following is not very accurate
+ if (MatrixType::Flags & Upper)
+ res.Mtype = SLU_TRU;
+ if (MatrixType::Flags & Lower)
+ res.Mtype = SLU_TRL;
+
+ eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+ }
+};
+
+namespace internal {
+
+template<typename MatrixType>
+SluMatrix asSluMatrix(MatrixType& mat)
+{
+ return SluMatrix::Map(mat);
+}
+
+/** View a SuperLU matrix as an Eigen expression */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
+{
+ eigen_assert( ((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
+            || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC) );
+
+ Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
+
+ return MappedSparseMatrix<Scalar,Flags,Index>(
+ sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
+ sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
+}
+
+} // end namespace internal
+
+/** \ingroup SuperLUSupport_Module
+ * \class SuperLUBase
+ * \brief The base class for SuperLU's direct and incomplete LU factorizations
+ */
+template<typename _MatrixType, typename Derived>
+class SuperLUBase : internal::noncopyable
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar> LUMatrixType;
+
+ public:
+
+ SuperLUBase() {}
+
+ ~SuperLUBase()
+ {
+ clearFactors();
+ }
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ /** \returns a reference to the SuperLU options object used to configure the SuperLU algorithms. */
+ inline superlu_options_t& options() { return m_sluOptions; }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the matrix appears to be singular or otherwise numerically problematic.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /** Computes the sparse LU decomposition of \a matrix */
+ void compute(const MatrixType& matrix)
+ {
+ derived().analyzePattern(matrix);
+ derived().factorize(matrix);
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<SuperLUBase, Rhs> solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<SuperLUBase, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+// template<typename Rhs>
+// inline const internal::sparse_solve_retval<SuperLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+// {
+// eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+// eigen_assert(rows()==b.rows()
+// && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+// return internal::sparse_solve_retval<SuperLU, Rhs>(*this, b.derived());
+// }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
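+ *
+ * A minimal sketch of the intended reuse pattern (assuming a solver \c lu and matrices
+ * \c A1, \c A2 sharing one sparsity pattern):
+ * \code
+ * lu.analyzePattern(A1);                  // symbolic step, done once
+ * lu.factorize(A1);  x1 = lu.solve(b1);
+ * lu.factorize(A2);  x2 = lu.solve(b2);   // reuses the analysis
+ * \endcode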
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& /*matrix*/)
+ {
+ m_isInitialized = true;
+ m_info = Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ }
+
+ template<typename Stream>
+ void dumpMemory(Stream& s)
+ {}
+
+ protected:
+
+ void initFactorization(const MatrixType& a)
+ {
+ set_default_options(&this->m_sluOptions);
+
+ const int size = a.rows();
+ m_matrix = a;
+
+ m_sluA = internal::asSluMatrix(m_matrix);
+ clearFactors();
+
+ m_p.resize(size);
+ m_q.resize(size);
+ m_sluRscale.resize(size);
+ m_sluCscale.resize(size);
+ m_sluEtree.resize(size);
+
+ // set empty B and X
+ m_sluB.setStorageType(SLU_DN);
+ m_sluB.setScalarType<Scalar>();
+ m_sluB.Mtype = SLU_GE;
+ m_sluB.storage.values = 0;
+ m_sluB.nrow = 0;
+ m_sluB.ncol = 0;
+ m_sluB.storage.lda = size;
+ m_sluX = m_sluB;
+
+ m_extractedDataAreDirty = true;
+ }
+
+ void init()
+ {
+ m_info = InvalidInput;
+ m_isInitialized = false;
+ m_sluL.Store = 0;
+ m_sluU.Store = 0;
+ }
+
+ void extractData() const;
+
+ void clearFactors()
+ {
+ if(m_sluL.Store)
+ Destroy_SuperNode_Matrix(&m_sluL);
+ if(m_sluU.Store)
+ Destroy_CompCol_Matrix(&m_sluU);
+
+ m_sluL.Store = 0;
+ m_sluU.Store = 0;
+
+ memset(&m_sluL,0,sizeof m_sluL);
+ memset(&m_sluU,0,sizeof m_sluU);
+ }
+
+ // cached data to reduce reallocation, etc.
+ mutable LUMatrixType m_l;
+ mutable LUMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+
+ mutable LUMatrixType m_matrix; // copy of the factorized matrix
+ mutable SluMatrix m_sluA;
+ mutable SuperMatrix m_sluL, m_sluU;
+ mutable SluMatrix m_sluB, m_sluX;
+ mutable SuperLUStat_t m_sluStat;
+ mutable superlu_options_t m_sluOptions;
+ mutable std::vector<int> m_sluEtree;
+ mutable Matrix<RealScalar,Dynamic,1> m_sluRscale, m_sluCscale;
+ mutable Matrix<RealScalar,Dynamic,1> m_sluFerr, m_sluBerr;
+ mutable char m_sluEqued;
+
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ int m_factorizationIsOk;
+ int m_analysisIsOk;
+ mutable bool m_extractedDataAreDirty;
+
+ private:
+ SuperLUBase(SuperLUBase& ) { }
+};
+
+
+/** \ingroup SuperLUSupport_Module
+ * \class SuperLU
+ * \brief A sparse direct LU factorization and solver based on the SuperLU library
+ *
+ * This class allows solving A.X = B sparse linear problems via a direct LU factorization
+ * using the SuperLU library. The sparse matrix A must be square and invertible. The vectors or matrices
+ * X and B can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ *
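+ * A typical hedged usage sketch (assuming the SuperLU library is linked in):
+ * \code
+ * SparseMatrix<double> A;                    // fill A ...
+ * VectorXd b(A.rows()), x;                   // fill b ...
+ * SuperLU<SparseMatrix<double> > solver(A);  // runs analyzePattern() and factorize()
+ * if(solver.info()==Success)
+ *   x = solver.solve(b);
+ * \endcode
+ *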
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename _MatrixType>
+class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
+{
+ public:
+ typedef SuperLUBase<_MatrixType,SuperLU> Base;
+ typedef _MatrixType MatrixType;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef typename Base::Index Index;
+ typedef typename Base::IntRowVectorType IntRowVectorType;
+ typedef typename Base::IntColVectorType IntColVectorType;
+ typedef typename Base::LUMatrixType LUMatrixType;
+ typedef TriangularView<LUMatrixType, Lower|UnitDiag> LMatrixType;
+ typedef TriangularView<LUMatrixType, Upper> UMatrixType;
+
+ public:
+
+ SuperLU() : Base() { init(); }
+
+ SuperLU(const MatrixType& matrix) : Base()
+ {
+ Base::init();
+ compute(matrix);
+ }
+
+ ~SuperLU()
+ {
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ m_info = InvalidInput;
+ m_isInitialized = false;
+ Base::analyzePattern(matrix);
+ }
+
+ /** Performs a numeric decomposition of \a matrix.
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix);
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+
+ inline const LMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) this->extractData();
+ return m_l;
+ }
+
+ inline const UMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) this->extractData();
+ return m_u;
+ }
+
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) this->extractData();
+ return m_p;
+ }
+
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) this->extractData();
+ return m_q;
+ }
+
+ Scalar determinant() const;
+
+ protected:
+
+ using Base::m_matrix;
+ using Base::m_sluOptions;
+ using Base::m_sluA;
+ using Base::m_sluB;
+ using Base::m_sluX;
+ using Base::m_p;
+ using Base::m_q;
+ using Base::m_sluEtree;
+ using Base::m_sluEqued;
+ using Base::m_sluRscale;
+ using Base::m_sluCscale;
+ using Base::m_sluL;
+ using Base::m_sluU;
+ using Base::m_sluStat;
+ using Base::m_sluFerr;
+ using Base::m_sluBerr;
+ using Base::m_l;
+ using Base::m_u;
+
+ using Base::m_analysisIsOk;
+ using Base::m_factorizationIsOk;
+ using Base::m_extractedDataAreDirty;
+ using Base::m_isInitialized;
+ using Base::m_info;
+
+ void init()
+ {
+ Base::init();
+
+ set_default_options(&this->m_sluOptions);
+ m_sluOptions.PrintStat = NO;
+ m_sluOptions.ConditionNumber = NO;
+ m_sluOptions.Trans = NOTRANS;
+ m_sluOptions.ColPerm = COLAMD;
+ }
+
+
+ private:
+ SuperLU(SuperLU& ) { }
+};
+
+template<typename MatrixType>
+void SuperLU<MatrixType>::factorize(const MatrixType& a)
+{
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ if(!m_analysisIsOk)
+ {
+ m_info = InvalidInput;
+ return;
+ }
+
+ this->initFactorization(a);
+
+ int info = 0;
+ RealScalar recip_pivot_growth, rcond;
+ RealScalar ferr, berr;
+
+ StatInit(&m_sluStat);
+ SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+ &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_growth, &rcond,
+ &ferr, &berr,
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+
+ m_extractedDataAreDirty = true;
+
+ // FIXME how to better check for errors ???
+ m_info = info == 0 ? Success : NumericalIssue;
+ m_factorizationIsOk = true;
+}
+
+template<typename MatrixType>
+template<typename Rhs,typename Dest>
+void SuperLU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
+{
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+ const int size = m_matrix.rows();
+ const int rhsCols = b.cols();
+ eigen_assert(size==b.rows());
+
+ m_sluOptions.Trans = NOTRANS;
+ m_sluOptions.Fact = FACTORED;
+ m_sluOptions.IterRefine = NOREFINE;
+
+ m_sluFerr.resize(rhsCols);
+ m_sluBerr.resize(rhsCols);
+ m_sluB = SluMatrix::Map(b.const_cast_derived());
+ m_sluX = SluMatrix::Map(x.derived());
+
+ typename Rhs::PlainObject b_cpy;
+ if(m_sluEqued!='N')
+ {
+ b_cpy = b;
+ m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());
+ }
+
+ StatInit(&m_sluStat);
+ int info = 0;
+ RealScalar recip_pivot_growth, rcond;
+ SuperLU_gssvx(&m_sluOptions, &m_sluA,
+ m_q.data(), m_p.data(),
+ &m_sluEtree[0], &m_sluEqued,
+ &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_growth, &rcond,
+ &m_sluFerr[0], &m_sluBerr[0],
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+ m_info = info==0 ? Success : NumericalIssue;
+}
+
+// the code of this extractData() function has been adapted from SuperLU's Matlab support code,
+//
+// Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+//
+// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+//
+template<typename MatrixType, typename Derived>
+void SuperLUBase<MatrixType,Derived>::extractData() const
+{
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()");
+ if (m_extractedDataAreDirty)
+ {
+ int upper;
+ int fsupc, istart, nsupr;
+ int lastl = 0, lastu = 0;
+ SCformat *Lstore = static_cast<SCformat*>(m_sluL.Store);
+ NCformat *Ustore = static_cast<NCformat*>(m_sluU.Store);
+ Scalar *SNptr;
+
+ const int size = m_matrix.rows();
+ m_l.resize(size,size);
+ m_l.resizeNonZeros(Lstore->nnz);
+ m_u.resize(size,size);
+ m_u.resizeNonZeros(Ustore->nnz);
+
+ int* Lcol = m_l.outerIndexPtr();
+ int* Lrow = m_l.innerIndexPtr();
+ Scalar* Lval = m_l.valuePtr();
+
+ int* Ucol = m_u.outerIndexPtr();
+ int* Urow = m_u.innerIndexPtr();
+ Scalar* Uval = m_u.valuePtr();
+
+ Ucol[0] = 0;
+ Lcol[0] = 0;
+
+ /* for each supernode */
+ for (int k = 0; k <= Lstore->nsuper; ++k)
+ {
+ fsupc = L_FST_SUPC(k);
+ istart = L_SUB_START(fsupc);
+ nsupr = L_SUB_START(fsupc+1) - istart;
+ upper = 1;
+
+ /* for each column in the supernode */
+ for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
+ {
+ SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
+
+ /* Extract U */
+ for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
+ {
+ Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Uval[lastu] != 0.0)
+ Urow[lastu++] = U_SUB(i);
+ }
+ for (int i = 0; i < upper; ++i)
+ {
+ /* upper triangle in the supernode */
+ Uval[lastu] = SNptr[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Uval[lastu] != 0.0)
+ Urow[lastu++] = L_SUB(istart+i);
+ }
+ Ucol[j+1] = lastu;
+
+ /* Extract L */
+ Lval[lastl] = 1.0; /* unit diagonal */
+ Lrow[lastl++] = L_SUB(istart + upper - 1);
+ for (int i = upper; i < nsupr; ++i)
+ {
+ Lval[lastl] = SNptr[i];
+ /* Matlab doesn't like explicit zero. */
+ if (Lval[lastl] != 0.0)
+ Lrow[lastl++] = L_SUB(istart+i);
+ }
+ Lcol[j+1] = lastl;
+
+ ++upper;
+ } /* for j ... */
+
+ } /* for k ... */
+
+ // squeeze the matrices:
+ m_l.resizeNonZeros(lastl);
+ m_u.resizeNonZeros(lastu);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+template<typename MatrixType>
+typename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const
+{
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()");
+
+ if (m_extractedDataAreDirty)
+ this->extractData();
+
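+ // U is upper triangular, so det(U) is the product of its diagonal entries; the loop
+ // below picks the last stored entry of each column and multiplies it in when it lies
+ // on the diagonal. If the system was equilibrated (m_sluEqued!='N'), the row/column
+ // scalings are divided back out. Note: permutation signs are not tracked here, so the
+ // sign of the returned value should be treated with care.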
+ Scalar det = Scalar(1);
+ for (int j=0; j<m_u.cols(); ++j)
+ {
+ if (m_u.outerIndexPtr()[j+1]-m_u.outerIndexPtr()[j] > 0)
+ {
+ int lastId = m_u.outerIndexPtr()[j+1]-1;
+ eigen_assert(m_u.innerIndexPtr()[lastId]<=j);
+ if (m_u.innerIndexPtr()[lastId]==j)
+ det *= m_u.valuePtr()[lastId];
+ }
+ }
+ if(m_sluEqued!='N')
+ return det/m_sluRscale.prod()/m_sluCscale.prod();
+ else
+ return det;
+}
+
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+#define EIGEN_SUPERLU_HAS_ILU
+#endif
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+
+/** \ingroup SuperLUSupport_Module
+ * \class SuperILU
+ * \brief A sparse direct \b incomplete LU factorization and solver based on the SuperLU library
+ *
+ * This class allows computing an approximate solution of A.X = B sparse linear problems via an incomplete LU factorization
+ * using the SuperLU library. It is intended to be used as a preconditioner for iterative linear solvers.
+ *
+ * \warning This class requires SuperLU 4 or later.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ *
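+ * A hedged sketch of standalone use (same compute()/solve() interface as SuperLU, but the
+ * returned solution is only approximate):
+ * \code
+ * SuperILU<SparseMatrix<double> > ilu(A);  // incomplete factorization of A
+ * VectorXd x0 = ilu.solve(b);              // cheap approximate solve
+ * \endcode
+ *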
+ * \sa \ref TutorialSparseDirectSolvers, class ConjugateGradient, class BiCGSTAB
+ */
+template<typename _MatrixType>
+class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
+{
+ public:
+ typedef SuperLUBase<_MatrixType,SuperILU> Base;
+ typedef _MatrixType MatrixType;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::RealScalar RealScalar;
+ typedef typename Base::Index Index;
+
+ public:
+
+ SuperILU() : Base() { init(); }
+
+ SuperILU(const MatrixType& matrix) : Base()
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~SuperILU()
+ {
+ }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ Base::analyzePattern(matrix);
+ }
+
+ /** Performs a numeric decomposition of \a matrix.
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+ *
+ * \sa analyzePattern()
+ */
+ void factorize(const MatrixType& matrix);
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+
+ protected:
+
+ using Base::m_matrix;
+ using Base::m_sluOptions;
+ using Base::m_sluA;
+ using Base::m_sluB;
+ using Base::m_sluX;
+ using Base::m_p;
+ using Base::m_q;
+ using Base::m_sluEtree;
+ using Base::m_sluEqued;
+ using Base::m_sluRscale;
+ using Base::m_sluCscale;
+ using Base::m_sluL;
+ using Base::m_sluU;
+ using Base::m_sluStat;
+ using Base::m_sluFerr;
+ using Base::m_sluBerr;
+ using Base::m_l;
+ using Base::m_u;
+
+ using Base::m_analysisIsOk;
+ using Base::m_factorizationIsOk;
+ using Base::m_extractedDataAreDirty;
+ using Base::m_isInitialized;
+ using Base::m_info;
+
+ void init()
+ {
+ Base::init();
+
+ ilu_set_default_options(&m_sluOptions);
+ m_sluOptions.PrintStat = NO;
+ m_sluOptions.ConditionNumber = NO;
+ m_sluOptions.Trans = NOTRANS;
+ m_sluOptions.ColPerm = MMD_AT_PLUS_A;
+
+ // no attempt to preserve column sum
+ m_sluOptions.ILU_MILU = SILU;
+ // only basic ILU(k) support -- no direct control over memory consumption
+ // better to use ILU_DropRule = DROP_BASIC | DROP_AREA
+ // and set ILU_FillFactor to max memory growth
+ m_sluOptions.ILU_DropRule = DROP_BASIC;
+ m_sluOptions.ILU_DropTol = NumTraits<Scalar>::dummy_precision()*10;
+ }
+
+ private:
+ SuperILU(SuperILU& ) { }
+};
+
+template<typename MatrixType>
+void SuperILU<MatrixType>::factorize(const MatrixType& a)
+{
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ if(!m_analysisIsOk)
+ {
+ m_info = InvalidInput;
+ return;
+ }
+
+ this->initFactorization(a);
+
+ int info = 0;
+ RealScalar recip_pivot_growth, rcond;
+
+ StatInit(&m_sluStat);
+ SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+ &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_growth, &rcond,
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+
+ // FIXME how to better check for errors ???
+ m_info = info == 0 ? Success : NumericalIssue;
+ m_factorizationIsOk = true;
+}
+
+template<typename MatrixType>
+template<typename Rhs,typename Dest>
+void SuperILU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
+{
+ eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+ const int size = m_matrix.rows();
+ const int rhsCols = b.cols();
+ eigen_assert(size==b.rows());
+
+ m_sluOptions.Trans = NOTRANS;
+ m_sluOptions.Fact = FACTORED;
+ m_sluOptions.IterRefine = NOREFINE;
+
+ m_sluFerr.resize(rhsCols);
+ m_sluBerr.resize(rhsCols);
+ m_sluB = SluMatrix::Map(b.const_cast_derived());
+ m_sluX = SluMatrix::Map(x.derived());
+
+ typename Rhs::PlainObject b_cpy;
+ if(m_sluEqued!='N')
+ {
+ b_cpy = b;
+ m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());
+ }
+
+ int info = 0;
+ RealScalar recip_pivot_growth, rcond;
+
+ StatInit(&m_sluStat);
+ SuperLU_gsisx(&m_sluOptions, &m_sluA,
+ m_q.data(), m_p.data(),
+ &m_sluEtree[0], &m_sluEqued,
+ &m_sluRscale[0], &m_sluCscale[0],
+ &m_sluL, &m_sluU,
+ NULL, 0,
+ &m_sluB, &m_sluX,
+ &recip_pivot_growth, &rcond,
+ &m_sluStat, &info, Scalar());
+ StatFree(&m_sluStat);
+
+ m_info = info==0 ? Success : NumericalIssue;
+}
+#endif
+
+namespace internal {
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
+ : solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
+{
+ typedef SuperLUBase<_MatrixType,Derived> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve(rhs(),dst);
+ }
+};
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct sparse_solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
+ : sparse_solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
+{
+ typedef SuperLUBase<_MatrixType,Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec().derived()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SUPERLUSUPPORT_H
diff --git a/extern/Eigen3/Eigen/src/UmfPackSupport/CMakeLists.txt b/extern/Eigen3/Eigen/src/UmfPackSupport/CMakeLists.txt
new file mode 100644
index 00000000000..a57de00209d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/UmfPackSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_UmfPackSupport_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_UmfPackSupport_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/UmfPackSupport COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h b/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
new file mode 100644
index 00000000000..f98a4c8c0e3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -0,0 +1,446 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_UMFPACKSUPPORT_H
+#define EIGEN_UMFPACKSUPPORT_H
+
+namespace Eigen {
+
+/* TODO extract L, extract U, compute det, etc... */
+
+// generic double/complex<double> wrapper functions:
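+//
+// For the complex variants, Eigen's std::complex<double> data is stored interleaved;
+// passing the address of the real part together with a null imaginary-array pointer
+// selects UMFPACK's "packed" complex format, so no de-interleaving copy is needed.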
+
+inline void umfpack_free_numeric(void **Numeric, double)
+{ umfpack_di_free_numeric(Numeric); *Numeric = 0; }
+
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
+{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; }
+
+inline void umfpack_free_symbolic(void **Symbolic, double)
+{ umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; }
+
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
+{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; }
+
+inline int umfpack_symbolic(int n_row,int n_col,
+ const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
+}
+
+inline int umfpack_symbolic(int n_row,int n_col,
+ const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
+ const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+ return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<double> Ax[],
+ void *Symbolic, void **Numeric,
+ const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+ return umfpack_zi_numeric(Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
+ double X[], const double B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex<double> Ax[],
+ std::complex<double> X[], const std::complex<double> B[], void *Numeric,
+ const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+ return umfpack_zi_solve(sys,Ap,Ai,&internal::real_ref(Ax[0]),0,&internal::real_ref(X[0]),0,&internal::real_ref(B[0]),0,Numeric,Control,Info);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
+{
+ return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex<double>)
+{
+ return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
+ int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+ return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
+ int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+ double& lx0_real = internal::real_ref(Lx[0]);
+ double& ux0_real = internal::real_ref(Ux[0]);
+ double& dx0_real = internal::real_ref(Dx[0]);
+ return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
+ Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+ return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
+}
+
+inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+ double& mx_real = internal::real_ref(*Mx);
+ return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
+}
+
+/** \ingroup UmfPackSupport_Module
+ * \brief A sparse LU factorization and solver based on UmfPack
+ *
+ * This class allows solving A.X = B sparse linear problems via a LU factorization
+ * using the UmfPack library. The sparse matrix A must be square and full rank.
+ * The vectors or matrices X and B can be either dense or sparse.
+ *
+ * \warning The input matrix A should be in a \b compressed and \b column-major form.
+ * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+ *
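+ * A hedged usage sketch (assuming UMFPACK is available; makeCompressed() avoids the copy):
+ * \code
+ * SparseMatrix<double> A;   // fill A ...
+ * A.makeCompressed();       // ensure compressed column-major storage
+ * UmfPackLU<SparseMatrix<double> > lu(A);
+ * VectorXd x = lu.solve(b);
+ * \endcode
+ *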
+ * \sa \ref TutorialSparseDirectSolvers
+ */
+template<typename _MatrixType>
+class UmfPackLU : internal::noncopyable
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef SparseMatrix<Scalar> LUMatrixType;
+ typedef SparseMatrix<Scalar,ColMajor,int> UmfpackMatrixType;
+
+ public:
+
+ UmfPackLU() { init(); }
+
+ UmfPackLU(const MatrixType& matrix)
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~UmfPackLU()
+ {
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ }
+
+ inline Index rows() const { return m_copyMatrix.rows(); }
+ inline Index cols() const { return m_copyMatrix.cols(); }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the matrix appears to be singular or otherwise numerically problematic.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ inline const LUMatrixType& matrixL() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_l;
+ }
+
+ inline const LUMatrixType& matrixU() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_u;
+ }
+
+ inline const IntColVectorType& permutationP() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_p;
+ }
+
+ inline const IntRowVectorType& permutationQ() const
+ {
+ if (m_extractedDataAreDirty) extractData();
+ return m_q;
+ }
+
+ /** Computes the sparse LU decomposition of \a matrix.
+ * Note that the matrix should be column-major, and in compressed format for best performance.
+ * \sa SparseMatrix::makeCompressed().
+ */
+ void compute(const MatrixType& matrix)
+ {
+ analyzePattern(matrix);
+ factorize(matrix);
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<UmfPackLU, Rhs> solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "UmfPackLU is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<UmfPackLU, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+// template<typename Rhs>
+// inline const internal::sparse_solve_retval<UmfPAckLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+// {
+// eigen_assert(m_isInitialized && "UmfPAckLU is not initialized.");
+// eigen_assert(rows()==b.rows()
+// && "UmfPAckLU::solve(): invalid number of rows of the right hand side matrix b");
+// return internal::sparse_solve_retval<UmfPAckLU, Rhs>(*this, b.derived());
+// }
+
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+ *
+ * This function is particularly useful when solving several problems having the same structure.
+ *
+ * \sa factorize(), compute()
+ */
+ void analyzePattern(const MatrixType& matrix)
+ {
+ if(m_symbolic)
+ umfpack_free_symbolic(&m_symbolic,Scalar());
+ if(m_numeric)
+ umfpack_free_numeric(&m_numeric,Scalar());
+
+ grapInput(matrix);
+
+ int errorCode = 0;
+ errorCode = umfpack_symbolic(matrix.rows(), matrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
+ &m_symbolic, 0, 0);
+
+ m_isInitialized = true;
+ m_info = errorCode ? InvalidInput : Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ }
+
+ /** Performs a numeric decomposition of \a matrix.
+ *
+ * The given matrix must have the same sparsity pattern as the matrix on which the pattern analysis has been performed.
+ *
+ * \sa analyzePattern(), compute()
+ */
+ void factorize(const MatrixType& matrix)
+ {
+ eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
+ if(m_numeric)
+ umfpack_free_numeric(&m_numeric,Scalar());
+
+ grapInput(matrix);
+
+ int errorCode;
+ errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
+ m_symbolic, &m_numeric, 0, 0);
+
+ m_info = errorCode ? NumericalIssue : Success;
+ m_factorizationIsOk = true;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal */
+ template<typename BDerived,typename XDerived>
+ bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const;
+ #endif
+
+ Scalar determinant() const;
+
+ void extractData() const;
+
+ protected:
+
+ void init()
+ {
+ m_info = InvalidInput;
+ m_isInitialized = false;
+ m_numeric = 0;
+ m_symbolic = 0;
+ m_outerIndexPtr = 0;
+ m_innerIndexPtr = 0;
+ m_valuePtr = 0;
+ }
+
+ void grapInput(const MatrixType& mat)
+ {
+ m_copyMatrix.resize(mat.rows(), mat.cols());
+ if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() )
+ {
+ // unsupported input -> copy it into a supported compressed column-major form
+ m_copyMatrix = mat;
+ m_outerIndexPtr = m_copyMatrix.outerIndexPtr();
+ m_innerIndexPtr = m_copyMatrix.innerIndexPtr();
+ m_valuePtr = m_copyMatrix.valuePtr();
+ }
+ else
+ {
+ m_outerIndexPtr = mat.outerIndexPtr();
+ m_innerIndexPtr = mat.innerIndexPtr();
+ m_valuePtr = mat.valuePtr();
+ }
+ }
+
+ // cached data to reduce reallocation, etc.
+ mutable LUMatrixType m_l;
+ mutable LUMatrixType m_u;
+ mutable IntColVectorType m_p;
+ mutable IntRowVectorType m_q;
+
+ UmfpackMatrixType m_copyMatrix;
+ const Scalar* m_valuePtr;
+ const int* m_outerIndexPtr;
+ const int* m_innerIndexPtr;
+ void* m_numeric;
+ void* m_symbolic;
+
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ int m_factorizationIsOk;
+ int m_analysisIsOk;
+ mutable bool m_extractedDataAreDirty;
+
+ private:
+ UmfPackLU(UmfPackLU& ) { }
+};
+
+
+template<typename MatrixType>
+void UmfPackLU<MatrixType>::extractData() const
+{
+ if (m_extractedDataAreDirty)
+ {
+ // get size of the data
+ int lnz, unz, rows, cols, nz_udiag;
+ umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
+
+ // allocate data
+ m_l.resize(rows,(std::min)(rows,cols));
+ m_l.resizeNonZeros(lnz);
+
+ m_u.resize((std::min)(rows,cols),cols);
+ m_u.resizeNonZeros(unz);
+
+ m_p.resize(rows);
+ m_q.resize(cols);
+
+ // extract
+ umfpack_get_numeric(m_l.outerIndexPtr(), m_l.innerIndexPtr(), m_l.valuePtr(),
+ m_u.outerIndexPtr(), m_u.innerIndexPtr(), m_u.valuePtr(),
+ m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
+
+ m_extractedDataAreDirty = false;
+ }
+}
+
+template<typename MatrixType>
+typename UmfPackLU<MatrixType>::Scalar UmfPackLU<MatrixType>::determinant() const
+{
+ Scalar det;
+ umfpack_get_determinant(&det, 0, m_numeric, 0);
+ return det;
+}
+
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool UmfPackLU<MatrixType>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const
+{
+ const int rhsCols = b.cols();
+ eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet");
+ eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
+
+ int errorCode;
+ for (int j=0; j<rhsCols; ++j)
+ {
+ errorCode = umfpack_solve(UMFPACK_A,
+ m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
+ &x.col(j).coeffRef(0), &b.const_cast_derived().col(j).coeffRef(0), m_numeric, 0, 0);
+ if (errorCode!=0)
+ return false;
+ }
+
+ return true;
+}
+
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<UmfPackLU<_MatrixType>, Rhs>
+ : solve_retval_base<UmfPackLU<_MatrixType>, Rhs>
+{
+ typedef UmfPackLU<_MatrixType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+template<typename _MatrixType, typename Rhs>
+struct sparse_solve_retval<UmfPackLU<_MatrixType>, Rhs>
+ : sparse_solve_retval_base<UmfPackLU<_MatrixType>, Rhs>
+{
+ typedef UmfPackLU<_MatrixType> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_UMFPACKSUPPORT_H
diff --git a/extern/Eigen3/Eigen/src/misc/CMakeLists.txt b/extern/Eigen3/Eigen/src/misc/CMakeLists.txt
new file mode 100644
index 00000000000..a58ffb7457e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/misc/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_misc_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_misc_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/misc COMPONENT Devel
+ )
diff --git a/extern/Eigen3/Eigen/src/misc/SparseSolve.h b/extern/Eigen3/Eigen/src/misc/SparseSolve.h
new file mode 100644
index 00000000000..aca34b2d1c4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/misc/SparseSolve.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_SOLVE_H
+#define EIGEN_SPARSE_SOLVE_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_base;
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval;
+
+template<typename DecompositionType, typename Rhs>
+struct traits<sparse_solve_retval_base<DecompositionType, Rhs> >
+{
+ typedef typename DecompositionType::MatrixType MatrixType;
+ typedef SparseMatrix<typename Rhs::Scalar, Rhs::Options, typename Rhs::Index> ReturnType;
+};
+
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_base
+ : public ReturnByValue<sparse_solve_retval_base<_DecompositionType, Rhs> >
+{
+ typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
+ typedef _DecompositionType DecompositionType;
+ typedef ReturnByValue<sparse_solve_retval_base> Base;
+ typedef typename Base::Index Index;
+
+ sparse_solve_retval_base(const DecompositionType& dec, const Rhs& rhs)
+ : m_dec(dec), m_rhs(rhs)
+ {}
+
+ inline Index rows() const { return m_dec.cols(); }
+ inline Index cols() const { return m_rhs.cols(); }
+ inline const DecompositionType& dec() const { return m_dec; }
+ inline const RhsNestedCleaned& rhs() const { return m_rhs; }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ static_cast<const sparse_solve_retval<DecompositionType,Rhs>*>(this)->evalTo(dst);
+ }
+
+ protected:
+ const DecompositionType& m_dec;
+ typename Rhs::Nested m_rhs;
+};
+
+#define EIGEN_MAKE_SPARSE_SOLVE_HELPERS(DecompositionType,Rhs) \
+ typedef typename DecompositionType::MatrixType MatrixType; \
+ typedef typename MatrixType::Scalar Scalar; \
+ typedef typename MatrixType::RealScalar RealScalar; \
+ typedef typename MatrixType::Index Index; \
+ typedef Eigen::internal::sparse_solve_retval_base<DecompositionType,Rhs> Base; \
+ using Base::dec; \
+ using Base::rhs; \
+ using Base::rows; \
+ using Base::cols; \
+ sparse_solve_retval(const DecompositionType& dec, const Rhs& rhs) \
+ : Base(dec, rhs) {}
+
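+// A hedged sketch of how a sparse solver backend plugs into this machinery (it mirrors
+// the solve_retval specializations elsewhere in this patch; MyDec is an illustrative name):
+//
+//   template<typename _MatrixType, typename Rhs>
+//   struct sparse_solve_retval<MyDec<_MatrixType>, Rhs>
+//     : sparse_solve_retval_base<MyDec<_MatrixType>, Rhs>
+//   {
+//     typedef MyDec<_MatrixType> Dec;
+//     EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+//     template<typename Dest> void evalTo(Dest& dst) const { dec()._solve(rhs(),dst); }
+//   };
+//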
+
+
+template<typename DecompositionType, typename Rhs, typename Guess> struct solve_retval_with_guess;
+
+template<typename DecompositionType, typename Rhs, typename Guess>
+struct traits<solve_retval_with_guess<DecompositionType, Rhs, Guess> >
+{
+ typedef typename DecompositionType::MatrixType MatrixType;
+ typedef Matrix<typename Rhs::Scalar,
+ MatrixType::ColsAtCompileTime,
+ Rhs::ColsAtCompileTime,
+ Rhs::PlainObject::Options,
+ MatrixType::MaxColsAtCompileTime,
+ Rhs::MaxColsAtCompileTime> ReturnType;
+};
+
+template<typename DecompositionType, typename Rhs, typename Guess> struct solve_retval_with_guess
+ : public ReturnByValue<solve_retval_with_guess<DecompositionType, Rhs, Guess> >
+{
+ typedef typename DecompositionType::Index Index;
+
+ solve_retval_with_guess(const DecompositionType& dec, const Rhs& rhs, const Guess& guess)
+ : m_dec(dec), m_rhs(rhs), m_guess(guess)
+ {}
+
+ inline Index rows() const { return m_dec.cols(); }
+ inline Index cols() const { return m_rhs.cols(); }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ dst = m_guess;
+ m_dec._solveWithGuess(m_rhs,dst);
+ }
+
+ protected:
+ const DecompositionType& m_dec;
+ const typename Rhs::Nested m_rhs;
+ const typename Guess::Nested m_guess;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_SOLVE_H
diff --git a/extern/Eigen3/Eigen/src/misc/blas.h b/extern/Eigen3/Eigen/src/misc/blas.h
new file mode 100644
index 00000000000..6fce99ed5c4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/misc/blas.h
@@ -0,0 +1,658 @@
+#ifndef BLAS_H
+#define BLAS_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define BLASFUNC(FUNC) FUNC##_
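+// BLASFUNC maps a BLAS routine name to its customary Fortran-mangled symbol: for example,
+// BLASFUNC(dgemv) expands to dgemv_ (the lowercase name with a trailing underscore).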
+
+#ifdef __WIN64__
+typedef long long BLASLONG;
+typedef unsigned long long BLASULONG;
+#else
+typedef long BLASLONG;
+typedef unsigned long BLASULONG;
+#endif
+
+int BLASFUNC(xerbla)(const char *, int *info, int);
+
+float BLASFUNC(sdot) (int *, float *, int *, float *, int *);
+float BLASFUNC(sdsdot)(int *, float *, float *, int *, float *, int *);
+
+double BLASFUNC(dsdot) (int *, float *, int *, float *, int *);
+double BLASFUNC(ddot) (int *, double *, int *, double *, int *);
+double BLASFUNC(qdot) (int *, double *, int *, double *, int *);
+
+int BLASFUNC(cdotuw) (int *, float *, int *, float *, int *, float*);
+int BLASFUNC(cdotcw) (int *, float *, int *, float *, int *, float*);
+int BLASFUNC(zdotuw) (int *, double *, int *, double *, int *, double*);
+int BLASFUNC(zdotcw) (int *, double *, int *, double *, int *, double*);
+
+int BLASFUNC(saxpy) (int *, float *, float *, int *, float *, int *);
+int BLASFUNC(daxpy) (int *, double *, double *, int *, double *, int *);
+int BLASFUNC(qaxpy) (int *, double *, double *, int *, double *, int *);
+int BLASFUNC(caxpy) (int *, float *, float *, int *, float *, int *);
+int BLASFUNC(zaxpy) (int *, double *, double *, int *, double *, int *);
+int BLASFUNC(xaxpy) (int *, double *, double *, int *, double *, int *);
+int BLASFUNC(caxpyc)(int *, float *, float *, int *, float *, int *);
+int BLASFUNC(zaxpyc)(int *, double *, double *, int *, double *, int *);
+int BLASFUNC(xaxpyc)(int *, double *, double *, int *, double *, int *);
+
+int BLASFUNC(scopy) (int *, float *, int *, float *, int *);
+int BLASFUNC(dcopy) (int *, double *, int *, double *, int *);
+int BLASFUNC(qcopy) (int *, double *, int *, double *, int *);
+int BLASFUNC(ccopy) (int *, float *, int *, float *, int *);
+int BLASFUNC(zcopy) (int *, double *, int *, double *, int *);
+int BLASFUNC(xcopy) (int *, double *, int *, double *, int *);
+
+int BLASFUNC(sswap) (int *, float *, int *, float *, int *);
+int BLASFUNC(dswap) (int *, double *, int *, double *, int *);
+int BLASFUNC(qswap) (int *, double *, int *, double *, int *);
+int BLASFUNC(cswap) (int *, float *, int *, float *, int *);
+int BLASFUNC(zswap) (int *, double *, int *, double *, int *);
+int BLASFUNC(xswap) (int *, double *, int *, double *, int *);
+
+float BLASFUNC(sasum) (int *, float *, int *);
+float BLASFUNC(scasum)(int *, float *, int *);
+double BLASFUNC(dasum) (int *, double *, int *);
+double BLASFUNC(qasum) (int *, double *, int *);
+double BLASFUNC(dzasum)(int *, double *, int *);
+double BLASFUNC(qxasum)(int *, double *, int *);
+
+int BLASFUNC(isamax)(int *, float *, int *);
+int BLASFUNC(idamax)(int *, double *, int *);
+int BLASFUNC(iqamax)(int *, double *, int *);
+int BLASFUNC(icamax)(int *, float *, int *);
+int BLASFUNC(izamax)(int *, double *, int *);
+int BLASFUNC(ixamax)(int *, double *, int *);
+
+int BLASFUNC(ismax) (int *, float *, int *);
+int BLASFUNC(idmax) (int *, double *, int *);
+int BLASFUNC(iqmax) (int *, double *, int *);
+int BLASFUNC(icmax) (int *, float *, int *);
+int BLASFUNC(izmax) (int *, double *, int *);
+int BLASFUNC(ixmax) (int *, double *, int *);
+
+int BLASFUNC(isamin)(int *, float *, int *);
+int BLASFUNC(idamin)(int *, double *, int *);
+int BLASFUNC(iqamin)(int *, double *, int *);
+int BLASFUNC(icamin)(int *, float *, int *);
+int BLASFUNC(izamin)(int *, double *, int *);
+int BLASFUNC(ixamin)(int *, double *, int *);
+
+int BLASFUNC(ismin)(int *, float *, int *);
+int BLASFUNC(idmin)(int *, double *, int *);
+int BLASFUNC(iqmin)(int *, double *, int *);
+int BLASFUNC(icmin)(int *, float *, int *);
+int BLASFUNC(izmin)(int *, double *, int *);
+int BLASFUNC(ixmin)(int *, double *, int *);
+
+float BLASFUNC(samax) (int *, float *, int *);
+double BLASFUNC(damax) (int *, double *, int *);
+double BLASFUNC(qamax) (int *, double *, int *);
+float BLASFUNC(scamax)(int *, float *, int *);
+double BLASFUNC(dzamax)(int *, double *, int *);
+double BLASFUNC(qxamax)(int *, double *, int *);
+
+float BLASFUNC(samin) (int *, float *, int *);
+double BLASFUNC(damin) (int *, double *, int *);
+double BLASFUNC(qamin) (int *, double *, int *);
+float BLASFUNC(scamin)(int *, float *, int *);
+double BLASFUNC(dzamin)(int *, double *, int *);
+double BLASFUNC(qxamin)(int *, double *, int *);
+
+float BLASFUNC(smax) (int *, float *, int *);
+double BLASFUNC(dmax) (int *, double *, int *);
+double BLASFUNC(qmax) (int *, double *, int *);
+float BLASFUNC(scmax) (int *, float *, int *);
+double BLASFUNC(dzmax) (int *, double *, int *);
+double BLASFUNC(qxmax) (int *, double *, int *);
+
+float BLASFUNC(smin) (int *, float *, int *);
+double BLASFUNC(dmin) (int *, double *, int *);
+double BLASFUNC(qmin) (int *, double *, int *);
+float BLASFUNC(scmin) (int *, float *, int *);
+double BLASFUNC(dzmin) (int *, double *, int *);
+double BLASFUNC(qxmin) (int *, double *, int *);
+
+int BLASFUNC(sscal) (int *, float *, float *, int *);
+int BLASFUNC(dscal) (int *, double *, double *, int *);
+int BLASFUNC(qscal) (int *, double *, double *, int *);
+int BLASFUNC(cscal) (int *, float *, float *, int *);
+int BLASFUNC(zscal) (int *, double *, double *, int *);
+int BLASFUNC(xscal) (int *, double *, double *, int *);
+int BLASFUNC(csscal)(int *, float *, float *, int *);
+int BLASFUNC(zdscal)(int *, double *, double *, int *);
+int BLASFUNC(xqscal)(int *, double *, double *, int *);
+
+float BLASFUNC(snrm2) (int *, float *, int *);
+float BLASFUNC(scnrm2)(int *, float *, int *);
+
+double BLASFUNC(dnrm2) (int *, double *, int *);
+double BLASFUNC(qnrm2) (int *, double *, int *);
+double BLASFUNC(dznrm2)(int *, double *, int *);
+double BLASFUNC(qxnrm2)(int *, double *, int *);
+
+int BLASFUNC(srot) (int *, float *, int *, float *, int *, float *, float *);
+int BLASFUNC(drot) (int *, double *, int *, double *, int *, double *, double *);
+int BLASFUNC(qrot) (int *, double *, int *, double *, int *, double *, double *);
+int BLASFUNC(csrot) (int *, float *, int *, float *, int *, float *, float *);
+int BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *);
+int BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *);
+
+int BLASFUNC(srotg) (float *, float *, float *, float *);
+int BLASFUNC(drotg) (double *, double *, double *, double *);
+int BLASFUNC(qrotg) (double *, double *, double *, double *);
+int BLASFUNC(crotg) (float *, float *, float *, float *);
+int BLASFUNC(zrotg) (double *, double *, double *, double *);
+int BLASFUNC(xrotg) (double *, double *, double *, double *);
+
+int BLASFUNC(srotmg)(float *, float *, float *, float *, float *);
+int BLASFUNC(drotmg)(double *, double *, double *, double *, double *);
+
+int BLASFUNC(srotm) (int *, float *, int *, float *, int *, float *);
+int BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *);
+int BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *);
+
+/* Level 2 routines */
+
+int BLASFUNC(sger)(int *, int *, float *, float *, int *,
+ float *, int *, float *, int *);
+int BLASFUNC(dger)(int *, int *, double *, double *, int *,
+ double *, int *, double *, int *);
+int BLASFUNC(qger)(int *, int *, double *, double *, int *,
+ double *, int *, double *, int *);
+int BLASFUNC(cgeru)(int *, int *, float *, float *, int *,
+ float *, int *, float *, int *);
+int BLASFUNC(cgerc)(int *, int *, float *, float *, int *,
+ float *, int *, float *, int *);
+int BLASFUNC(zgeru)(int *, int *, double *, double *, int *,
+ double *, int *, double *, int *);
+int BLASFUNC(zgerc)(int *, int *, double *, double *, int *,
+ double *, int *, double *, int *);
+int BLASFUNC(xgeru)(int *, int *, double *, double *, int *,
+ double *, int *, double *, int *);
+int BLASFUNC(xgerc)(int *, int *, double *, double *, int *,
+ double *, int *, double *, int *);
+
+int BLASFUNC(sgemv)(char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(dgemv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(qgemv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(cgemv)(char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zgemv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xgemv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(strsv) (char *, char *, char *, int *, float *, int *,
+ float *, int *);
+int BLASFUNC(dtrsv) (char *, char *, char *, int *, double *, int *,
+ double *, int *);
+int BLASFUNC(qtrsv) (char *, char *, char *, int *, double *, int *,
+ double *, int *);
+int BLASFUNC(ctrsv) (char *, char *, char *, int *, float *, int *,
+ float *, int *);
+int BLASFUNC(ztrsv) (char *, char *, char *, int *, double *, int *,
+ double *, int *);
+int BLASFUNC(xtrsv) (char *, char *, char *, int *, double *, int *,
+ double *, int *);
+
+int BLASFUNC(stpsv) (char *, char *, char *, int *, float *, float *, int *);
+int BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(ctpsv) (char *, char *, char *, int *, float *, float *, int *);
+int BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(xtpsv) (char *, char *, char *, int *, double *, double *, int *);
+
+int BLASFUNC(strmv) (char *, char *, char *, int *, float *, int *,
+ float *, int *);
+int BLASFUNC(dtrmv) (char *, char *, char *, int *, double *, int *,
+ double *, int *);
+int BLASFUNC(qtrmv) (char *, char *, char *, int *, double *, int *,
+ double *, int *);
+int BLASFUNC(ctrmv) (char *, char *, char *, int *, float *, int *,
+ float *, int *);
+int BLASFUNC(ztrmv) (char *, char *, char *, int *, double *, int *,
+ double *, int *);
+int BLASFUNC(xtrmv) (char *, char *, char *, int *, double *, int *,
+ double *, int *);
+
+int BLASFUNC(stpmv) (char *, char *, char *, int *, float *, float *, int *);
+int BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(qtpmv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(ctpmv) (char *, char *, char *, int *, float *, float *, int *);
+int BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *);
+
+int BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *);
+int BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float *, int *, float *, int *);
+int BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+
+int BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *);
+int BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(qtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float *, int *, float *, int *);
+int BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+
+int BLASFUNC(ssymv) (char *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(dsymv) (char *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(qsymv) (char *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(csymv) (char *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zsymv) (char *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xsymv) (char *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(sspmv) (char *, int *, float *, float *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(dspmv) (char *, int *, double *, double *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(qspmv) (char *, int *, double *, double *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(cspmv) (char *, int *, float *, float *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zspmv) (char *, int *, double *, double *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xspmv) (char *, int *, double *, double *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(ssyr) (char *, int *, float *, float *, int *,
+ float *, int *);
+int BLASFUNC(dsyr) (char *, int *, double *, double *, int *,
+ double *, int *);
+int BLASFUNC(qsyr) (char *, int *, double *, double *, int *,
+ double *, int *);
+int BLASFUNC(csyr) (char *, int *, float *, float *, int *,
+ float *, int *);
+int BLASFUNC(zsyr) (char *, int *, double *, double *, int *,
+ double *, int *);
+int BLASFUNC(xsyr) (char *, int *, double *, double *, int *,
+ double *, int *);
+
+int BLASFUNC(ssyr2) (char *, int *, float *,
+ float *, int *, float *, int *, float *, int *);
+int BLASFUNC(dsyr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *, int *);
+int BLASFUNC(qsyr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *, int *);
+int BLASFUNC(csyr2) (char *, int *, float *,
+ float *, int *, float *, int *, float *, int *);
+int BLASFUNC(zsyr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *, int *);
+int BLASFUNC(xsyr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *, int *);
+
+int BLASFUNC(sspr) (char *, int *, float *, float *, int *,
+ float *);
+int BLASFUNC(dspr) (char *, int *, double *, double *, int *,
+ double *);
+int BLASFUNC(qspr) (char *, int *, double *, double *, int *,
+ double *);
+int BLASFUNC(cspr) (char *, int *, float *, float *, int *,
+ float *);
+int BLASFUNC(zspr) (char *, int *, double *, double *, int *,
+ double *);
+int BLASFUNC(xspr) (char *, int *, double *, double *, int *,
+ double *);
+
+int BLASFUNC(sspr2) (char *, int *, float *,
+ float *, int *, float *, int *, float *);
+int BLASFUNC(dspr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *);
+int BLASFUNC(qspr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *);
+int BLASFUNC(cspr2) (char *, int *, float *,
+ float *, int *, float *, int *, float *);
+int BLASFUNC(zspr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *);
+int BLASFUNC(xspr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *);
+
+int BLASFUNC(cher) (char *, int *, float *, float *, int *,
+ float *, int *);
+int BLASFUNC(zher) (char *, int *, double *, double *, int *,
+ double *, int *);
+int BLASFUNC(xher) (char *, int *, double *, double *, int *,
+ double *, int *);
+
+int BLASFUNC(chpr) (char *, int *, float *, float *, int *, float *);
+int BLASFUNC(zhpr) (char *, int *, double *, double *, int *, double *);
+int BLASFUNC(xhpr) (char *, int *, double *, double *, int *, double *);
+
+int BLASFUNC(cher2) (char *, int *, float *,
+ float *, int *, float *, int *, float *, int *);
+int BLASFUNC(zher2) (char *, int *, double *,
+ double *, int *, double *, int *, double *, int *);
+int BLASFUNC(xher2) (char *, int *, double *,
+ double *, int *, double *, int *, double *, int *);
+
+int BLASFUNC(chpr2) (char *, int *, float *,
+ float *, int *, float *, int *, float *);
+int BLASFUNC(zhpr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *);
+int BLASFUNC(xhpr2) (char *, int *, double *,
+ double *, int *, double *, int *, double *);
+
+int BLASFUNC(chemv) (char *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zhemv) (char *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xhemv) (char *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(chpmv) (char *, int *, float *, float *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zhpmv) (char *, int *, double *, double *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xhpmv) (char *, int *, double *, double *,
+ double *, int *, double *, double *, int *);
+
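+/* Note (editorial): the *norm prototypes below are not part of the reference
+   BLAS; they appear to be carried over from the GotoBLAS-style interface this
+   header follows. */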
+int BLASFUNC(snorm)(char *, int *, int *, float *, int *);
+int BLASFUNC(dnorm)(char *, int *, int *, double *, int *);
+int BLASFUNC(cnorm)(char *, int *, int *, float *, int *);
+int BLASFUNC(znorm)(char *, int *, int *, double *, int *);
+
+int BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(ssbmv)(char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(csbmv)(char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(chbmv)(char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+/* Level 3 routines */
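+/* (Level 3 = matrix-matrix operations, O(n^3) flops on O(n^2) data; the
+   routines above are Level 2 matrix-vector kernels.) */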
+
+int BLASFUNC(sgemm)(char *, char *, int *, int *, int *, float *,
+ float *, int *, float *, int *, float *, float *, int *);
+int BLASFUNC(dgemm)(char *, char *, int *, int *, int *, double *,
+ double *, int *, double *, int *, double *, double *, int *);
+int BLASFUNC(qgemm)(char *, char *, int *, int *, int *, double *,
+ double *, int *, double *, int *, double *, double *, int *);
+int BLASFUNC(cgemm)(char *, char *, int *, int *, int *, float *,
+ float *, int *, float *, int *, float *, float *, int *);
+int BLASFUNC(zgemm)(char *, char *, int *, int *, int *, double *,
+ double *, int *, double *, int *, double *, double *, int *);
+int BLASFUNC(xgemm)(char *, char *, int *, int *, int *, double *,
+ double *, int *, double *, int *, double *, double *, int *);
+
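+/* Note (editorial): gemm3m below (and ge2mm, her2m, gemt, gema, gems further
+   on) are vendor extensions, not reference-BLAS entry points; likewise the
+   q- and x-prefixed variants declare extended-precision versions that most
+   BLAS builds do not provide. */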
+int BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *,
+ float *, int *, float *, int *, float *, float *, int *);
+int BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *,
+ double *, int *, double *, int *, double *, double *, int *);
+int BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *,
+ double *, int *, double *, int *, double *, double *, int *);
+
+int BLASFUNC(sge2mm)(char *, char *, char *, int *, int *,
+ float *, float *, int *, float *, int *,
+ float *, float *, int *);
+int BLASFUNC(dge2mm)(char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *,
+ double *, double *, int *);
+int BLASFUNC(cge2mm)(char *, char *, char *, int *, int *,
+ float *, float *, int *, float *, int *,
+ float *, float *, int *);
+int BLASFUNC(zge2mm)(char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *,
+ double *, double *, int *);
+
+int BLASFUNC(strsm)(char *, char *, char *, char *, int *, int *,
+ float *, float *, int *, float *, int *);
+int BLASFUNC(dtrsm)(char *, char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *);
+int BLASFUNC(qtrsm)(char *, char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *);
+int BLASFUNC(ctrsm)(char *, char *, char *, char *, int *, int *,
+ float *, float *, int *, float *, int *);
+int BLASFUNC(ztrsm)(char *, char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *);
+int BLASFUNC(xtrsm)(char *, char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *);
+
+int BLASFUNC(strmm)(char *, char *, char *, char *, int *, int *,
+ float *, float *, int *, float *, int *);
+int BLASFUNC(dtrmm)(char *, char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *);
+int BLASFUNC(qtrmm)(char *, char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *);
+int BLASFUNC(ctrmm)(char *, char *, char *, char *, int *, int *,
+ float *, float *, int *, float *, int *);
+int BLASFUNC(ztrmm)(char *, char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *);
+int BLASFUNC(xtrmm)(char *, char *, char *, char *, int *, int *,
+ double *, double *, int *, double *, int *);
+
+int BLASFUNC(ssymm)(char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(dsymm)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(qsymm)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(csymm)(char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zsymm)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xsymm)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(csymm3m)(char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(ssyrk)(char *, char *, int *, int *, float *, float *, int *,
+ float *, float *, int *);
+int BLASFUNC(dsyrk)(char *, char *, int *, int *, double *, double *, int *,
+ double *, double *, int *);
+int BLASFUNC(qsyrk)(char *, char *, int *, int *, double *, double *, int *,
+ double *, double *, int *);
+int BLASFUNC(csyrk)(char *, char *, int *, int *, float *, float *, int *,
+ float *, float *, int *);
+int BLASFUNC(zsyrk)(char *, char *, int *, int *, double *, double *, int *,
+ double *, double *, int *);
+int BLASFUNC(xsyrk)(char *, char *, int *, int *, double *, double *, int *,
+ double *, double *, int *);
+
+int BLASFUNC(ssyr2k)(char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(dsyr2k)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(qsyr2k)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(csyr2k)(char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zsyr2k)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xsyr2k)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(chemm)(char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zhemm)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xhemm)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(chemm3m)(char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(cherk)(char *, char *, int *, int *, float *, float *, int *,
+ float *, float *, int *);
+int BLASFUNC(zherk)(char *, char *, int *, int *, double *, double *, int *,
+ double *, double *, int *);
+int BLASFUNC(xherk)(char *, char *, int *, int *, double *, double *, int *,
+ double *, double *, int *);
+
+int BLASFUNC(cher2k)(char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zher2k)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xher2k)(char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(cher2m)(char *, char *, char *, int *, int *, float *, float *, int *,
+ float *, int *, float *, float *, int *);
+int BLASFUNC(zher2m)(char *, char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+int BLASFUNC(xher2m)(char *, char *, char *, int *, int *, double *, double *, int *,
+ double *, int *, double *, double *, int *);
+
+int BLASFUNC(sgemt)(char *, int *, int *, float *, float *, int *,
+ float *, int *);
+int BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *,
+ double *, int *);
+int BLASFUNC(cgemt)(char *, int *, int *, float *, float *, int *,
+ float *, int *);
+int BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *,
+ double *, int *);
+
+int BLASFUNC(sgema)(char *, char *, int *, int *, float *,
+ float *, int *, float *, float *, int *, float *, int *);
+int BLASFUNC(dgema)(char *, char *, int *, int *, double *,
+ double *, int *, double *, double *, int *, double *, int *);
+int BLASFUNC(cgema)(char *, char *, int *, int *, float *,
+ float *, int *, float *, float *, int *, float *, int *);
+int BLASFUNC(zgema)(char *, char *, int *, int *, double *,
+ double *, int *, double *, double *, int *, double *, int *);
+
+int BLASFUNC(sgems)(char *, char *, int *, int *, float *,
+ float *, int *, float *, float *, int *, float *, int *);
+int BLASFUNC(dgems)(char *, char *, int *, int *, double *,
+ double *, int *, double *, double *, int *, double *, int *);
+int BLASFUNC(cgems)(char *, char *, int *, int *, float *,
+ float *, int *, float *, float *, int *, float *, int *);
+int BLASFUNC(zgems)(char *, char *, int *, int *, double *,
+ double *, int *, double *, double *, int *, double *, int *);
+
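+/* Note (editorial): the remaining prototypes (getf2/getrf, laswp, getrs,
+   gesv, potf2/potrf, lauu2/lauum, trti2/trtri, potri) are LAPACK routines
+   rather than BLAS proper; they use the same by-pointer Fortran calling
+   convention as the declarations above. */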
+int BLASFUNC(sgetf2)(int *, int *, float *, int *, int *, int *);
+int BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(qgetf2)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(cgetf2)(int *, int *, float *, int *, int *, int *);
+int BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *);
+
+int BLASFUNC(sgetrf)(int *, int *, float *, int *, int *, int *);
+int BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(cgetrf)(int *, int *, float *, int *, int *, int *);
+int BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *);
+
+int BLASFUNC(slaswp)(int *, float *, int *, int *, int *, int *, int *);
+int BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *);
+int BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *);
+int BLASFUNC(claswp)(int *, float *, int *, int *, int *, int *, int *);
+int BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *);
+int BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *);
+
+int BLASFUNC(sgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *);
+int BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(qgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(cgetrs)(char *, int *, int *, float *, int *, int *, float *, int *, int *);
+int BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
+
+int BLASFUNC(sgesv)(int *, int *, float *, int *, int *, float *, int *, int *);
+int BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(cgesv)(int *, int *, float *, int *, int *, float *, int *, int *);
+int BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double *, int *, int *);
+
+int BLASFUNC(spotf2)(char *, int *, float *, int *, int *);
+int BLASFUNC(dpotf2)(char *, int *, double *, int *, int *);
+int BLASFUNC(qpotf2)(char *, int *, double *, int *, int *);
+int BLASFUNC(cpotf2)(char *, int *, float *, int *, int *);
+int BLASFUNC(zpotf2)(char *, int *, double *, int *, int *);
+int BLASFUNC(xpotf2)(char *, int *, double *, int *, int *);
+
+int BLASFUNC(spotrf)(char *, int *, float *, int *, int *);
+int BLASFUNC(dpotrf)(char *, int *, double *, int *, int *);
+int BLASFUNC(qpotrf)(char *, int *, double *, int *, int *);
+int BLASFUNC(cpotrf)(char *, int *, float *, int *, int *);
+int BLASFUNC(zpotrf)(char *, int *, double *, int *, int *);
+int BLASFUNC(xpotrf)(char *, int *, double *, int *, int *);
+
+int BLASFUNC(slauu2)(char *, int *, float *, int *, int *);
+int BLASFUNC(dlauu2)(char *, int *, double *, int *, int *);
+int BLASFUNC(qlauu2)(char *, int *, double *, int *, int *);
+int BLASFUNC(clauu2)(char *, int *, float *, int *, int *);
+int BLASFUNC(zlauu2)(char *, int *, double *, int *, int *);
+int BLASFUNC(xlauu2)(char *, int *, double *, int *, int *);
+
+int BLASFUNC(slauum)(char *, int *, float *, int *, int *);
+int BLASFUNC(dlauum)(char *, int *, double *, int *, int *);
+int BLASFUNC(qlauum)(char *, int *, double *, int *, int *);
+int BLASFUNC(clauum)(char *, int *, float *, int *, int *);
+int BLASFUNC(zlauum)(char *, int *, double *, int *, int *);
+int BLASFUNC(xlauum)(char *, int *, double *, int *, int *);
+
+int BLASFUNC(strti2)(char *, char *, int *, float *, int *, int *);
+int BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(ctrti2)(char *, char *, int *, float *, int *, int *);
+int BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *);
+
+int BLASFUNC(strtri)(char *, char *, int *, float *, int *, int *);
+int BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(ctrtri)(char *, char *, int *, float *, int *, int *);
+int BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *);
+
+int BLASFUNC(spotri)(char *, int *, float *, int *, int *);
+int BLASFUNC(dpotri)(char *, int *, double *, int *, int *);
+int BLASFUNC(qpotri)(char *, int *, double *, int *, int *);
+int BLASFUNC(cpotri)(char *, int *, float *, int *, int *);
+int BLASFUNC(zpotri)(char *, int *, double *, int *, int *);
+int BLASFUNC(xpotri)(char *, int *, double *, int *, int *);
+
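+/* Usage sketch (editorial, illustrative only): assuming BLASFUNC(dgemm)
+   resolves to the external Fortran symbol (typically dgemm_) and a real BLAS
+   is linked, every argument is passed by pointer and matrices are stored
+   column-major. Computing C = alpha*A*B + beta*C for the 2x2 matrices
+   A = [[1,2],[3,4]] and B = [[5,6],[7,8]]:
+
+     int n = 2;
+     double alpha = 1.0, beta = 0.0;
+     double A[4] = {1, 3, 2, 4};
+     double B[4] = {5, 7, 6, 8};
+     double C[4] = {0, 0, 0, 0};
+     char notrans = 'N';
+     BLASFUNC(dgemm)(&notrans, &notrans, &n, &n, &n, &alpha,
+                     A, &n, B, &n, &beta, C, &n);
+
+   On return, C holds A*B = [[19,22],[43,50]], also column-major. */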
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/extern/Eigen3/Eigen/src/plugins/CMakeLists.txt b/extern/Eigen3/Eigen/src/plugins/CMakeLists.txt
new file mode 100644
index 00000000000..1a1d3ffbd72
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/plugins/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_plugins_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_plugins_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/plugins COMPONENT Devel
+ )