git.blender.org/blender.git
author     Sergey Sharybin <sergey.vfx@gmail.com>  2014-03-21 14:04:53 +0400
committer  Sergey Sharybin <sergey.vfx@gmail.com>  2014-03-21 14:04:53 +0400
commit     3411146984316c97f56543333a46f47aeb7f9d35 (patch)
tree       5de608a3c18ff2a5459fd2191609dd882ad86213 /extern/Eigen3
parent     1781928f9d720fa1dc4811afb69935b35aa89878 (diff)
Update Eigen to version 3.2.1
The main purpose of this update is to get the SparseLU solver, which we can now use as a replacement for the opennl library.
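For context, a minimal sketch of how the new SparseLU solver is used to solve A*x = b (not part of the commit; the matrix size and values below are purely illustrative):

#include <Eigen/Sparse>   // Sparse meta-module now pulls in SparseLU (see diff below)
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;

  // Assemble a small, illustrative sparse system A*x = b.
  SpMat A(3, 3);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 3.0;
  A.insert(2, 2) = 2.0;
  A.insert(0, 2) = 1.0;
  A.makeCompressed();               // SparseLU expects compressed storage

  Eigen::VectorXd b(3);
  b << 1.0, 2.0, 3.0;

  // Factorize with the COLAMD fill-reducing ordering, then solve.
  Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > solver;
  solver.analyzePattern(A);
  solver.factorize(A);
  if (solver.info() != Eigen::Success)
    return 1;                       // factorization failed
  Eigen::VectorXd x = solver.solve(b);
  std::cout << x << std::endl;
  return 0;
}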
Diffstat (limited to 'extern/Eigen3')
-rw-r--r-- extern/Eigen3/Eigen/Core | 48
-rw-r--r-- extern/Eigen3/Eigen/Eigen2Support | 15
-rw-r--r-- extern/Eigen3/Eigen/Eigenvalues | 2
-rw-r--r-- extern/Eigen3/Eigen/IterativeLinearSolvers | 2
-rw-r--r-- extern/Eigen3/Eigen/MetisSupport | 28
-rw-r--r-- extern/Eigen3/Eigen/OrderingMethods | 51
-rw-r--r-- extern/Eigen3/Eigen/SPQRSupport | 29
-rw-r--r-- extern/Eigen3/Eigen/Sparse | 14
-rw-r--r-- extern/Eigen3/Eigen/SparseCholesky | 21
-rw-r--r-- extern/Eigen3/Eigen/SparseCore | 4
-rw-r--r-- extern/Eigen3/Eigen/SparseLU | 49
-rw-r--r-- extern/Eigen3/Eigen/SparseQR | 33
-rw-r--r-- extern/Eigen3/Eigen/src/Cholesky/LDLT.h | 74
-rw-r--r-- extern/Eigen3/Eigen/src/Cholesky/LLT.h | 20
-rw-r--r-- extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h | 66
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Array.h | 34
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ArrayBase.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ArrayWrapper.h | 68
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Assign.h | 32
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Assign_MKL.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Block.h | 182
-rw-r--r-- extern/Eigen3/Eigen/src/Core/BooleanRedux.h | 36
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CommaInitializer.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CoreIterators.h | 61
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h | 22
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CwiseNullaryOp.h | 134
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h | 8
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h | 16
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DenseBase.h | 72
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h | 12
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DenseStorage.h | 114
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Diagonal.h | 41
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h | 26
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DiagonalProduct.h | 51
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Dot.h | 12
-rw-r--r-- extern/Eigen3/Eigen/src/Core/EigenBase.h | 29
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Functors.h | 130
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Fuzzy.h | 22
-rw-r--r-- extern/Eigen3/Eigen/src/Core/GeneralProduct.h | 100
-rw-r--r-- extern/Eigen3/Eigen/src/Core/GenericPacketMath.h | 50
-rw-r--r-- extern/Eigen3/Eigen/src/Core/GlobalFunctions.h | 49
-rw-r--r-- extern/Eigen3/Eigen/src/Core/IO.h | 13
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Map.h | 30
-rw-r--r-- extern/Eigen3/Eigen/src/Core/MapBase.h | 50
-rw-r--r-- extern/Eigen3/Eigen/src/Core/MathFunctions.h | 354
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Matrix.h | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Core/MatrixBase.h | 79
-rw-r--r-- extern/Eigen3/Eigen/src/Core/NoAlias.h | 9
-rw-r--r-- extern/Eigen3/Eigen/src/Core/NumTraits.h | 3
-rw-r--r-- extern/Eigen3/Eigen/src/Core/PermutationMatrix.h | 46
-rw-r--r-- extern/Eigen3/Eigen/src/Core/PlainObjectBase.h | 179
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ProductBase.h | 20
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Random.h | 14
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Redux.h | 6
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Ref.h | 256
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Replicate.h | 28
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ReturnByValue.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Select.h | 14
-rw-r--r-- extern/Eigen3/Eigen/src/Core/SelfAdjointView.h | 16
-rw-r--r-- extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h | 5
-rw-r--r-- extern/Eigen3/Eigen/src/Core/StableNorm.h | 192
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Swap.h | 34
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Transpose.h | 45
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Transpositions.h | 18
-rw-r--r-- extern/Eigen3/Eigen/src/Core/TriangularMatrix.h | 17
-rw-r--r-- extern/Eigen3/Eigen/src/Core/VectorBlock.h | 189
-rw-r--r-- extern/Eigen3/Eigen/src/Core/VectorwiseOp.h | 52
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Visitor.h | 28
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h | 3
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h | 50
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h | 22
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h | 101
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h | 29
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h | 302
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h | 7
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h | 112
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h | 54
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h | 6
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h | 66
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h | 33
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h | 20
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h | 18
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h | 40
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h | 24
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h | 32
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h | 16
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h | 18
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/BlasUtil.h | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/Constants.h | 15
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h | 7
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/Macros.h | 37
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/Memory.h | 74
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/Meta.h | 20
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/StaticAssert.h | 3
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/XprHelper.h | 28
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AlignedBox.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AngleAxis.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h | 24
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/SVD.h | 13
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h | 26
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h | 102
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h | 71
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h | 341
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h | 6
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h | 3
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h | 624
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h | 123
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 33
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h | 12
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/AlignedBox.h | 14
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/AngleAxis.h | 13
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/EulerAngles.h | 60
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Homogeneous.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Hyperplane.h | 11
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h | 30
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h | 8
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Quaternion.h | 96
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Rotation2D.h | 15
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Scaling.h | 6
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Transform.h | 22
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Umeyama.h | 25
-rw-r--r-- extern/Eigen3/Eigen/src/Householder/Householder.h | 13
-rw-r--r-- extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h | 14
-rw-r--r-- extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 31
-rw-r--r-- extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h | 26
-rw-r--r-- extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h | 215
-rw-r--r-- extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/Jacobi/Jacobi.h | 99
-rw-r--r-- extern/Eigen3/Eigen/src/LU/Determinant.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/LU/FullPivLU.h | 10
-rw-r--r-- extern/Eigen3/Eigen/src/LU/Inverse.h | 6
-rw-r--r-- extern/Eigen3/Eigen/src/LU/PartialPivLU.h | 9
-rw-r--r-- extern/Eigen3/Eigen/src/MetisSupport/MetisSupport.h | 137
-rw-r--r-- extern/Eigen3/Eigen/src/OrderingMethods/Amd.h | 6
-rw-r--r-- extern/Eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h | 1850
-rw-r--r-- extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h | 150
-rw-r--r-- extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h | 23
-rw-r--r-- extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h | 30
-rw-r--r-- extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h | 107
-rw-r--r-- extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h | 7
-rw-r--r-- extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h | 92
-rw-r--r-- extern/Eigen3/Eigen/src/QR/HouseholderQR.h | 47
-rw-r--r-- extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 314
-rw-r--r-- extern/Eigen3/Eigen/src/SVD/JacobiSVD.h | 89
-rw-r--r-- extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h | 214
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h | 199
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h | 10
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h | 8
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h | 40
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h | 2
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h | 436
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseColEtree.h | 206
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h | 6
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h | 19
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h | 34
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseDot.h | 19
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h | 615
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h | 77
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h | 4
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h | 12
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h | 45
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h | 23
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h | 12
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h | 31
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h | 24
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseVector.h | 159
-rw-r--r-- extern/Eigen3/Eigen/src/SparseCore/SparseView.h | 5
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU.h | 758
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h | 64
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h | 227
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_Structs.h | 111
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h | 298
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_Utils.h | 80
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h | 180
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_dfs.h | 177
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h | 106
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_gemm_kernel.h | 279
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h | 127
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h | 130
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h | 223
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_dfs.h | 258
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h | 134
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_pruneL.h | 135
-rw-r--r-- extern/Eigen3/Eigen/src/SparseLU/SparseLU_relax_snode.h | 83
-rw-r--r-- extern/Eigen3/Eigen/src/SparseQR/SparseQR.h | 657
-rw-r--r-- extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h | 29
-rw-r--r-- extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h | 37
-rw-r--r-- extern/Eigen3/Eigen/src/misc/SparseSolve.h | 17
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h | 20
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h | 1
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/BlockMethods.h | 435
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h | 4
-rwxr-xr-x extern/Eigen3/eigen-update.sh | 2
212 files changed, 13026 insertions, 3400 deletions
diff --git a/extern/Eigen3/Eigen/Core b/extern/Eigen3/Eigen/Core
index d4801702261..9131cc3fc9d 100644
--- a/extern/Eigen3/Eigen/Core
+++ b/extern/Eigen3/Eigen/Core
@@ -19,6 +19,12 @@
// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.
#include "src/Core/util/Macros.h"
+// Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)
+// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
+#if defined(__MINGW32__) && EIGEN_GNUC_AT_LEAST(4,6)
+ #pragma GCC optimize ("-fno-ipa-cp-clone")
+#endif
+
#include <complex>
// this include file manages BLAS and MKL related macros
@@ -44,7 +50,7 @@
#endif
#else
// Remember that usage of defined() in a #define is undefined by the standard
- #if (defined __SSE2__) && ( (!defined __GNUC__) || EIGEN_GNUC_AT_LEAST(4,2) )
+ #if (defined __SSE2__) && ( (!defined __GNUC__) || (defined __INTEL_COMPILER) || EIGEN_GNUC_AT_LEAST(4,2) )
#define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
#endif
#endif
@@ -87,19 +93,25 @@
// so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
// notice that since these are C headers, the extern "C" is theoretically needed anyways.
extern "C" {
- #include <emmintrin.h>
- #include <xmmintrin.h>
- #ifdef EIGEN_VECTORIZE_SSE3
- #include <pmmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSSE3
- #include <tmmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSE4_1
- #include <smmintrin.h>
- #endif
- #ifdef EIGEN_VECTORIZE_SSE4_2
- #include <nmmintrin.h>
+ // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
+ // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
+ #ifdef __INTEL_COMPILER
+ #include <immintrin.h>
+ #else
+ #include <emmintrin.h>
+ #include <xmmintrin.h>
+ #ifdef EIGEN_VECTORIZE_SSE3
+ #include <pmmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSSE3
+ #include <tmmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSE4_1
+ #include <smmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSE4_2
+ #include <nmmintrin.h>
+ #endif
#endif
} // end extern "C"
#elif defined __ALTIVEC__
@@ -236,15 +248,11 @@ using std::ptrdiff_t;
* \endcode
*/
-/** \defgroup Support_modules Support modules [category]
- * Category of modules which add support for external libraries.
- */
-
#include "src/Core/util/Constants.h"
#include "src/Core/util/ForwardDeclarations.h"
#include "src/Core/util/Meta.h"
-#include "src/Core/util/XprHelper.h"
#include "src/Core/util/StaticAssert.h"
+#include "src/Core/util/XprHelper.h"
#include "src/Core/util/Memory.h"
#include "src/Core/NumTraits.h"
@@ -297,6 +305,7 @@ using std::ptrdiff_t;
#include "src/Core/Map.h"
#include "src/Core/Block.h"
#include "src/Core/VectorBlock.h"
+#include "src/Core/Ref.h"
#include "src/Core/Transpose.h"
#include "src/Core/DiagonalMatrix.h"
#include "src/Core/Diagonal.h"
@@ -330,6 +339,7 @@ using std::ptrdiff_t;
#include "src/Core/products/TriangularSolverMatrix.h"
#include "src/Core/products/TriangularSolverVector.h"
#include "src/Core/BandMatrix.h"
+#include "src/Core/CoreIterators.h"
#include "src/Core/BooleanRedux.h"
#include "src/Core/Select.h"
diff --git a/extern/Eigen3/Eigen/Eigen2Support b/extern/Eigen3/Eigen/Eigen2Support
index 36156d29a92..6aa009d2093 100644
--- a/extern/Eigen3/Eigen/Eigen2Support
+++ b/extern/Eigen3/Eigen/Eigen2Support
@@ -14,12 +14,25 @@
#error Eigen2 support must be enabled by defining EIGEN2_SUPPORT before including any Eigen header
#endif
+#ifndef EIGEN_NO_EIGEN2_DEPRECATED_WARNING
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+#warning "Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. (Define EIGEN_NO_EIGEN2_DEPRECATED_WARNING to disable this warning)"
+#else
+#pragma message ("Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. (Define EIGEN_NO_EIGEN2_DEPRECATED_WARNING to disable this warning)")
+#endif
+
+#endif // EIGEN_NO_EIGEN2_DEPRECATED_WARNING
+
#include "src/Core/util/DisableStupidWarnings.h"
/** \ingroup Support_modules
* \defgroup Eigen2Support_Module Eigen2 support module
- * This module provides a couple of deprecated functions improving the compatibility with Eigen2.
*
+ * \warning Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.
+ *
+ * This module provides a couple of deprecated functions improving the compatibility with Eigen2.
+ *
* To use it, define EIGEN2_SUPPORT before including any Eigen header
* \code
* #define EIGEN2_SUPPORT
diff --git a/extern/Eigen3/Eigen/Eigenvalues b/extern/Eigen3/Eigen/Eigenvalues
index af99ccd1fab..53c5a73a278 100644
--- a/extern/Eigen3/Eigen/Eigenvalues
+++ b/extern/Eigen3/Eigen/Eigenvalues
@@ -33,6 +33,8 @@
#include "src/Eigenvalues/HessenbergDecomposition.h"
#include "src/Eigenvalues/ComplexSchur.h"
#include "src/Eigenvalues/ComplexEigenSolver.h"
+#include "src/Eigenvalues/RealQZ.h"
+#include "src/Eigenvalues/GeneralizedEigenSolver.h"
#include "src/Eigenvalues/MatrixBaseEigenvalues.h"
#ifdef EIGEN_USE_LAPACKE
#include "src/Eigenvalues/RealSchur_MKL.h"
diff --git a/extern/Eigen3/Eigen/IterativeLinearSolvers b/extern/Eigen3/Eigen/IterativeLinearSolvers
index 315c2dd1ee7..0f4159dc19f 100644
--- a/extern/Eigen3/Eigen/IterativeLinearSolvers
+++ b/extern/Eigen3/Eigen/IterativeLinearSolvers
@@ -6,7 +6,7 @@
#include "src/Core/util/DisableStupidWarnings.h"
-/** \ingroup Sparse_modules
+/**
* \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module
*
* This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a squared matrix, usually very large and sparse.
diff --git a/extern/Eigen3/Eigen/MetisSupport b/extern/Eigen3/Eigen/MetisSupport
new file mode 100644
index 00000000000..6a113f7a878
--- /dev/null
+++ b/extern/Eigen3/Eigen/MetisSupport
@@ -0,0 +1,28 @@
+#ifndef EIGEN_METISSUPPORT_MODULE_H
+#define EIGEN_METISSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+extern "C" {
+#include <metis.h>
+}
+
+
+/** \ingroup Support_modules
+ * \defgroup MetisSupport_Module MetisSupport module
+ *
+ * \code
+ * #include <Eigen/MetisSupport>
+ * \endcode
+ * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis).
+ * It can be used just as any other built-in method as explained in \link OrderingMethods_Module here. \endlink
+ */
+
+
+#include "src/MetisSupport/MetisSupport.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_METISSUPPORT_MODULE_H
diff --git a/extern/Eigen3/Eigen/OrderingMethods b/extern/Eigen3/Eigen/OrderingMethods
index 1e2d87452e5..7c0f1fffff6 100644
--- a/extern/Eigen3/Eigen/OrderingMethods
+++ b/extern/Eigen3/Eigen/OrderingMethods
@@ -5,19 +5,62 @@
#include "src/Core/util/DisableStupidWarnings.h"
-/** \ingroup Sparse_modules
+/**
* \defgroup OrderingMethods_Module OrderingMethods module
*
- * This module is currently for internal use only.
- *
- *
+ * This module is currently for internal use only
+ *
+ * It defines various built-in and external ordering methods for sparse matrices.
+ * They are typically used to reduce the number of elements during
+ * the sparse matrix decomposition (LLT, LU, QR).
+ * Precisely, in a preprocessing step, a permutation matrix P is computed using
+ * those ordering methods and applied to the columns of the matrix.
+ * Using for instance the sparse Cholesky decomposition, it is expected that
+ * the nonzeros elements in LLT(A*P) will be much smaller than that in LLT(A).
+ *
+ *
+ * Usage :
* \code
* #include <Eigen/OrderingMethods>
* \endcode
+ *
+ * A simple usage is as a template parameter in the sparse decomposition classes :
+ *
+ * \code
+ * SparseLU<MatrixType, COLAMDOrdering<int> > solver;
+ * \endcode
+ *
+ * \code
+ * SparseQR<MatrixType, COLAMDOrdering<int> > solver;
+ * \endcode
+ *
+ * It is possible as well to call directly a particular ordering method for your own purpose,
+ * \code
+ * AMDOrdering<int> ordering;
+ * PermutationMatrix<Dynamic, Dynamic, int> perm;
+ * SparseMatrix<double> A;
+ * //Fill the matrix ...
+ *
+ * ordering(A, perm); // Call AMD
+ * \endcode
+ *
+ * \note Some of these methods (like AMD or METIS), need the sparsity pattern
+ * of the input matrix to be symmetric. When the matrix is structurally unsymmetric,
+ * Eigen computes internally the pattern of \f$A^T*A\f$ before calling the method.
+ * If your matrix is already symmetric (at leat in structure), you can avoid that
+ * by calling the method with a SelfAdjointView type.
+ *
+ * \code
+ * // Call the ordering on the pattern of the lower triangular matrix A
+ * ordering(A.selfadjointView<Lower>(), perm);
+ * \endcode
*/
+#ifndef EIGEN_MPL2_ONLY
#include "src/OrderingMethods/Amd.h"
+#endif
+#include "src/OrderingMethods/Ordering.h"
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_ORDERINGMETHODS_MODULE_H
diff --git a/extern/Eigen3/Eigen/SPQRSupport b/extern/Eigen3/Eigen/SPQRSupport
new file mode 100644
index 00000000000..77016442ee7
--- /dev/null
+++ b/extern/Eigen3/Eigen/SPQRSupport
@@ -0,0 +1,29 @@
+#ifndef EIGEN_SPQRSUPPORT_MODULE_H
+#define EIGEN_SPQRSUPPORT_MODULE_H
+
+#include "SparseCore"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include "SuiteSparseQR.hpp"
+
+/** \ingroup Support_modules
+ * \defgroup SPQRSupport_Module SuiteSparseQR module
+ *
+ * This module provides an interface to the SPQR library, which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
+ *
+ * \code
+ * #include <Eigen/SPQRSupport>
+ * \endcode
+ *
+ * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD,...).
+ * For a cmake based project, you can use our FindSPQR.cmake and FindCholmod.Cmake modules
+ *
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+#include "src/CholmodSupport/CholmodSupport.h"
+#include "src/SPQRSupport/SuiteSparseQRSupport.h"
+
+#endif
diff --git a/extern/Eigen3/Eigen/Sparse b/extern/Eigen3/Eigen/Sparse
index 2d1757172eb..7cc9c09133a 100644
--- a/extern/Eigen3/Eigen/Sparse
+++ b/extern/Eigen3/Eigen/Sparse
@@ -1,13 +1,15 @@
#ifndef EIGEN_SPARSE_MODULE_H
#define EIGEN_SPARSE_MODULE_H
-/** \defgroup Sparse_modules Sparse modules
+/** \defgroup Sparse_Module Sparse meta-module
*
* Meta-module including all related modules:
- * - SparseCore
- * - OrderingMethods
- * - SparseCholesky
- * - IterativeLinearSolvers
+ * - \ref SparseCore_Module
+ * - \ref OrderingMethods_Module
+ * - \ref SparseCholesky_Module
+ * - \ref SparseLU_Module
+ * - \ref SparseQR_Module
+ * - \ref IterativeLinearSolvers_Module
*
* \code
* #include <Eigen/Sparse>
@@ -17,6 +19,8 @@
#include "SparseCore"
#include "OrderingMethods"
#include "SparseCholesky"
+#include "SparseLU"
+#include "SparseQR"
#include "IterativeLinearSolvers"
#endif // EIGEN_SPARSE_MODULE_H
diff --git a/extern/Eigen3/Eigen/SparseCholesky b/extern/Eigen3/Eigen/SparseCholesky
index 5f82742f7d8..9f5056aa1a1 100644
--- a/extern/Eigen3/Eigen/SparseCholesky
+++ b/extern/Eigen3/Eigen/SparseCholesky
@@ -1,11 +1,21 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
#ifndef EIGEN_SPARSECHOLESKY_MODULE_H
#define EIGEN_SPARSECHOLESKY_MODULE_H
#include "SparseCore"
+#include "OrderingMethods"
#include "src/Core/util/DisableStupidWarnings.h"
-/** \ingroup Sparse_modules
+/**
* \defgroup SparseCholesky_Module SparseCholesky module
*
* This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices.
@@ -20,11 +30,18 @@
* \endcode
*/
+#ifdef EIGEN_MPL2_ONLY
+#error The SparseCholesky module has nothing to offer in MPL2 only mode
+#endif
+
#include "src/misc/Solve.h"
#include "src/misc/SparseSolve.h"
-
#include "src/SparseCholesky/SimplicialCholesky.h"
+#ifndef EIGEN_MPL2_ONLY
+#include "src/SparseCholesky/SimplicialCholesky_impl.h"
+#endif
+
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_SPARSECHOLESKY_MODULE_H
diff --git a/extern/Eigen3/Eigen/SparseCore b/extern/Eigen3/Eigen/SparseCore
index 41d28c92824..9b5be5e15a9 100644
--- a/extern/Eigen3/Eigen/SparseCore
+++ b/extern/Eigen3/Eigen/SparseCore
@@ -11,7 +11,7 @@
#include <cstring>
#include <algorithm>
-/** \ingroup Sparse_modules
+/**
* \defgroup SparseCore_Module SparseCore module
*
* This module provides a sparse matrix representation, and basic associatd matrix manipulations
@@ -40,14 +40,12 @@ struct Sparse {};
#include "src/SparseCore/SparseMatrix.h"
#include "src/SparseCore/MappedSparseMatrix.h"
#include "src/SparseCore/SparseVector.h"
-#include "src/SparseCore/CoreIterators.h"
#include "src/SparseCore/SparseBlock.h"
#include "src/SparseCore/SparseTranspose.h"
#include "src/SparseCore/SparseCwiseUnaryOp.h"
#include "src/SparseCore/SparseCwiseBinaryOp.h"
#include "src/SparseCore/SparseDot.h"
#include "src/SparseCore/SparsePermutation.h"
-#include "src/SparseCore/SparseAssign.h"
#include "src/SparseCore/SparseRedux.h"
#include "src/SparseCore/SparseFuzzy.h"
#include "src/SparseCore/ConservativeSparseSparseProduct.h"
diff --git a/extern/Eigen3/Eigen/SparseLU b/extern/Eigen3/Eigen/SparseLU
new file mode 100644
index 00000000000..8527a49bd86
--- /dev/null
+++ b/extern/Eigen3/Eigen/SparseLU
@@ -0,0 +1,49 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSELU_MODULE_H
+#define EIGEN_SPARSELU_MODULE_H
+
+#include "SparseCore"
+
+/**
+ * \defgroup SparseLU_Module SparseLU module
+ * This module defines a supernodal factorization of general sparse matrices.
+ * The code is fully optimized for supernode-panel updates with specialized kernels.
+ * Please, see the documentation of the SparseLU class for more details.
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+// Ordering interface
+#include "OrderingMethods"
+
+#include "src/SparseLU/SparseLU_gemm_kernel.h"
+
+#include "src/SparseLU/SparseLU_Structs.h"
+#include "src/SparseLU/SparseLU_SupernodalMatrix.h"
+#include "src/SparseLU/SparseLUImpl.h"
+#include "src/SparseCore/SparseColEtree.h"
+#include "src/SparseLU/SparseLU_Memory.h"
+#include "src/SparseLU/SparseLU_heap_relax_snode.h"
+#include "src/SparseLU/SparseLU_relax_snode.h"
+#include "src/SparseLU/SparseLU_pivotL.h"
+#include "src/SparseLU/SparseLU_panel_dfs.h"
+#include "src/SparseLU/SparseLU_kernel_bmod.h"
+#include "src/SparseLU/SparseLU_panel_bmod.h"
+#include "src/SparseLU/SparseLU_column_dfs.h"
+#include "src/SparseLU/SparseLU_column_bmod.h"
+#include "src/SparseLU/SparseLU_copy_to_ucol.h"
+#include "src/SparseLU/SparseLU_pruneL.h"
+#include "src/SparseLU/SparseLU_Utils.h"
+#include "src/SparseLU/SparseLU.h"
+
+#endif // EIGEN_SPARSELU_MODULE_H
diff --git a/extern/Eigen3/Eigen/SparseQR b/extern/Eigen3/Eigen/SparseQR
new file mode 100644
index 00000000000..4ee42065eed
--- /dev/null
+++ b/extern/Eigen3/Eigen/SparseQR
@@ -0,0 +1,33 @@
+#ifndef EIGEN_SPARSEQR_MODULE_H
+#define EIGEN_SPARSEQR_MODULE_H
+
+#include "SparseCore"
+#include "OrderingMethods"
+#include "src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup SparseQR_Module SparseQR module
+ * \brief Provides QR decomposition for sparse matrices
+ *
+ * This module provides a simplicial version of the left-looking Sparse QR decomposition.
+ * The columns of the input matrix should be reordered to limit the fill-in during the
+ * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end.
+ * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list
+ * of built-in and external ordering methods.
+ *
+ * \code
+ * #include <Eigen/SparseQR>
+ * \endcode
+ *
+ *
+ */
+
+#include "src/misc/Solve.h"
+#include "src/misc/SparseSolve.h"
+
+#include "OrderingMethods"
+#include "src/SparseCore/SparseColEtree.h"
+#include "src/SparseQR/SparseQR.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif
diff --git a/extern/Eigen3/Eigen/src/Cholesky/LDLT.h b/extern/Eigen3/Eigen/src/Cholesky/LDLT.h
index 68e54b1d4ad..d026418f8a9 100644
--- a/extern/Eigen3/Eigen/src/Cholesky/LDLT.h
+++ b/extern/Eigen3/Eigen/src/Cholesky/LDLT.h
@@ -16,7 +16,10 @@
namespace Eigen {
namespace internal {
-template<typename MatrixType, int UpLo> struct LDLT_Traits;
+ template<typename MatrixType, int UpLo> struct LDLT_Traits;
+
+ // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
+ enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
}
/** \ingroup Cholesky_Module
@@ -69,7 +72,12 @@ template<typename _MatrixType, int _UpLo> class LDLT
* The default constructor is useful in cases in which the user intends to
* perform decompositions via LDLT::compute(const MatrixType&).
*/
- LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
+ LDLT()
+ : m_matrix(),
+ m_transpositions(),
+ m_sign(internal::ZeroSign),
+ m_isInitialized(false)
+ {}
/** \brief Default Constructor with memory preallocation
*
@@ -81,6 +89,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
: m_matrix(size, size),
m_transpositions(size),
m_temporary(size),
+ m_sign(internal::ZeroSign),
m_isInitialized(false)
{}
@@ -93,6 +102,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
: m_matrix(matrix.rows(), matrix.cols()),
m_transpositions(matrix.rows()),
m_temporary(matrix.rows()),
+ m_sign(internal::ZeroSign),
m_isInitialized(false)
{
compute(matrix);
@@ -139,7 +149,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
inline bool isPositive() const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
- return m_sign == 1;
+ return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
}
#ifdef EIGEN2_SUPPORT
@@ -153,7 +163,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
inline bool isNegative(void) const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
- return m_sign == -1;
+ return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
}
/** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
@@ -196,7 +206,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
LDLT& compute(const MatrixType& matrix);
template <typename Derived>
- LDLT& rankUpdate(const MatrixBase<Derived>& w,RealScalar alpha=1);
+ LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1);
/** \returns the internal LDLT decomposition matrix
*
@@ -235,7 +245,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
MatrixType m_matrix;
TranspositionType m_transpositions;
TmpMatrixType m_temporary;
- int m_sign;
+ internal::SignMatrix m_sign;
bool m_isInitialized;
};
@@ -246,8 +256,9 @@ template<int UpLo> struct ldlt_inplace;
template<> struct ldlt_inplace<Lower>
{
template<typename MatrixType, typename TranspositionType, typename Workspace>
- static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+ static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
{
+ using std::abs;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
@@ -257,8 +268,9 @@ template<> struct ldlt_inplace<Lower>
if (size <= 1)
{
transpositions.setIdentity();
- if(sign)
- *sign = real(mat.coeff(0,0))>0 ? 1:-1;
+ if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef;
+ else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef;
+ else sign = ZeroSign;
return true;
}
@@ -277,9 +289,6 @@ template<> struct ldlt_inplace<Lower>
// are compared; if any diagonal is negligible compared
// to the largest overall, the algorithm bails.
cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
-
- if(sign)
- *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
}
// Finish early if the matrix is not full rank.
@@ -301,11 +310,11 @@ template<> struct ldlt_inplace<Lower>
for(int i=k+1;i<index_of_biggest_in_corner;++i)
{
Scalar tmp = mat.coeffRef(i,k);
- mat.coeffRef(i,k) = conj(mat.coeffRef(index_of_biggest_in_corner,i));
- mat.coeffRef(index_of_biggest_in_corner,i) = conj(tmp);
+ mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i));
+ mat.coeffRef(index_of_biggest_in_corner,i) = numext::conj(tmp);
}
if(NumTraits<Scalar>::IsComplex)
- mat.coeffRef(index_of_biggest_in_corner,k) = conj(mat.coeff(index_of_biggest_in_corner,k));
+ mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k));
}
// partition the matrix:
@@ -326,6 +335,16 @@ template<> struct ldlt_inplace<Lower>
}
if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
A21 /= mat.coeffRef(k,k);
+
+ RealScalar realAkk = numext::real(mat.coeffRef(k,k));
+ if (sign == PositiveSemiDef) {
+ if (realAkk < 0) sign = Indefinite;
+ } else if (sign == NegativeSemiDef) {
+ if (realAkk > 0) sign = Indefinite;
+ } else if (sign == ZeroSign) {
+ if (realAkk > 0) sign = PositiveSemiDef;
+ else if (realAkk < 0) sign = NegativeSemiDef;
+ }
}
return true;
@@ -339,9 +358,9 @@ template<> struct ldlt_inplace<Lower>
// Here only rank-1 updates are implemented, to reduce the
// requirement for intermediate storage and improve accuracy
template<typename MatrixType, typename WDerived>
- static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, typename MatrixType::RealScalar sigma=1)
+ static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, const typename MatrixType::RealScalar& sigma=1)
{
- using internal::isfinite;
+ using numext::isfinite;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
@@ -359,9 +378,9 @@ template<> struct ldlt_inplace<Lower>
break;
// Update the diagonal terms
- RealScalar dj = real(mat.coeff(j,j));
+ RealScalar dj = numext::real(mat.coeff(j,j));
Scalar wj = w.coeff(j);
- RealScalar swj2 = sigma*abs2(wj);
+ RealScalar swj2 = sigma*numext::abs2(wj);
RealScalar gamma = dj*alpha + swj2;
mat.coeffRef(j,j) += swj2/alpha;
@@ -372,13 +391,13 @@ template<> struct ldlt_inplace<Lower>
Index rs = size-j-1;
w.tail(rs) -= wj * mat.col(j).tail(rs);
if(gamma != 0)
- mat.col(j).tail(rs) += (sigma*conj(wj)/gamma)*w.tail(rs);
+ mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs);
}
return true;
}
template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
- static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, typename MatrixType::RealScalar sigma=1)
+ static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1)
{
// Apply the permutation to the input w
tmp = transpositions * w;
@@ -390,14 +409,14 @@ template<> struct ldlt_inplace<Lower>
template<> struct ldlt_inplace<Upper>
{
template<typename MatrixType, typename TranspositionType, typename Workspace>
- static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+ static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
{
Transpose<MatrixType> matt(mat);
return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
}
template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
- static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, typename MatrixType::RealScalar sigma=1)
+ static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1)
{
Transpose<MatrixType> matt(mat);
return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma);
@@ -436,7 +455,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
m_isInitialized = false;
m_temporary.resize(size);
- internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
+ internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
m_isInitialized = true;
return *this;
@@ -449,7 +468,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
*/
template<typename MatrixType, int _UpLo>
template<typename Derived>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w,typename NumTraits<typename MatrixType::Scalar>::Real sigma)
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
{
const Index size = w.rows();
if (m_isInitialized)
@@ -464,7 +483,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
for (Index i = 0; i < size; i++)
m_transpositions.coeffRef(i) = i;
m_temporary.resize(size);
- m_sign = sigma>=0 ? 1 : -1;
+ m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
m_isInitialized = true;
}
@@ -534,8 +553,7 @@ template<typename Derived>
bool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
- const Index size = m_matrix.rows();
- eigen_assert(size == bAndX.rows());
+ eigen_assert(m_matrix.rows() == bAndX.rows());
bAndX = this->solve(bAndX);
diff --git a/extern/Eigen3/Eigen/src/Cholesky/LLT.h b/extern/Eigen3/Eigen/src/Cholesky/LLT.h
index 41d14e532f1..2e6189f7dab 100644
--- a/extern/Eigen3/Eigen/src/Cholesky/LLT.h
+++ b/extern/Eigen3/Eigen/src/Cholesky/LLT.h
@@ -190,6 +190,7 @@ template<typename Scalar, int UpLo> struct llt_inplace;
template<typename MatrixType, typename VectorType>
static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma)
{
+ using std::sqrt;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename MatrixType::Index Index;
@@ -199,7 +200,7 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
typedef Matrix<Scalar,Dynamic,1> TempVectorType;
typedef typename TempVectorType::SegmentReturnType TempVecSegment;
- int n = mat.cols();
+ Index n = mat.cols();
eigen_assert(mat.rows()==n && vec.size()==n);
TempVectorType temp;
@@ -211,12 +212,12 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
// i.e., for sigma > 0
temp = sqrt(sigma) * vec;
- for(int i=0; i<n; ++i)
+ for(Index i=0; i<n; ++i)
{
JacobiRotation<Scalar> g;
g.makeGivens(mat(i,i), -temp(i), &mat(i,i));
- int rs = n-i-1;
+ Index rs = n-i-1;
if(rs>0)
{
ColXprSegment x(mat.col(i).tail(rs));
@@ -229,12 +230,12 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
{
temp = vec;
RealScalar beta = 1;
- for(int j=0; j<n; ++j)
+ for(Index j=0; j<n; ++j)
{
- RealScalar Ljj = real(mat.coeff(j,j));
- RealScalar dj = abs2(Ljj);
+ RealScalar Ljj = numext::real(mat.coeff(j,j));
+ RealScalar dj = numext::abs2(Ljj);
Scalar wj = temp.coeff(j);
- RealScalar swj2 = sigma*abs2(wj);
+ RealScalar swj2 = sigma*numext::abs2(wj);
RealScalar gamma = dj*beta + swj2;
RealScalar x = dj + swj2/beta;
@@ -250,7 +251,7 @@ static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const V
{
temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs);
if(gamma != 0)
- mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*conj(wj)/gamma)*temp.tail(rs);
+ mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs);
}
}
}
@@ -263,6 +264,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
template<typename MatrixType>
static typename MatrixType::Index unblocked(MatrixType& mat)
{
+ using std::sqrt;
typedef typename MatrixType::Index Index;
eigen_assert(mat.rows()==mat.cols());
@@ -275,7 +277,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
- RealScalar x = real(mat.coeff(k,k));
+ RealScalar x = numext::real(mat.coeff(k,k));
if (k>0) x -= A10.squaredNorm();
if (x<=RealScalar(0))
return k;
diff --git a/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h b/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h
index 37f142150ff..c449960de4a 100644
--- a/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h
+++ b/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -51,7 +51,6 @@ void cholmod_configure_matrix(CholmodType& mat)
template<typename _Scalar, int _Options, typename _Index>
cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
{
- typedef SparseMatrix<_Scalar,_Options,_Index> MatrixType;
cholmod_sparse res;
res.nzmax = mat.nonZeros();
res.nrow = mat.rows();;
@@ -59,10 +58,12 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
res.p = mat.outerIndexPtr();
res.i = mat.innerIndexPtr();
res.x = mat.valuePtr();
+ res.z = 0;
res.sorted = 1;
if(mat.isCompressed())
{
res.packed = 1;
+ res.nz = 0;
}
else
{
@@ -77,9 +78,13 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
{
res.itype = CHOLMOD_INT;
}
+ else if (internal::is_same<_Index,UF_long>::value)
+ {
+ res.itype = CHOLMOD_LONG;
+ }
else
{
- eigen_assert(false && "Index type different than int is not supported yet");
+ eigen_assert(false && "Index type not supported yet");
}
// setup res.xtype
@@ -123,7 +128,7 @@ cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
res.ncol = mat.cols();
res.nzmax = res.nrow * res.ncol;
res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
- res.x = mat.derived().data();
+ res.x = (void*)(mat.derived().data());
res.z = 0;
internal::cholmod_configure_matrix<Scalar>(res);
@@ -137,8 +142,8 @@ template<typename Scalar, int Flags, typename Index>
MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm)
{
return MappedSparseMatrix<Scalar,Flags,Index>
- (cm.nrow, cm.ncol, reinterpret_cast<Index*>(cm.p)[cm.ncol],
- reinterpret_cast<Index*>(cm.p), reinterpret_cast<Index*>(cm.i),reinterpret_cast<Scalar*>(cm.x) );
+ (cm.nrow, cm.ncol, static_cast<Index*>(cm.p)[cm.ncol],
+ static_cast<Index*>(cm.p), static_cast<Index*>(cm.i),static_cast<Scalar*>(cm.x) );
}
enum CholmodMode {
@@ -167,12 +172,14 @@ class CholmodBase : internal::noncopyable
CholmodBase()
: m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
{
+ m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0);
cholmod_start(&m_cholmod);
}
CholmodBase(const MatrixType& matrix)
: m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
{
+ m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0);
cholmod_start(&m_cholmod);
compute(matrix);
}
@@ -237,7 +244,7 @@ class CholmodBase : internal::noncopyable
return internal::sparse_solve_retval<CholmodBase, Rhs>(*this, b.derived());
}
- /** Performs a symbolic decomposition on the sparcity of \a matrix.
+ /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
* This function is particularly useful when solving for several problems having the same structure.
*
@@ -261,7 +268,7 @@ class CholmodBase : internal::noncopyable
/** Performs a numeric decomposition of \a matrix
*
- * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.
+ * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
@@ -269,9 +276,10 @@ class CholmodBase : internal::noncopyable
{
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
- cholmod_factorize(&A, m_cholmodFactor, &m_cholmod);
+ cholmod_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, &m_cholmod);
- this->m_info = Success;
+ // If the factorization failed, minor is the column at which it did. On success minor == n.
+ this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);
m_factorizationIsOk = true;
}
@@ -286,16 +294,18 @@ class CholmodBase : internal::noncopyable
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
const Index size = m_cholmodFactor->n;
+ EIGEN_UNUSED_VARIABLE(size);
eigen_assert(size==b.rows());
// note: cd stands for Cholmod Dense
- cholmod_dense b_cd = viewAsCholmod(b.const_cast_derived());
+ Rhs& b_ref(b.const_cast_derived());
+ cholmod_dense b_cd = viewAsCholmod(b_ref);
cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod);
if(!x_cd)
{
this->m_info = NumericalIssue;
}
- // TODO optimize this copy by swapping when possible (be carreful with alignment, etc.)
+ // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
cholmod_free_dense(&x_cd, &m_cholmod);
}
@@ -306,6 +316,7 @@ class CholmodBase : internal::noncopyable
{
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
const Index size = m_cholmodFactor->n;
+ EIGEN_UNUSED_VARIABLE(size);
eigen_assert(size==b.rows());
// note: cs stands for Cholmod Sparse
@@ -315,19 +326,36 @@ class CholmodBase : internal::noncopyable
{
this->m_info = NumericalIssue;
}
- // TODO optimize this copy by swapping when possible (be carreful with alignment, etc.)
+ // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs);
cholmod_free_sparse(&x_cs, &m_cholmod);
}
#endif // EIGEN_PARSED_BY_DOXYGEN
+
+ /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization.
+ *
+ * During the numerical factorization, an offset term is added to the diagonal coefficients:\n
+ * \c d_ii = \a offset + \c d_ii
+ *
+ * The default is \a offset=0.
+ *
+ * \returns a reference to \c *this.
+ */
+ Derived& setShift(const RealScalar& offset)
+ {
+ m_shiftOffset[0] = offset;
+ return derived();
+ }
+
template<typename Stream>
- void dumpMemory(Stream& s)
+ void dumpMemory(Stream& /*s*/)
{}
protected:
mutable cholmod_common m_cholmod;
cholmod_factor* m_cholmodFactor;
+ RealScalar m_shiftOffset[2];
mutable ComputationInfo m_info;
bool m_isInitialized;
int m_factorizationIsOk;
@@ -340,8 +368,8 @@ class CholmodBase : internal::noncopyable
*
* This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization
* using the Cholmod library.
- * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Thefore, it has little practical interest.
- * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
+ * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest.
+ * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
* X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@@ -387,8 +415,8 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
*
* This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization
* using the Cholmod library.
- * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Thefore, it has little practical interest.
- * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
+ * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest.
+ * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
* X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@@ -433,7 +461,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
* This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization
* using the Cholmod library.
* This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM.
- * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
+ * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
* X and B can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@@ -476,7 +504,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
* \brief A general Cholesky factorization and solver based on Cholmod
*
* This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization
- * using the Cholmod library. The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
+ * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
* X and B can be either dense or sparse.
*
* This variant permits to change the underlying Cholesky method at runtime.
diff --git a/extern/Eigen3/Eigen/src/Core/Array.h b/extern/Eigen3/Eigen/src/Core/Array.h
index aaa38997838..0ab03eff0f0 100644
--- a/extern/Eigen3/Eigen/src/Core/Array.h
+++ b/extern/Eigen3/Eigen/src/Core/Array.h
@@ -107,10 +107,10 @@ class Array
*
* \sa resize(Index,Index)
*/
- EIGEN_STRONG_INLINE explicit Array() : Base()
+ EIGEN_STRONG_INLINE Array() : Base()
{
Base::_check_template_params();
- EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -120,7 +120,7 @@ class Array
: Base(internal::constructor_without_unaligned_array_assert())
{
Base::_check_template_params();
- EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
#endif
@@ -137,15 +137,15 @@ class Array
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Array)
eigen_assert(dim >= 0);
eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
- EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename T0, typename T1>
- EIGEN_STRONG_INLINE Array(const T0& x, const T1& y)
+ EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1)
{
Base::_check_template_params();
- this->template _init2<T0,T1>(x, y);
+ this->template _init2<T0,T1>(val0, val1);
}
#else
/** constructs an uninitialized matrix with \a rows rows and \a cols columns.
@@ -155,27 +155,27 @@ class Array
* Matrix() instead. */
Array(Index rows, Index cols);
/** constructs an initialized 2D vector with given coefficients */
- Array(const Scalar& x, const Scalar& y);
+ Array(const Scalar& val0, const Scalar& val1);
#endif
/** constructs an initialized 3D vector with given coefficients */
- EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z)
+ EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2)
{
Base::_check_template_params();
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3)
- m_storage.data()[0] = x;
- m_storage.data()[1] = y;
- m_storage.data()[2] = z;
+ m_storage.data()[0] = val0;
+ m_storage.data()[1] = val1;
+ m_storage.data()[2] = val2;
}
/** constructs an initialized 4D vector with given coefficients */
- EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
+ EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3)
{
Base::_check_template_params();
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4)
- m_storage.data()[0] = x;
- m_storage.data()[1] = y;
- m_storage.data()[2] = z;
- m_storage.data()[3] = w;
+ m_storage.data()[0] = val0;
+ m_storage.data()[1] = val1;
+ m_storage.data()[2] = val2;
+ m_storage.data()[3] = val3;
}
explicit Array(const Scalar *data);
@@ -210,7 +210,7 @@ class Array
: Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
{
Base::_check_template_params();
- Base::resize(other.rows(), other.cols());
+ Base::_resize_to_match(other);
*this = other;
}
diff --git a/extern/Eigen3/Eigen/src/Core/ArrayBase.h b/extern/Eigen3/Eigen/src/Core/ArrayBase.h
index 004b117c933..38852600dc2 100644
--- a/extern/Eigen3/Eigen/src/Core/ArrayBase.h
+++ b/extern/Eigen3/Eigen/src/Core/ArrayBase.h
@@ -143,7 +143,7 @@ template<typename Derived> class ArrayBase
ArrayBase<Derived>& array() { return *this; }
const ArrayBase<Derived>& array() const { return *this; }
- /** \returns an \link MatrixBase Matrix \endlink expression of this array
+ /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array
* \sa MatrixBase::array() */
MatrixWrapper<Derived> matrix() { return derived(); }
const MatrixWrapper<const Derived> matrix() const { return derived(); }
diff --git a/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h b/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h
index 87af7fda937..a791bc3581a 100644
--- a/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h
+++ b/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h
@@ -55,22 +55,22 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
inline Index outerStride() const { return m_expression.outerStride(); }
inline Index innerStride() const { return m_expression.innerStride(); }
- inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+ inline ScalarWithConstIfNotLvalue* data() { return m_expression.const_cast_derived().data(); }
inline const Scalar* data() const { return m_expression.data(); }
- inline CoeffReturnType coeff(Index row, Index col) const
+ inline CoeffReturnType coeff(Index rowId, Index colId) const
{
- return m_expression.coeff(row, col);
+ return m_expression.coeff(rowId, colId);
}
- inline Scalar& coeffRef(Index row, Index col)
+ inline Scalar& coeffRef(Index rowId, Index colId)
{
- return m_expression.const_cast_derived().coeffRef(row, col);
+ return m_expression.const_cast_derived().coeffRef(rowId, colId);
}
- inline const Scalar& coeffRef(Index row, Index col) const
+ inline const Scalar& coeffRef(Index rowId, Index colId) const
{
- return m_expression.const_cast_derived().coeffRef(row, col);
+ return m_expression.const_cast_derived().coeffRef(rowId, colId);
}
inline CoeffReturnType coeff(Index index) const
@@ -89,15 +89,15 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
}
template<int LoadMode>
- inline const PacketScalar packet(Index row, Index col) const
+ inline const PacketScalar packet(Index rowId, Index colId) const
{
- return m_expression.template packet<LoadMode>(row, col);
+ return m_expression.template packet<LoadMode>(rowId, colId);
}
template<int LoadMode>
- inline void writePacket(Index row, Index col, const PacketScalar& x)
+ inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
{
- m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
+ m_expression.const_cast_derived().template writePacket<LoadMode>(rowId, colId, val);
}
template<int LoadMode>
@@ -107,9 +107,9 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
}
template<int LoadMode>
- inline void writePacket(Index index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& val)
{
- m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
+ m_expression.const_cast_derived().template writePacket<LoadMode>(index, val);
}
template<typename Dest>
@@ -121,6 +121,13 @@ class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
return m_expression;
}
+ /** Forwards the resizing request to the nested expression
+ * \sa DenseBase::resize(Index) */
+ void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
+ /** Forwards the resizing request to the nested expression
+ * \sa DenseBase::resize(Index,Index)*/
+ void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
+
protected:
NestedExpressionType m_expression;
};
@@ -161,29 +168,29 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
- inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {}
+ inline MatrixWrapper(ExpressionType& a_matrix) : m_expression(a_matrix) {}
inline Index rows() const { return m_expression.rows(); }
inline Index cols() const { return m_expression.cols(); }
inline Index outerStride() const { return m_expression.outerStride(); }
inline Index innerStride() const { return m_expression.innerStride(); }
- inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+ inline ScalarWithConstIfNotLvalue* data() { return m_expression.const_cast_derived().data(); }
inline const Scalar* data() const { return m_expression.data(); }
- inline CoeffReturnType coeff(Index row, Index col) const
+ inline CoeffReturnType coeff(Index rowId, Index colId) const
{
- return m_expression.coeff(row, col);
+ return m_expression.coeff(rowId, colId);
}
- inline Scalar& coeffRef(Index row, Index col)
+ inline Scalar& coeffRef(Index rowId, Index colId)
{
- return m_expression.const_cast_derived().coeffRef(row, col);
+ return m_expression.const_cast_derived().coeffRef(rowId, colId);
}
- inline const Scalar& coeffRef(Index row, Index col) const
+ inline const Scalar& coeffRef(Index rowId, Index colId) const
{
- return m_expression.derived().coeffRef(row, col);
+ return m_expression.derived().coeffRef(rowId, colId);
}
inline CoeffReturnType coeff(Index index) const
@@ -202,15 +209,15 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
}
template<int LoadMode>
- inline const PacketScalar packet(Index row, Index col) const
+ inline const PacketScalar packet(Index rowId, Index colId) const
{
- return m_expression.template packet<LoadMode>(row, col);
+ return m_expression.template packet<LoadMode>(rowId, colId);
}
template<int LoadMode>
- inline void writePacket(Index row, Index col, const PacketScalar& x)
+ inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
{
- m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
+ m_expression.const_cast_derived().template writePacket<LoadMode>(rowId, colId, val);
}
template<int LoadMode>
@@ -220,9 +227,9 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
}
template<int LoadMode>
- inline void writePacket(Index index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& val)
{
- m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
+ m_expression.const_cast_derived().template writePacket<LoadMode>(index, val);
}
const typename internal::remove_all<NestedExpressionType>::type&
@@ -231,6 +238,13 @@ class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
return m_expression;
}
+ /** Forwards the resizing request to the nested expression
+ * \sa DenseBase::resize(Index) */
+ void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
+ /** Forwards the resizing request to the nested expression
+ * \sa DenseBase::resize(Index,Index)*/
+ void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
+
protected:
NestedExpressionType m_expression;
};
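
The new resize() members on ArrayWrapper and MatrixWrapper forward through the view to the nested plain object, so resizing requests made via array() or matrix() now compile. A minimal sketch (assumes the wrapped object is itself resizable):

#include <Eigen/Dense>

int main() {
  Eigen::VectorXd v(3);
  v.array().resize(5);      // ArrayWrapper forwards to VectorXd::resize(Index)
  Eigen::ArrayXXd a(2, 2);
  a.matrix().resize(3, 4);  // MatrixWrapper forwards to Array::resize(Index,Index)
  return (v.size() == 5 && a.rows() == 3) ? 0 : 1;
}
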
diff --git a/extern/Eigen3/Eigen/src/Core/Assign.h b/extern/Eigen3/Eigen/src/Core/Assign.h
index cd29a88f0da..1dccc2f4212 100644
--- a/extern/Eigen3/Eigen/src/Core/Assign.h
+++ b/extern/Eigen3/Eigen/src/Core/Assign.h
@@ -155,7 +155,7 @@ struct assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_DefaultTraversal_InnerUnrolling
{
- static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer)
+ static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer)
{
dst.copyCoeffByOuterInner(outer, Index, src);
assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src, outer);
@@ -165,7 +165,7 @@ struct assign_DefaultTraversal_InnerUnrolling
template<typename Derived1, typename Derived2, int Stop>
struct assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Stop, Stop>
{
- static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {}
+ static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {}
};
/***********************
@@ -218,7 +218,7 @@ struct assign_innervec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
template<typename Derived1, typename Derived2, int Index, int Stop>
struct assign_innervec_InnerUnrolling
{
- static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer)
+ static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer)
{
dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, Index, src);
assign_innervec_InnerUnrolling<Derived1, Derived2,
@@ -229,7 +229,7 @@ struct assign_innervec_InnerUnrolling
template<typename Derived1, typename Derived2, int Stop>
struct assign_innervec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
{
- static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {}
+ static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {}
};
/***************************************************************************
@@ -507,19 +507,19 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
namespace internal {
template<typename Derived, typename OtherDerived,
- bool EvalBeforeAssigning = (int(OtherDerived::Flags) & EvalBeforeAssigningBit) != 0,
- bool NeedToTranspose = Derived::IsVectorAtCompileTime
- && OtherDerived::IsVectorAtCompileTime
- && ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1)
- | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
- // revert to || as soon as not needed anymore.
- (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1))
- && int(Derived::SizeAtCompileTime) != 1>
+ bool EvalBeforeAssigning = (int(internal::traits<OtherDerived>::Flags) & EvalBeforeAssigningBit) != 0,
+ bool NeedToTranspose = ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1)
+ | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
+ // revert to || as soon as not needed anymore.
+ (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1))
+ && int(Derived::SizeAtCompileTime) != 1>
struct assign_selector;
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,false,false> {
static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
+ template<typename ActualDerived, typename ActualOtherDerived>
+ static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { other.evalTo(dst); return dst; }
};
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,true,false> {
@@ -528,6 +528,8 @@ struct assign_selector<Derived,OtherDerived,true,false> {
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,false,true> {
static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); }
+ template<typename ActualDerived, typename ActualOtherDerived>
+ static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { Transpose<ActualDerived> dstTrans(dst); other.evalTo(dstTrans); return dst; }
};
template<typename Derived, typename OtherDerived>
struct assign_selector<Derived,OtherDerived,true,true> {
@@ -566,16 +568,14 @@ template<typename Derived>
template <typename OtherDerived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other)
{
- other.derived().evalTo(derived());
- return derived();
+ return internal::assign_selector<Derived,OtherDerived,false>::evalTo(derived(), other.derived());
}
template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
- other.evalTo(derived());
- return derived();
+ return internal::assign_selector<Derived,OtherDerived,false>::evalTo(derived(), other.derived());
}
} // end namespace Eigen
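
Routing these operator= overloads through assign_selector means EigenBase and ReturnByValue sources now follow the same compile-time transposition rule as ordinary dense assignment. A hedged sketch of that rule (behavior for matching shapes is unchanged):

#include <Eigen/Dense>

int main() {
  Eigen::RowVector3d r(1.0, 2.0, 3.0);
  Eigen::Vector3d v;
  v = r;              // NeedToTranspose: assigned as r.transpose()
  Eigen::Vector3d w;
  w = r.transpose();  // shapes already agree; plain lazy assignment
  return v.isApprox(w) ? 0 : 1;
}
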
diff --git a/extern/Eigen3/Eigen/src/Core/Assign_MKL.h b/extern/Eigen3/Eigen/src/Core/Assign_MKL.h
index 428c6367b92..7772951b915 100644
--- a/extern/Eigen3/Eigen/src/Core/Assign_MKL.h
+++ b/extern/Eigen3/Eigen/src/Core/Assign_MKL.h
@@ -210,7 +210,7 @@ EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sqrt, Sqrt)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr)
// The vm*powx functions are not available in the Windows version of MKL.

-#ifdef _WIN32
+#ifndef _WIN32
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmspowx_, float, float)
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdpowx_, double, double)
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcpowx_, scomplex, MKL_Complex8)
diff --git a/extern/Eigen3/Eigen/src/Core/Block.h b/extern/Eigen3/Eigen/src/Core/Block.h
index 5f29cb3d1b3..358b3188b38 100644
--- a/extern/Eigen3/Eigen/src/Core/Block.h
+++ b/extern/Eigen3/Eigen/src/Core/Block.h
@@ -21,7 +21,6 @@ namespace Eigen {
* \param XprType the type of the expression in which we are taking a block
* \param BlockRows the number of rows of the block we are taking at compile time (optional)
* \param BlockCols the number of columns of the block we are taking at compile time (optional)
- * \param _DirectAccessStatus \internal used for partial specialization
*
* This class represents an expression of either a fixed-size or dynamic-size block. It is the return
* type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
@@ -47,8 +46,8 @@ namespace Eigen {
*/
namespace internal {
-template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess>
-struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> > : traits<XprType>
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprType>
{
typedef typename traits<XprType>::Scalar Scalar;
typedef typename traits<XprType>::StorageKind StorageKind;
@@ -92,30 +91,27 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess>
Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit
};
};
-}
-
-template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class Block
- : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> >::type
-{
- public:
- typedef typename internal::dense_xpr_base<Block>::type Base;
- EIGEN_DENSE_PUBLIC_INTERFACE(Block)
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
+ bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class BlockImpl_dense;
+
+} // end namespace internal
- class InnerIterator;
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind> class BlockImpl;
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class Block
+ : public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind>
+{
+ typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl;
+ public:
+ //typedef typename Impl::Base Base;
+ typedef Impl Base;
+ EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
+
/** Column or Row constructor
*/
- inline Block(XprType& xpr, Index i)
- : m_xpr(xpr),
- // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
- // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
- // all other cases are invalid.
- // The case a 1x1 matrix seems ambiguous, but the result is the same anyway.
- m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
- m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
- m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
- m_blockCols(BlockCols==1 ? 1 : xpr.cols())
+ inline Block(XprType& xpr, Index i) : Impl(xpr,i)
{
eigen_assert( (i>=0) && (
((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
@@ -124,50 +120,109 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
/** Fixed-size constructor
*/
- inline Block(XprType& xpr, Index startRow, Index startCol)
- : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
- m_blockRows(BlockRows), m_blockCols(BlockCols)
+ inline Block(XprType& xpr, Index a_startRow, Index a_startCol)
+ : Impl(xpr, a_startRow, a_startCol)
{
EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
- eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
- && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
+ eigen_assert(a_startRow >= 0 && BlockRows >= 1 && a_startRow + BlockRows <= xpr.rows()
+ && a_startCol >= 0 && BlockCols >= 1 && a_startCol + BlockCols <= xpr.cols());
}
/** Dynamic-size constructor
*/
inline Block(XprType& xpr,
- Index startRow, Index startCol,
+ Index a_startRow, Index a_startCol,
Index blockRows, Index blockCols)
- : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
- m_blockRows(blockRows), m_blockCols(blockCols)
+ : Impl(xpr, a_startRow, a_startCol, blockRows, blockCols)
{
eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
&& (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
- eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
- && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
+ eigen_assert(a_startRow >= 0 && blockRows >= 0 && a_startRow <= xpr.rows() - blockRows
+ && a_startCol >= 0 && blockCols >= 0 && a_startCol <= xpr.cols() - blockCols);
}
+};
+
+// The generic default implementation for dense blocks simply forwards to the internal::BlockImpl_dense
+// that must be specialized for direct and non-direct access...
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
+ : public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel>
+{
+ typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;
+ typedef typename XprType::Index Index;
+ public:
+ typedef Impl Base;
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+ inline BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {}
+ inline BlockImpl(XprType& xpr, Index a_startRow, Index a_startCol) : Impl(xpr, a_startRow, a_startCol) {}
+ inline BlockImpl(XprType& xpr, Index a_startRow, Index a_startCol, Index blockRows, Index blockCols)
+ : Impl(xpr, a_startRow, a_startCol, blockRows, blockCols) {}
+};
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
+namespace internal {
+
+/** \internal Internal implementation of dense Blocks in the general case. */
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class BlockImpl_dense
+ : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel> >::type
+{
+ typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+ public:
+
+ typedef typename internal::dense_xpr_base<BlockType>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)
+
+ class InnerIterator;
+
+ /** Column or Row constructor
+ */
+ inline BlockImpl_dense(XprType& xpr, Index i)
+ : m_xpr(xpr),
+ // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
+ // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
+ // all other cases are invalid.
+ // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway.
+ m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
+ m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+ m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+ m_blockCols(BlockCols==1 ? 1 : xpr.cols())
+ {}
+
+ /** Fixed-size constructor
+ */
+ inline BlockImpl_dense(XprType& xpr, Index a_startRow, Index a_startCol)
+ : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol),
+ m_blockRows(BlockRows), m_blockCols(BlockCols)
+ {}
+
+ /** Dynamic-size constructor
+ */
+ inline BlockImpl_dense(XprType& xpr,
+ Index a_startRow, Index a_startCol,
+ Index blockRows, Index blockCols)
+ : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol),
+ m_blockRows(blockRows), m_blockCols(blockCols)
+ {}
inline Index rows() const { return m_blockRows.value(); }
inline Index cols() const { return m_blockCols.value(); }
- inline Scalar& coeffRef(Index row, Index col)
+ inline Scalar& coeffRef(Index rowId, Index colId)
{
EIGEN_STATIC_ASSERT_LVALUE(XprType)
return m_xpr.const_cast_derived()
- .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+ .coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
}
- inline const Scalar& coeffRef(Index row, Index col) const
+ inline const Scalar& coeffRef(Index rowId, Index colId) const
{
return m_xpr.derived()
- .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+ .coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
}
- EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const
+ EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const
{
- return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value());
+ return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value());
}
inline Scalar& coeffRef(Index index)
@@ -193,17 +248,17 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
}
template<int LoadMode>
- inline PacketScalar packet(Index row, Index col) const
+ inline PacketScalar packet(Index rowId, Index colId) const
{
return m_xpr.template packet<Unaligned>
- (row + m_startRow.value(), col + m_startCol.value());
+ (rowId + m_startRow.value(), colId + m_startCol.value());
}
template<int LoadMode>
- inline void writePacket(Index row, Index col, const PacketScalar& x)
+ inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
{
m_xpr.const_cast_derived().template writePacket<Unaligned>
- (row + m_startRow.value(), col + m_startCol.value(), x);
+ (rowId + m_startRow.value(), colId + m_startCol.value(), val);
}
template<int LoadMode>
@@ -215,11 +270,11 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
}
template<int LoadMode>
- inline void writePacket(Index index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& val)
{
m_xpr.const_cast_derived().template writePacket<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), x);
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val);
}
#ifdef EIGEN_PARSED_BY_DOXYGEN
@@ -253,21 +308,21 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
};
-/** \internal */
+/** \internal Internal implementation of dense Blocks in the direct access case.*/
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
-class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
- : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel, true> >
+class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
+ : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel> >
{
+ typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
public:
- typedef MapBase<Block> Base;
- EIGEN_DENSE_PUBLIC_INTERFACE(Block)
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
+ typedef MapBase<BlockType> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)
/** Column or Row constructor
*/
- inline Block(XprType& xpr, Index i)
+ inline BlockImpl_dense(XprType& xpr, Index i)
: Base(internal::const_cast_ptr(&xpr.coeffRef(
(BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0,
(BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)),
@@ -275,34 +330,25 @@ class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
BlockCols==1 ? 1 : xpr.cols()),
m_xpr(xpr)
{
- eigen_assert( (i>=0) && (
- ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
- ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
init();
}
/** Fixed-size constructor
*/
- inline Block(XprType& xpr, Index startRow, Index startCol)
+ inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
: Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol))), m_xpr(xpr)
{
- eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
- && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
init();
}
/** Dynamic-size constructor
*/
- inline Block(XprType& xpr,
+ inline BlockImpl_dense(XprType& xpr,
Index startRow, Index startCol,
Index blockRows, Index blockCols)
: Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol)), blockRows, blockCols),
m_xpr(xpr)
{
- eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
- && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
- eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
- && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
init();
}
@@ -314,7 +360,7 @@ class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
/** \sa MapBase::innerStride() */
inline Index innerStride() const
{
- return internal::traits<Block>::HasSameStorageOrderAsXprType
+ return internal::traits<BlockType>::HasSameStorageOrderAsXprType
? m_xpr.innerStride()
: m_xpr.outerStride();
}
@@ -333,7 +379,7 @@ class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal used by allowAligned() */
- inline Block(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
+ inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
: Base(data, blockRows, blockCols), m_xpr(xpr)
{
init();
@@ -343,7 +389,7 @@ class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
protected:
void init()
{
- m_outerStride = internal::traits<Block>::HasSameStorageOrderAsXprType
+ m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType
? m_xpr.outerStride()
: m_xpr.innerStride();
}
@@ -352,6 +398,8 @@ class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
Index m_outerStride;
};
+} // end namespace internal
+
} // end namespace Eigen
#endif // EIGEN_BLOCK_H
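
The Block refactor splits the user-facing Block class from BlockImpl (dispatched on storage kind) and internal::BlockImpl_dense (dispatched on direct access), preparing for sparse block specializations; the public API is unchanged. A sketch of the unaffected interface:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd m = Eigen::MatrixXd::Constant(4, 4, 1.0);
  m.block<2, 2>(0, 0).setZero();         // fixed-size block, direct-access path
  m.block(1, 1, 2, 3).setOnes();         // dynamic-size block
  Eigen::MatrixXd::RowXpr r = m.row(0);  // column/row constructor path
  r *= 2.0;
  return 0;
}
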
diff --git a/extern/Eigen3/Eigen/src/Core/BooleanRedux.h b/extern/Eigen3/Eigen/src/Core/BooleanRedux.h
index 57efd8e6953..be9f48a8c71 100644
--- a/extern/Eigen3/Eigen/src/Core/BooleanRedux.h
+++ b/extern/Eigen3/Eigen/src/Core/BooleanRedux.h
@@ -29,9 +29,9 @@ struct all_unroller
};
template<typename Derived>
-struct all_unroller<Derived, 1>
+struct all_unroller<Derived, 0>
{
- static inline bool run(const Derived &mat) { return mat.coeff(0, 0); }
+ static inline bool run(const Derived &/*mat*/) { return true; }
};
template<typename Derived>
@@ -55,9 +55,9 @@ struct any_unroller
};
template<typename Derived>
-struct any_unroller<Derived, 1>
+struct any_unroller<Derived, 0>
{
- static inline bool run(const Derived &mat) { return mat.coeff(0, 0); }
+ static inline bool run(const Derived & /*mat*/) { return false; }
};
template<typename Derived>
@@ -85,9 +85,7 @@ inline bool DenseBase<Derived>::all() const
&& SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
};
if(unroll)
- return internal::all_unroller<Derived,
- unroll ? int(SizeAtCompileTime) : Dynamic
- >::run(derived());
+ return internal::all_unroller<Derived, unroll ? int(SizeAtCompileTime) : Dynamic>::run(derived());
else
{
for(Index j = 0; j < cols(); ++j)
@@ -111,9 +109,7 @@ inline bool DenseBase<Derived>::any() const
&& SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
};
if(unroll)
- return internal::any_unroller<Derived,
- unroll ? int(SizeAtCompileTime) : Dynamic
- >::run(derived());
+ return internal::any_unroller<Derived, unroll ? int(SizeAtCompileTime) : Dynamic>::run(derived());
else
{
for(Index j = 0; j < cols(); ++j)
@@ -133,6 +129,26 @@ inline typename DenseBase<Derived>::Index DenseBase<Derived>::count() const
return derived().template cast<bool>().template cast<Index>().sum();
}
+/** \returns true if \c *this contains at least one Not A Number (NaN).
+ *
+ * \sa allFinite()
+ */
+template<typename Derived>
+inline bool DenseBase<Derived>::hasNaN() const
+{
+ return !((derived().array()==derived().array()).all());
+}
+
+/** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values.
+ *
+ * \sa hasNaN()
+ */
+template<typename Derived>
+inline bool DenseBase<Derived>::allFinite() const
+{
+ return !((derived()-derived()).hasNaN());
+}
+
} // end namespace Eigen
#endif // EIGEN_ALLANDANY_H
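
hasNaN() exploits that NaN is the only value for which x == x is false, and allFinite() that subtracting an infinity from itself yields NaN, so neither needs a per-coefficient branch. Usage sketch:

#include <Eigen/Dense>
#include <iostream>
#include <limits>

int main() {
  Eigen::Matrix2d m;
  m << 1.0, 2.0,
       3.0, 4.0;
  std::cout << m.hasNaN() << " " << m.allFinite() << "\n";  // 0 1
  m(1, 0) = std::numeric_limits<double>::quiet_NaN();
  std::cout << m.hasNaN() << "\n";                          // 1: NaN != NaN
  m(1, 0) = std::numeric_limits<double>::infinity();
  std::cout << m.allFinite() << "\n";                       // 0: inf - inf is NaN
  return 0;
}
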
diff --git a/extern/Eigen3/Eigen/src/Core/CommaInitializer.h b/extern/Eigen3/Eigen/src/Core/CommaInitializer.h
index 4adce64143c..a96867af4d5 100644
--- a/extern/Eigen3/Eigen/src/Core/CommaInitializer.h
+++ b/extern/Eigen3/Eigen/src/Core/CommaInitializer.h
@@ -65,6 +65,8 @@ struct CommaInitializer
template<typename OtherDerived>
CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
{
+ if(other.cols()==0 || other.rows()==0)
+ return *this;
if (m_col==m_xpr.cols())
{
m_row+=m_currentBlockRows;
@@ -116,6 +118,8 @@ struct CommaInitializer
*
* Example: \include MatrixBase_set.cpp
* Output: \verbinclude MatrixBase_set.out
+ *
+ * \note According to the C++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
*
* \sa CommaInitializer::finished(), class CommaInitializer
*/
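
With the new early return, zero-sized operands no longer advance the comma-initializer fill position, so empty blocks can appear in the list. A sketch, assuming the first operand is non-empty:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd left(2, 2), empty(2, 0), right(2, 2);
  left  << 1, 2, 3, 4;
  right << 5, 6, 7, 8;
  Eigen::MatrixXd joined(2, 4);
  joined << left, empty, right;  // the 2x0 operand is skipped
  return joined.cols() == 4 ? 0 : 1;
}
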
diff --git a/extern/Eigen3/Eigen/src/Core/CoreIterators.h b/extern/Eigen3/Eigen/src/Core/CoreIterators.h
new file mode 100644
index 00000000000..6da4683d2c2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/CoreIterators.h
@@ -0,0 +1,61 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COREITERATORS_H
+#define EIGEN_COREITERATORS_H
+
+namespace Eigen {
+
+/* This file contains the InnerIterator definitions for the expressions defined in Eigen/Core
+ */
+
+/** \ingroup SparseCore_Module
+ * \class InnerIterator
+ * \brief An InnerIterator allows iterating over the elements of a sparse (or dense) matrix or expression
+ *
+ * todo
+ */
+
+// generic version for dense matrix and expressions
+template<typename Derived> class DenseBase<Derived>::InnerIterator
+{
+ protected:
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+
+ enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer)
+ : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.innerSize())
+ {}
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ {
+ return (IsRowMajor) ? m_expression.coeff(m_outer, m_inner)
+ : m_expression.coeff(m_inner, m_outer);
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+ protected:
+ const Derived& m_expression;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_COREITERATORS_H
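
For dense expressions the iterator walks one inner vector at a time (a column, in the default column-major storage), mirroring the protocol of the sparse InnerIterator. Usage sketch:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d m;
  m << 1, 2,
       3, 4;
  for (int outer = 0; outer < m.outerSize(); ++outer)
    for (Eigen::Matrix2d::InnerIterator it(m, outer); it; ++it)  // one column at a time
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
  return 0;
}
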
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h
index 1b93af31b60..586f77aaf32 100644
--- a/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h
+++ b/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h
@@ -94,8 +94,8 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
// add together a float matrix and a double matrix.
#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
- EIGEN_STATIC_ASSERT((internal::functor_allows_mixing_real_and_complex<BINOP>::ret \
- ? int(internal::is_same<typename NumTraits<LHS>::Real, typename NumTraits<RHS>::Real>::value) \
+ EIGEN_STATIC_ASSERT((internal::functor_is_product_like<BINOP>::ret \
+ ? int(internal::scalar_product_traits<LHS, RHS>::Defined) \
: int(internal::is_same<LHS, RHS>::value)), \
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
@@ -122,13 +122,13 @@ class CwiseBinaryOp : internal::no_assignment_operator,
typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
typedef typename internal::remove_reference<RhsNested>::type _RhsNested;
- EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp())
- : m_lhs(lhs), m_rhs(rhs), m_functor(func)
+ EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())
+ : m_lhs(aLhs), m_rhs(aRhs), m_functor(func)
{
EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);
// require the sizes to match
EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
- eigen_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
+ eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
}
EIGEN_STRONG_INLINE Index rows() const {
@@ -169,17 +169,17 @@ class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Dense>
typedef typename internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
- EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
{
- return derived().functor()(derived().lhs().coeff(row, col),
- derived().rhs().coeff(row, col));
+ return derived().functor()(derived().lhs().coeff(rowId, colId),
+ derived().rhs().coeff(rowId, colId));
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
{
- return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(row, col),
- derived().rhs().template packet<LoadMode>(row, col));
+ return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(rowId, colId),
+ derived().rhs().template packet<LoadMode>(rowId, colId));
}
EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
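
The reworked compatibility check accepts mixed scalar types whenever the functor is product-like and internal::scalar_product_traits defines a result type, for instance double with std::complex<double>; other binary ops still require identical scalars. A hedged sketch:

#include <Eigen/Dense>
#include <complex>

int main() {
  Eigen::MatrixXd  a = Eigen::MatrixXd::Random(2, 2);
  Eigen::MatrixXcd b = Eigen::MatrixXcd::Random(2, 2);
  Eigen::MatrixXcd c = a.cwiseProduct(b);  // product-like: double * complex<double> is defined
  // Eigen::MatrixXcd d = a + b;           // still a compile error: sums need the same scalar
  return c.rows() == 2 ? 0 : 1;
}
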
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseNullaryOp.h b/extern/Eigen3/Eigen/src/Core/CwiseNullaryOp.h
index 2635a62b07b..a93bab2d0f9 100644
--- a/extern/Eigen3/Eigen/src/Core/CwiseNullaryOp.h
+++ b/extern/Eigen3/Eigen/src/Core/CwiseNullaryOp.h
@@ -54,27 +54,27 @@ class CwiseNullaryOp : internal::no_assignment_operator,
typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
- CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
- : m_rows(rows), m_cols(cols), m_functor(func)
+ CwiseNullaryOp(Index nbRows, Index nbCols, const NullaryOp& func = NullaryOp())
+ : m_rows(nbRows), m_cols(nbCols), m_functor(func)
{
- eigen_assert(rows >= 0
- && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
- && cols >= 0
- && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
+ eigen_assert(nbRows >= 0
+ && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows)
+ && nbCols >= 0
+ && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols));
}
EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
- EIGEN_STRONG_INLINE const Scalar coeff(Index rows, Index cols) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
{
- return m_functor(rows, cols);
+ return m_functor(rowId, colId);
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
{
- return m_functor.packetOp(row, col);
+ return m_functor.packetOp(rowId, colId);
}
EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
@@ -163,11 +163,11 @@ DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
/** \returns an expression of a constant matrix of value \a value
*
- * The parameters \a rows and \a cols are the number of rows and of columns of
+ * The parameters \a nbRows and \a nbCols are the number of rows and of columns of
* the returned matrix. Must be compatible with this DenseBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
- * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
+ * it is redundant to pass \a nbRows and \a nbCols as arguments, so Zero() should be used
* instead.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
@@ -176,9 +176,9 @@ DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
+DenseBase<Derived>::Constant(Index nbRows, Index nbCols, const Scalar& value)
{
- return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
+ return DenseBase<Derived>::NullaryExpr(nbRows, nbCols, internal::scalar_constant_op<Scalar>(value));
}
/** \returns an expression of a constant matrix of value \a value
@@ -292,14 +292,14 @@ DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,true>(low,high,Derived::SizeAtCompileTime));
}
-/** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
+/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
template<typename Derived>
bool DenseBase<Derived>::isApproxToConstant
-(const Scalar& value, RealScalar prec) const
+(const Scalar& val, const RealScalar& prec) const
{
for(Index j = 0; j < cols(); ++j)
for(Index i = 0; i < rows(); ++i)
- if(!internal::isApprox(this->coeff(i, j), value, prec))
+ if(!internal::isApprox(this->coeff(i, j), val, prec))
return false;
return true;
}
@@ -309,19 +309,19 @@ bool DenseBase<Derived>::isApproxToConstant
* \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
template<typename Derived>
bool DenseBase<Derived>::isConstant
-(const Scalar& value, RealScalar prec) const
+(const Scalar& val, const RealScalar& prec) const
{
- return isApproxToConstant(value, prec);
+ return isApproxToConstant(val, prec);
}
-/** Alias for setConstant(): sets all coefficients in this expression to \a value.
+/** Alias for setConstant(): sets all coefficients in this expression to \a val.
*
* \sa setConstant(), Constant(), class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& value)
+EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val)
{
- setConstant(value);
+ setConstant(val);
}
/** Sets all coefficients in this expression to \a value.
@@ -329,9 +329,9 @@ EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& value)
* \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value)
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val)
{
- return derived() = Constant(rows(), cols(), value);
+ return derived() = Constant(rows(), cols(), val);
}
/** Resizes to the given \a size, and sets all coefficients in this expression to the given \a value.
@@ -345,17 +345,17 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setConstant(Index size, const Scalar& value)
+PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val)
{
resize(size);
- return setConstant(value);
+ return setConstant(val);
}
/** Resizes to the given size, and sets all coefficients in this expression to the given \a value.
*
- * \param rows the new number of rows
- * \param cols the new number of columns
- * \param value the value to which all coefficients are set
+ * \param nbRows the new number of rows
+ * \param nbCols the new number of columns
+ * \param val the value to which all coefficients are set
*
* Example: \include Matrix_setConstant_int_int.cpp
* Output: \verbinclude Matrix_setConstant_int_int.out
@@ -364,10 +364,10 @@ PlainObjectBase<Derived>::setConstant(Index size, const Scalar& value)
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& value)
+PlainObjectBase<Derived>::setConstant(Index nbRows, Index nbCols, const Scalar& val)
{
- resize(rows, cols);
- return setConstant(value);
+ resize(nbRows, nbCols);
+ return setConstant(val);
}
/**
@@ -384,10 +384,10 @@ PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& valu
* \sa CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index size, const Scalar& low, const Scalar& high)
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return derived() = Derived::NullaryExpr(size, internal::linspaced_op<Scalar,false>(low,high,size));
+ return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,false>(low,high,newSize));
}
/**
@@ -425,9 +425,9 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low,
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Zero(Index rows, Index cols)
+DenseBase<Derived>::Zero(Index nbRows, Index nbCols)
{
- return Constant(rows, cols, Scalar(0));
+ return Constant(nbRows, nbCols, Scalar(0));
}
/** \returns an expression of a zero vector.
@@ -479,7 +479,7 @@ DenseBase<Derived>::Zero()
* \sa class CwiseNullaryOp, Zero()
*/
template<typename Derived>
-bool DenseBase<Derived>::isZero(RealScalar prec) const
+bool DenseBase<Derived>::isZero(const RealScalar& prec) const
{
for(Index j = 0; j < cols(); ++j)
for(Index i = 0; i < rows(); ++i)
@@ -512,16 +512,16 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setZero(Index size)
+PlainObjectBase<Derived>::setZero(Index newSize)
{
- resize(size);
+ resize(newSize);
return setConstant(Scalar(0));
}
/** Resizes to the given size, and sets all coefficients in this expression to zero.
*
- * \param rows the new number of rows
- * \param cols the new number of columns
+ * \param nbRows the new number of rows
+ * \param nbCols the new number of columns
*
* Example: \include Matrix_setZero_int_int.cpp
* Output: \verbinclude Matrix_setZero_int_int.out
@@ -530,9 +530,9 @@ PlainObjectBase<Derived>::setZero(Index size)
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setZero(Index rows, Index cols)
+PlainObjectBase<Derived>::setZero(Index nbRows, Index nbCols)
{
- resize(rows, cols);
+ resize(nbRows, nbCols);
return setConstant(Scalar(0));
}
@@ -540,7 +540,7 @@ PlainObjectBase<Derived>::setZero(Index rows, Index cols)
/** \returns an expression of a matrix where all coefficients equal one.
*
- * The parameters \a rows and \a cols are the number of rows and of columns of
+ * The parameters \a nbRows and \a nbCols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
@@ -554,14 +554,14 @@ PlainObjectBase<Derived>::setZero(Index rows, Index cols)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Ones(Index rows, Index cols)
+DenseBase<Derived>::Ones(Index nbRows, Index nbCols)
{
- return Constant(rows, cols, Scalar(1));
+ return Constant(nbRows, nbCols, Scalar(1));
}
/** \returns an expression of a vector where all coefficients equal one.
*
- * The parameter \a size is the size of the returned vector.
+ * The parameter \a newSize is the size of the returned vector.
* Must be compatible with this MatrixBase type.
*
* \only_for_vectors
@@ -577,9 +577,9 @@ DenseBase<Derived>::Ones(Index rows, Index cols)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
-DenseBase<Derived>::Ones(Index size)
+DenseBase<Derived>::Ones(Index newSize)
{
- return Constant(size, Scalar(1));
+ return Constant(newSize, Scalar(1));
}
/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
@@ -609,7 +609,7 @@ DenseBase<Derived>::Ones()
*/
template<typename Derived>
bool DenseBase<Derived>::isOnes
-(RealScalar prec) const
+(const RealScalar& prec) const
{
return isApproxToConstant(Scalar(1), prec);
}
@@ -627,7 +627,7 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
return setConstant(Scalar(1));
}
-/** Resizes to the given \a size, and sets all coefficients in this expression to one.
+/** Resizes to the given \a newSize, and sets all coefficients in this expression to one.
*
* \only_for_vectors
*
@@ -638,16 +638,16 @@ EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setOnes(Index size)
+PlainObjectBase<Derived>::setOnes(Index newSize)
{
- resize(size);
+ resize(newSize);
return setConstant(Scalar(1));
}
/** Resizes to the given size, and sets all coefficients in this expression to one.
*
- * \param rows the new number of rows
- * \param cols the new number of columns
+ * \param nbRows the new number of rows
+ * \param nbCols the new number of columns
*
* Example: \include Matrix_setOnes_int_int.cpp
* Output: \verbinclude Matrix_setOnes_int_int.out
@@ -656,9 +656,9 @@ PlainObjectBase<Derived>::setOnes(Index size)
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
+PlainObjectBase<Derived>::setOnes(Index nbRows, Index nbCols)
{
- resize(rows, cols);
+ resize(nbRows, nbCols);
return setConstant(Scalar(1));
}
@@ -666,7 +666,7 @@ PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
/** \returns an expression of the identity matrix (not necessarily square).
*
- * The parameters \a rows and \a cols are the number of rows and of columns of
+ * The parameters \a nbRows and \a nbCols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
@@ -680,9 +680,9 @@ PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
-MatrixBase<Derived>::Identity(Index rows, Index cols)
+MatrixBase<Derived>::Identity(Index nbRows, Index nbCols)
{
- return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());
+ return DenseBase<Derived>::NullaryExpr(nbRows, nbCols, internal::scalar_identity_op<Scalar>());
}
/** \returns an expression of the identity matrix (not necessarily square).
@@ -714,7 +714,7 @@ MatrixBase<Derived>::Identity()
*/
template<typename Derived>
bool MatrixBase<Derived>::isIdentity
-(RealScalar prec) const
+(const RealScalar& prec) const
{
for(Index j = 0; j < cols(); ++j)
{
@@ -776,8 +776,8 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
*
- * \param rows the new number of rows
- * \param cols the new number of columns
+ * \param nbRows the new number of rows
+ * \param nbCols the new number of columns
*
* Example: \include Matrix_setIdentity_int_int.cpp
* Output: \verbinclude Matrix_setIdentity_int_int.out
@@ -785,9 +785,9 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index nbRows, Index nbCols)
{
- derived().resize(rows, cols);
+ derived().resize(nbRows, nbCols);
return setIdentity();
}
@@ -798,10 +798,10 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index
* \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index size, Index i)
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index newSize, Index i)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return BasisReturnType(SquareMatrixType::Identity(size,size), i);
+ return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i);
}
/** \returns an expression of the i-th unit (basis) vector.
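
The parameter renames in this file (rows/cols to nbRows/nbCols, size to newSize, value to val) are documentation-level only; call sites are unaffected. Typical uses of these nullary-expression entry points, for reference:

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd m;
  m.setConstant(3, 4, 1.5);                              // resize to 3x4, fill with 1.5
  Eigen::VectorXd v;
  v.setLinSpaced(5, 0.0, 1.0);                           // 5 evenly spaced samples in [0,1]
  Eigen::MatrixXd id = Eigen::MatrixXd::Identity(3, 4);  // not necessarily square
  Eigen::VectorXd e1 = Eigen::VectorXd::Unit(4, 1);      // 4D basis vector e_1
  return 0;
}
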
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h b/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h
index 063355ae521..f2de749f92b 100644
--- a/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h
+++ b/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h
@@ -98,15 +98,15 @@ class CwiseUnaryOpImpl<UnaryOp,XprType,Dense>
typedef typename internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
- EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
{
- return derived().functor()(derived().nestedExpression().coeff(row, col));
+ return derived().functor()(derived().nestedExpression().coeff(rowId, colId));
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
{
- return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(row, col));
+ return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(rowId, colId));
}
EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h b/extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h
index 66f73a9505b..b2638d3265b 100644
--- a/extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h
+++ b/extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h
@@ -44,9 +44,10 @@ struct traits<CwiseUnaryView<ViewOp, MatrixType> >
// "error: no integral type can represent all of the enumerator values
InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic
? int(Dynamic)
- : int(MatrixTypeInnerStride)
- * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)),
- OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret
+ : int(MatrixTypeInnerStride) * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)),
+ OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret == Dynamic
+ ? int(Dynamic)
+ : outer_stride_at_compile_time<MatrixType>::ret * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar))
};
};
}
@@ -55,8 +56,7 @@ template<typename ViewOp, typename MatrixType, typename StorageKind>
class CwiseUnaryViewImpl;
template<typename ViewOp, typename MatrixType>
-class CwiseUnaryView : internal::no_assignment_operator,
- public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>
+class CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>
{
public:
@@ -98,6 +98,10 @@ class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
typedef typename internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)
+
+ inline Scalar* data() { return &coeffRef(0); }
+ inline const Scalar* data() const { return &coeff(0); }
inline Index innerStride() const
{
@@ -106,7 +110,7 @@ class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
inline Index outerStride() const
{
- return derived().nestedExpression().outerStride();
+ return derived().nestedExpression().outerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);
}
EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
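
The stride and data() changes matter for views that reinterpret the scalar type, such as real() and imag() on a complex matrix: strides are now reported in units of the view scalar (double) rather than the nested scalar (std::complex<double>). A sketch (the printed stride assumes column-major 3x3 storage):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXcd m = Eigen::MatrixXcd::Random(3, 3);
  // Outer stride: 3 complex coefficients = 6 doubles after the fix.
  std::cout << m.real().outerStride() << "\n";
  m.real()(0, 0) = 42.0;  // the view is writable and now also exposes data()
  std::cout << m(0, 0) << "\n";
  return 0;
}
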
diff --git a/extern/Eigen3/Eigen/src/Core/DenseBase.h b/extern/Eigen3/Eigen/src/Core/DenseBase.h
index 1cc0314ef0b..c5800f6c8c8 100644
--- a/extern/Eigen3/Eigen/src/Core/DenseBase.h
+++ b/extern/Eigen3/Eigen/src/Core/DenseBase.h
@@ -13,6 +13,16 @@
namespace Eigen {
+namespace internal {
+
+// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
+// This dummy function simply aims at checking that at compile time.
+static inline void check_DenseIndex_is_signed() {
+ EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+}
+
+} // end namespace internal
+
/** \class DenseBase
* \ingroup Core_Module
*
@@ -204,21 +214,21 @@ template<typename Derived> class DenseBase
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
* nothing else.
*/
- void resize(Index size)
+ void resize(Index newSize)
{
- EIGEN_ONLY_USED_FOR_DEBUG(size);
- eigen_assert(size == this->size()
+ EIGEN_ONLY_USED_FOR_DEBUG(newSize);
+ eigen_assert(newSize == this->size()
&& "DenseBase::resize() does not actually allow to resize.");
}
/** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
* Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
* nothing else.
*/
- void resize(Index rows, Index cols)
+ void resize(Index nbRows, Index nbCols)
{
- EIGEN_ONLY_USED_FOR_DEBUG(rows);
- EIGEN_ONLY_USED_FOR_DEBUG(cols);
- eigen_assert(rows == this->rows() && cols == this->cols()
+ EIGEN_ONLY_USED_FOR_DEBUG(nbRows);
+ EIGEN_ONLY_USED_FOR_DEBUG(nbCols);
+ eigen_assert(nbRows == this->rows() && nbCols == this->cols()
&& "DenseBase::resize() does not actually allow to resize.");
}
@@ -271,7 +281,7 @@ template<typename Derived> class DenseBase
CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other);
Eigen::Transpose<Derived> transpose();
- typedef const Transpose<const Derived> ConstTransposeReturnType;
+ typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
ConstTransposeReturnType transpose() const;
void transposeInPlace();
#ifndef EIGEN_NO_DEBUG
@@ -281,29 +291,6 @@ template<typename Derived> class DenseBase
public:
#endif
- typedef VectorBlock<Derived> SegmentReturnType;
- typedef const VectorBlock<const Derived> ConstSegmentReturnType;
- template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
- template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
-
- // Note: The "DenseBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
- SegmentReturnType segment(Index start, Index size);
- typename DenseBase::ConstSegmentReturnType segment(Index start, Index size) const;
-
- SegmentReturnType head(Index size);
- typename DenseBase::ConstSegmentReturnType head(Index size) const;
-
- SegmentReturnType tail(Index size);
- typename DenseBase::ConstSegmentReturnType tail(Index size) const;
-
- template<int Size> typename FixedSegmentReturnType<Size>::Type head();
- template<int Size> typename ConstFixedSegmentReturnType<Size>::Type head() const;
-
- template<int Size> typename FixedSegmentReturnType<Size>::Type tail();
- template<int Size> typename ConstFixedSegmentReturnType<Size>::Type tail() const;
-
- template<int Size> typename FixedSegmentReturnType<Size>::Type segment(Index start);
- template<int Size> typename ConstFixedSegmentReturnType<Size>::Type segment(Index start) const;
static const ConstantReturnType
Constant(Index rows, Index cols, const Scalar& value);
@@ -348,17 +335,20 @@ template<typename Derived> class DenseBase
template<typename OtherDerived>
bool isApprox(const DenseBase<OtherDerived>& other,
- RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
bool isMuchSmallerThan(const RealScalar& other,
- RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived>
bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
- RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+
+ inline bool hasNaN() const;
+ inline bool allFinite() const;
inline Derived& operator*=(const Scalar& other);
inline Derived& operator/=(const Scalar& other);
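
hasNaN() and allFinite() are new convenience queries declared here; a usage sketch, assuming the definitions land elsewhere in this update:

    #include <limits>
    Eigen::VectorXd v(3);
    v << 1.0, std::numeric_limits<double>::quiet_NaN(), 3.0;
    bool anyNaN = v.hasNaN();     // true: one coefficient is NaN
    bool finite = v.allFinite();  // false: requires no NaN and no infinity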
@@ -438,8 +428,6 @@ template<typename Derived> class DenseBase
return derived().coeff(0,0);
}
-/////////// Array module ///////////
-
bool all(void) const;
bool any(void) const;
Index count() const;
@@ -465,11 +453,11 @@ template<typename Derived> class DenseBase
template<typename ThenDerived>
inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
- select(const DenseBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
+ select(const DenseBase<ThenDerived>& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const;
template<typename ElseDerived>
inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
- select(typename ElseDerived::Scalar thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
+ select(const typename ElseDerived::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
template<int p> RealScalar lpNorm() const;
diff --git a/extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h b/extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h
index 72704c2d79f..3c890f21590 100644
--- a/extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h
+++ b/extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h
@@ -427,22 +427,22 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket
- (Index row, Index col, const typename internal::packet_traits<Scalar>::type& x)
+ (Index row, Index col, const typename internal::packet_traits<Scalar>::type& val)
{
eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
- derived().template writePacket<StoreMode>(row,col,x);
+ derived().template writePacket<StoreMode>(row,col,val);
}
/** \internal */
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacketByOuterInner
- (Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& x)
+ (Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& val)
{
writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
colIndexByOuterInner(outer, inner),
- x);
+ val);
}
/** \internal
@@ -456,10 +456,10 @@ class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived,
*/
template<int StoreMode>
EIGEN_STRONG_INLINE void writePacket
- (Index index, const typename internal::packet_traits<Scalar>::type& x)
+ (Index index, const typename internal::packet_traits<Scalar>::type& val)
{
eigen_internal_assert(index >= 0 && index < size());
- derived().template writePacket<StoreMode>(index,x);
+ derived().template writePacket<StoreMode>(index,val);
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
diff --git a/extern/Eigen3/Eigen/src/Core/DenseStorage.h b/extern/Eigen3/Eigen/src/Core/DenseStorage.h
index 1fc2daf2c40..3e7f9c1b7a7 100644
--- a/extern/Eigen3/Eigen/src/Core/DenseStorage.h
+++ b/extern/Eigen3/Eigen/src/Core/DenseStorage.h
@@ -35,17 +35,36 @@ template <typename T, int Size, int MatrixOrArrayOptions,
struct plain_array
{
T array[Size];
- plain_array() {}
- plain_array(constructor_without_unaligned_array_assert) {}
+
+ plain_array()
+ {
+ EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ }
+
+ plain_array(constructor_without_unaligned_array_assert)
+ {
+ EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ }
};
-#ifdef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
+#elif EIGEN_GNUC_AT_LEAST(4,7)
+ // GCC 4.7 is too aggressive in its optimizations and removes the alignment test based on the fact that the array is declared to be aligned.
+ // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900
+ // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:
+ template<typename PtrType>
+ EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; }
+ #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
+ eigen_assert((reinterpret_cast<size_t>(eigen_unaligned_array_assert_workaround_gcc47(array)) & sizemask) == 0 \
+ && "this assertion is explained here: " \
+ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
+ " **** READ THIS WEB PAGE !!! ****");
#else
#define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
eigen_assert((reinterpret_cast<size_t>(array) & sizemask) == 0 \
&& "this assertion is explained here: " \
- "http://eigen.tuxfamily.org/dox-devel/TopicUnalignedArrayAssert.html" \
+ "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
" **** READ THIS WEB PAGE !!! ****");
#endif
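
The runtime assert above fires in the classic misalignment scenario, sketched here with a hypothetical user struct (not part of the patch):

    struct Body {
      double mass;
      Eigen::Vector4f state;  // fixed-size vectorizable member, 16-byte aligned
      // missing: EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    };
    Body* b = new Body;  // plain operator new may return storage where
                         // (address & 0xf) != 0; plain_array's constructor
                         // then hits EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT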
@@ -53,8 +72,17 @@ template <typename T, int Size, int MatrixOrArrayOptions>
struct plain_array<T, Size, MatrixOrArrayOptions, 16>
{
EIGEN_USER_ALIGN16 T array[Size];
- plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) }
- plain_array(constructor_without_unaligned_array_assert) {}
+
+ plain_array()
+ {
+ EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf);
+ EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ }
+
+ plain_array(constructor_without_unaligned_array_assert)
+ {
+ EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ }
};
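
The EIGEN_STATIC_ASSERT added in both constructors caps fixed-size (stack) storage at 128*128*8 = 131072 bytes. Illustration of the new compile-time behavior:

    Eigen::Matrix<double, 128, 128> ok;    // exactly 131072 bytes: accepted
    // Eigen::Matrix<float, 200, 200> big; // 160000 bytes: would now fail with
    //                                     // OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
    Eigen::MatrixXf heap(200, 200);        // dynamic sizes allocate on the heap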
template <typename T, int MatrixOrArrayOptions, int Alignment>
@@ -86,7 +114,7 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
{
internal::plain_array<T,Size,_Options> m_data;
public:
- inline explicit DenseStorage() {}
+ inline DenseStorage() {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()) {}
inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
@@ -103,7 +131,7 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
{
public:
- inline explicit DenseStorage() {}
+ inline DenseStorage() {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
inline void swap(DenseStorage& ) {}
@@ -132,16 +160,16 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
DenseIndex m_rows;
DenseIndex m_cols;
public:
- inline explicit DenseStorage() : m_rows(0), m_cols(0) {}
+ inline DenseStorage() : m_rows(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
- inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {}
+ inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
inline void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
- inline DenseIndex rows(void) const {return m_rows;}
- inline DenseIndex cols(void) const {return m_cols;}
- inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
- inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
+ inline DenseIndex rows() const {return m_rows;}
+ inline DenseIndex cols() const {return m_cols;}
+ inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+ inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
};
@@ -152,15 +180,15 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_rows;
public:
- inline explicit DenseStorage() : m_rows(0) {}
+ inline DenseStorage() : m_rows(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
- inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {}
+ inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
inline DenseIndex rows(void) const {return m_rows;}
inline DenseIndex cols(void) const {return _Cols;}
- inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
- inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
+ inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+ inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
};
@@ -171,15 +199,15 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_cols;
public:
- inline explicit DenseStorage() : m_cols(0) {}
+ inline DenseStorage() : m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
- inline DenseStorage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {}
+ inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows(void) const {return _Rows;}
inline DenseIndex cols(void) const {return m_cols;}
- inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
- inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
+ inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+ inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
inline const T *data() const { return m_data.array; }
inline T *data() { return m_data.array; }
};
@@ -191,24 +219,24 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
DenseIndex m_rows;
DenseIndex m_cols;
public:
- inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+ inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(0), m_rows(0), m_cols(0) {}
- inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex cols)
- : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols)
+ inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+ : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
inline void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
inline DenseIndex rows(void) const {return m_rows;}
inline DenseIndex cols(void) const {return m_cols;}
- inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols)
+ inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
- m_rows = rows;
- m_cols = cols;
+ m_rows = nbRows;
+ m_cols = nbCols;
}
- void resize(DenseIndex size, DenseIndex rows, DenseIndex cols)
+ void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
{
if(size != m_rows*m_cols)
{
@@ -219,8 +247,8 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
m_data = 0;
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
}
- m_rows = rows;
- m_cols = cols;
+ m_rows = nbRows;
+ m_cols = nbCols;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
@@ -232,20 +260,20 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
T *m_data;
DenseIndex m_cols;
public:
- inline explicit DenseStorage() : m_data(0), m_cols(0) {}
+ inline DenseStorage() : m_data(0), m_cols(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
- inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
+ inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
static inline DenseIndex rows(void) {return _Rows;}
inline DenseIndex cols(void) const {return m_cols;}
- inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols)
+ inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
- m_cols = cols;
+ m_cols = nbCols;
}
- EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols)
+ EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols)
{
if(size != _Rows*m_cols)
{
@@ -256,7 +284,7 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
m_data = 0;
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
}
- m_cols = cols;
+ m_cols = nbCols;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
@@ -268,20 +296,20 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
T *m_data;
DenseIndex m_rows;
public:
- inline explicit DenseStorage() : m_data(0), m_rows(0) {}
+ inline DenseStorage() : m_data(0), m_rows(0) {}
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
- inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
+ inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
inline DenseIndex rows(void) const {return m_rows;}
static inline DenseIndex cols(void) {return _Cols;}
- inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex)
+ inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
- m_rows = rows;
+ m_rows = nbRows;
}
- EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex)
+ EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex)
{
if(size != m_rows*_Cols)
{
@@ -292,7 +320,7 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
m_data = 0;
EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
}
- m_rows = rows;
+ m_rows = nbRows;
}
inline const T *data() const { return m_data; }
inline T *data() { return m_data; }
diff --git a/extern/Eigen3/Eigen/src/Core/Diagonal.h b/extern/Eigen3/Eigen/src/Core/Diagonal.h
index 16261968a0f..aab8007b3fc 100644
--- a/extern/Eigen3/Eigen/src/Core/Diagonal.h
+++ b/extern/Eigen3/Eigen/src/Core/Diagonal.h
@@ -41,12 +41,12 @@ struct traits<Diagonal<MatrixType,DiagIndex> >
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
typedef typename MatrixType::StorageKind StorageKind;
enum {
- RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
- : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
- MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
+ RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
+ : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
+ MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
ColsAtCompileTime = 1,
MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
- : DiagIndex == Dynamic ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
+ : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
MatrixType::MaxColsAtCompileTime)
: (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
@@ -61,20 +61,21 @@ struct traits<Diagonal<MatrixType,DiagIndex> >
};
}
-template<typename MatrixType, int DiagIndex> class Diagonal
- : public internal::dense_xpr_base< Diagonal<MatrixType,DiagIndex> >::type
+template<typename MatrixType, int _DiagIndex> class Diagonal
+ : public internal::dense_xpr_base< Diagonal<MatrixType,_DiagIndex> >::type
{
public:
+ enum { DiagIndex = _DiagIndex };
typedef typename internal::dense_xpr_base<Diagonal>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
- inline Diagonal(MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {}
+ inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
inline Index rows() const
- { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
+ { return m_index.value()<0 ? (std::min<Index>)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min<Index>)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
inline Index cols() const { return 1; }
@@ -113,20 +114,20 @@ template<typename MatrixType, int DiagIndex> class Diagonal
return m_matrix.coeff(row+rowOffset(), row+colOffset());
}
- inline Scalar& coeffRef(Index index)
+ inline Scalar& coeffRef(Index idx)
{
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
- return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
+ return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset());
}
- inline const Scalar& coeffRef(Index index) const
+ inline const Scalar& coeffRef(Index idx) const
{
- return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
+ return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset());
}
- inline CoeffReturnType coeff(Index index) const
+ inline CoeffReturnType coeff(Index idx) const
{
- return m_matrix.coeff(index+rowOffset(), index+colOffset());
+ return m_matrix.coeff(idx+rowOffset(), idx+colOffset());
}
const typename internal::remove_all<typename MatrixType::Nested>::type&
@@ -142,7 +143,7 @@ template<typename MatrixType, int DiagIndex> class Diagonal
protected:
typename MatrixType::Nested m_matrix;
- const internal::variable_if_dynamic<Index, DiagIndex> m_index;
+ const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;
private:
// some compilers may fail to optimize std::max etc in case of compile-time constants...
@@ -171,7 +172,7 @@ MatrixBase<Derived>::diagonal()
/** This is the const version of diagonal(). */
template<typename Derived>
-inline const typename MatrixBase<Derived>::ConstDiagonalReturnType
+inline typename MatrixBase<Derived>::ConstDiagonalReturnType
MatrixBase<Derived>::diagonal() const
{
return ConstDiagonalReturnType(derived());
@@ -189,18 +190,18 @@ MatrixBase<Derived>::diagonal() const
*
* \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived>
-inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Dynamic>::Type
+inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<DynamicIndex>::Type
MatrixBase<Derived>::diagonal(Index index)
{
- return typename DiagonalIndexReturnType<Dynamic>::Type(derived(), index);
+ return typename DiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
}
/** This is the const version of diagonal(Index). */
template<typename Derived>
-inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Dynamic>::Type
+inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<DynamicIndex>::Type
MatrixBase<Derived>::diagonal(Index index) const
{
- return typename ConstDiagonalIndexReturnType<Dynamic>::Type(derived(), index);
+ return typename ConstDiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
}
/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
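
Usage of the runtime-index diagonals touched above, for reference; only the expression's type tag changes (DynamicIndex instead of Dynamic), not the API:

    Eigen::Matrix3d m = Eigen::Matrix3d::Random();
    Eigen::Vector3d d  = m.diagonal();    // main diagonal, compile-time index 0
    Eigen::Vector2d up = m.diagonal(1);   // first super-diagonal, runtime index
    Eigen::Vector2d lo = m.diagonal(-1);  // first sub-diagonal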
diff --git a/extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h b/extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h
index 88190da684d..e6c220f4195 100644
--- a/extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h
@@ -20,6 +20,7 @@ class DiagonalBase : public EigenBase<Derived>
public:
typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
typedef typename DiagonalVectorType::Scalar Scalar;
+ typedef typename DiagonalVectorType::RealScalar RealScalar;
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
@@ -55,9 +56,14 @@ class DiagonalBase : public EigenBase<Derived>
inline Index rows() const { return diagonal().size(); }
inline Index cols() const { return diagonal().size(); }
+ /** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
+ */
template<typename MatrixDerived>
const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
- operator*(const MatrixBase<MatrixDerived> &matrix) const;
+ operator*(const MatrixBase<MatrixDerived> &matrix) const
+ {
+ return DiagonalProduct<MatrixDerived, Derived, OnTheLeft>(matrix.derived(), derived());
+ }
inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> >
inverse() const
@@ -65,6 +71,17 @@ class DiagonalBase : public EigenBase<Derived>
return diagonal().cwiseInverse();
}
+ inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
+ operator*(const Scalar& scalar) const
+ {
+ return diagonal() * scalar;
+ }
+ friend inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
+ operator*(const Scalar& scalar, const DiagonalBase& other)
+ {
+ return other.diagonal() * scalar;
+ }
+
#ifdef EIGEN2_SUPPORT
template<typename OtherDerived>
bool isApprox(const DiagonalBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
@@ -238,7 +255,7 @@ class DiagonalWrapper
#endif
/** Constructor from expression of diagonal coefficients to wrap. */
- inline DiagonalWrapper(DiagonalVectorType& diagonal) : m_diagonal(diagonal) {}
+ inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}
/** \returns a const reference to the wrapped expression of diagonal coefficients. */
const DiagonalVectorType& diagonal() const { return m_diagonal; }
@@ -272,13 +289,14 @@ MatrixBase<Derived>::asDiagonal() const
* \sa asDiagonal()
*/
template<typename Derived>
-bool MatrixBase<Derived>::isDiagonal(RealScalar prec) const
+bool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const
{
+ using std::abs;
if(cols() != rows()) return false;
RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
for(Index j = 0; j < cols(); ++j)
{
- RealScalar absOnDiagonal = internal::abs(coeff(j,j));
+ RealScalar absOnDiagonal = abs(coeff(j,j));
if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
}
for(Index j = 0; j < cols(); ++j)
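
The two operator* overloads added to DiagonalBase above allow scaling a diagonal expression directly; a sketch:

    Eigen::Vector3d d(1.0, 2.0, 3.0);
    Eigen::Matrix3d M = 2.0 * d.asDiagonal();  // scalar * diagonal (new overload)
    Eigen::Vector3d y = (d.asDiagonal() * 0.5) * Eigen::Vector3d::Ones();
    // M is dense with 2, 4, 6 on its diagonal; y == (0.5, 1.0, 1.5)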
diff --git a/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h b/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h
index 598c6b3e19a..c03a0c2e12b 100644
--- a/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h
+++ b/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h
@@ -26,14 +26,15 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
_StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor,
- _PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
- ||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)),
+ _ScalarAccessOnDiag = !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft)
+ ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)),
_SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
// FIXME currently we need same types, but in the future the next rule should be the one
- //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))),
- _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))),
+ //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
+ _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
+ _LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,
- Flags = (HereditaryBits & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),
+ Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
};
};
@@ -54,13 +55,21 @@ class DiagonalProduct : internal::no_assignment_operator,
eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
}
- inline Index rows() const { return m_matrix.rows(); }
- inline Index cols() const { return m_matrix.cols(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }
- const Scalar coeff(Index row, Index col) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
{
return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
}
+
+ EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const
+ {
+ enum {
+ StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor
+ };
+ return coeff(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);
+ }
template<int LoadMode>
EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
@@ -69,11 +78,19 @@ class DiagonalProduct : internal::no_assignment_operator,
StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor
};
const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
-
return packet_impl<LoadMode>(row,col,indexInDiagonalVector,typename internal::conditional<
((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type());
}
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet(Index idx) const
+ {
+ enum {
+ StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor
+ };
+ return packet<LoadMode>(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);
+ }
protected:
template<int LoadMode>
@@ -88,7 +105,7 @@ class DiagonalProduct : internal::no_assignment_operator,
{
enum {
InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
- DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
+ DiagonalVectorPacketLoadMode = (LoadMode == Aligned && (((InnerSize%16) == 0) || (int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit)==AlignedBit) ? Aligned : Unaligned)
};
return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
m_diagonal.diagonal().template packet<DiagonalVectorPacketLoadMode>(id));
@@ -103,19 +120,9 @@ class DiagonalProduct : internal::no_assignment_operator,
template<typename Derived>
template<typename DiagonalDerived>
inline const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
-MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &diagonal) const
-{
- return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), diagonal.derived());
-}
-
-/** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
- */
-template<typename DiagonalDerived>
-template<typename MatrixDerived>
-inline const DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>
-DiagonalBase<DiagonalDerived>::operator*(const MatrixBase<MatrixDerived> &matrix) const
+MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const
{
- return DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>(matrix.derived(), derived());
+ return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), a_diagonal.derived());
}
} // end namespace Eigen
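
With the coeff(Index) and packet(Index) overloads added above, diagonal-times-vector products gain linear (and, with AlignedBit, vectorized) access; the user-facing call is unchanged:

    Eigen::VectorXd w = Eigen::VectorXd::Constant(4, 2.0);
    Eigen::VectorXd x = Eigen::VectorXd::LinSpaced(4, 0.0, 3.0);
    Eigen::VectorXd y = w.asDiagonal() * x;  // per-coefficient scaling, one pass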
diff --git a/extern/Eigen3/Eigen/src/Core/Dot.h b/extern/Eigen3/Eigen/src/Core/Dot.h
index ae9274e36dd..9d7651f1fd4 100644
--- a/extern/Eigen3/Eigen/src/Core/Dot.h
+++ b/extern/Eigen3/Eigen/src/Core/Dot.h
@@ -112,7 +112,7 @@ MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
template<typename Derived>
EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
{
- return internal::real((*this).cwiseAbs2().sum());
+ return numext::real((*this).cwiseAbs2().sum());
}
/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
@@ -124,7 +124,8 @@ EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scala
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
{
- return internal::sqrt(squaredNorm());
+ using std::sqrt;
+ return sqrt(squaredNorm());
}
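
The change recurring throughout this file (and in Functors.h below) replaces internal::sqrt and friends with a local using-declaration plus an unqualified call, so argument-dependent lookup can find overloads for custom scalar types. The pattern, distilled with a hypothetical user type:

    #include <cmath>
    namespace my {
      struct Wide { double v; };
      Wide sqrt(Wide a) { a.v = std::sqrt(a.v); return a; }  // found by ADL
    }
    template<typename T> T generic_sqrt(const T& x)
    {
      using std::sqrt;  // fallback for builtin types
      return sqrt(x);   // unqualified: picks my::sqrt for my::Wide,
    }                   // std::sqrt for double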
/** \returns an expression of the quotient of *this by its own norm.
@@ -165,6 +166,7 @@ struct lpNorm_selector
typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
static inline RealScalar run(const MatrixBase<Derived>& m)
{
+ using std::pow;
return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
}
};
@@ -223,11 +225,11 @@ MatrixBase<Derived>::lpNorm() const
template<typename Derived>
template<typename OtherDerived>
bool MatrixBase<Derived>::isOrthogonal
-(const MatrixBase<OtherDerived>& other, RealScalar prec) const
+(const MatrixBase<OtherDerived>& other, const RealScalar& prec) const
{
typename internal::nested<Derived,2>::type nested(derived());
typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
- return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
+ return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
}
/** \returns true if *this is approximately an unitary matrix,
@@ -242,7 +244,7 @@ bool MatrixBase<Derived>::isOrthogonal
* Output: \verbinclude MatrixBase_isUnitary.out
*/
template<typename Derived>
-bool MatrixBase<Derived>::isUnitary(RealScalar prec) const
+bool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const
{
typename Derived::Nested nested(derived());
for(Index i = 0; i < cols(); ++i)
diff --git a/extern/Eigen3/Eigen/src/Core/EigenBase.h b/extern/Eigen3/Eigen/src/Core/EigenBase.h
index 0bbd28bec21..fadb45852f6 100644
--- a/extern/Eigen3/Eigen/src/Core/EigenBase.h
+++ b/extern/Eigen3/Eigen/src/Core/EigenBase.h
@@ -126,35 +126,6 @@ Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
return derived();
}
-/** replaces \c *this by \c *this * \a other.
- *
- * \returns a reference to \c *this
- */
-template<typename Derived>
-template<typename OtherDerived>
-inline Derived&
-MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
-{
- other.derived().applyThisOnTheRight(derived());
- return derived();
-}
-
-/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=() */
-template<typename Derived>
-template<typename OtherDerived>
-inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
-{
- other.derived().applyThisOnTheRight(derived());
-}
-
-/** replaces \c *this by \c *this * \a other. */
-template<typename Derived>
-template<typename OtherDerived>
-inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
-{
- other.derived().applyThisOnTheLeft(derived());
-}
-
} // end namespace Eigen
#endif // EIGEN_EIGENBASE_H
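
The three members removed here ("replaces *this by *this * other") are presumably relocated by this update rather than dropped; their in-place semantics, for reference:

    Eigen::Matrix2d A, B;
    A.setIdentity(); B << 0, 1, 1, 0;
    A.applyOnTheRight(B);  // A = A * B, in place
    A.applyOnTheLeft(B);   // A = B * A, in place
    A *= B;                // equivalent to applyOnTheRight(B)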
diff --git a/extern/Eigen3/Eigen/src/Core/Functors.h b/extern/Eigen3/Eigen/src/Core/Functors.h
index 278c46c6b61..04fb217323d 100644
--- a/extern/Eigen3/Eigen/src/Core/Functors.h
+++ b/extern/Eigen3/Eigen/src/Core/Functors.h
@@ -154,6 +154,7 @@ template<typename Scalar> struct scalar_hypot_op {
{
using std::max;
using std::min;
+ using std::sqrt;
Scalar p = (max)(_x, _y);
Scalar q = (min)(_x, _y);
Scalar qp = q/p;
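
The hunk is truncated mid-function; the factoring into p, q and qp is the standard overflow-safe hypot, which presumably completes as in upstream Eigen:

    // sketch of the remainder (not shown in this hunk):
    //   return p * sqrt(Scalar(1) + qp*qp);  // == sqrt(x*x + y*y)
    // since qp <= 1, nothing large is squared, so x*x cannot overflow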
@@ -170,7 +171,7 @@ struct functor_traits<scalar_hypot_op<Scalar> > {
*/
template<typename Scalar, typename OtherScalar> struct scalar_binary_pow_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op)
- inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return internal::pow(a, b); }
+ inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return numext::pow(a, b); }
};
template<typename Scalar, typename OtherScalar>
struct functor_traits<scalar_binary_pow_op<Scalar,OtherScalar> > {
@@ -204,21 +205,28 @@ struct functor_traits<scalar_difference_op<Scalar> > {
*
* \sa class CwiseBinaryOp, Cwise::operator/()
*/
-template<typename Scalar> struct scalar_quotient_op {
+template<typename LhsScalar,typename RhsScalar> struct scalar_quotient_op {
+ enum {
+ // TODO vectorize mixed product
+ Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv
+ };
+ typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; }
+ EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; }
template<typename Packet>
EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
{ return internal::pdiv(a,b); }
};
-template<typename Scalar>
-struct functor_traits<scalar_quotient_op<Scalar> > {
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {
enum {
- Cost = 2 * NumTraits<Scalar>::MulCost,
- PacketAccess = packet_traits<Scalar>::HasDiv
+ Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost), // rough estimate!
+ PacketAccess = scalar_quotient_op<LhsScalar,RhsScalar>::Vectorizable
};
};
+
+
/** \internal
* \brief Template functor to compute the and of two booleans
*
@@ -280,7 +288,7 @@ struct functor_traits<scalar_opposite_op<Scalar> >
template<typename Scalar> struct scalar_abs_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op)
typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs(a); }
+ EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { using std::abs; return abs(a); }
template<typename Packet>
EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
{ return internal::pabs(a); }
@@ -302,7 +310,7 @@ struct functor_traits<scalar_abs_op<Scalar> >
template<typename Scalar> struct scalar_abs2_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)
typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs2(a); }
+ EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs2(a); }
template<typename Packet>
EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
{ return internal::pmul(a,a); }
@@ -318,7 +326,7 @@ struct functor_traits<scalar_abs2_op<Scalar> >
*/
template<typename Scalar> struct scalar_conjugate_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return internal::conj(a); }
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using numext::conj; return conj(a); }
template<typename Packet>
EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
};
@@ -355,7 +363,7 @@ template<typename Scalar>
struct scalar_real_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)
typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::real(a); }
+ EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::real(a); }
};
template<typename Scalar>
struct functor_traits<scalar_real_op<Scalar> >
@@ -370,7 +378,7 @@ template<typename Scalar>
struct scalar_imag_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)
typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::imag(a); }
+ EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::imag(a); }
};
template<typename Scalar>
struct functor_traits<scalar_imag_op<Scalar> >
@@ -385,7 +393,7 @@ template<typename Scalar>
struct scalar_real_ref_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)
typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::real_ref(*const_cast<Scalar*>(&a)); }
+ EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::real_ref(*const_cast<Scalar*>(&a)); }
};
template<typename Scalar>
struct functor_traits<scalar_real_ref_op<Scalar> >
@@ -400,7 +408,7 @@ template<typename Scalar>
struct scalar_imag_ref_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)
typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::imag_ref(*const_cast<Scalar*>(&a)); }
+ EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::imag_ref(*const_cast<Scalar*>(&a)); }
};
template<typename Scalar>
struct functor_traits<scalar_imag_ref_op<Scalar> >
@@ -414,7 +422,7 @@ struct functor_traits<scalar_imag_ref_op<Scalar> >
*/
template<typename Scalar> struct scalar_exp_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op)
- inline const Scalar operator() (const Scalar& a) const { return internal::exp(a); }
+ inline const Scalar operator() (const Scalar& a) const { using std::exp; return exp(a); }
typedef typename packet_traits<Scalar>::type Packet;
inline Packet packetOp(const Packet& a) const { return internal::pexp(a); }
};
@@ -430,7 +438,7 @@ struct functor_traits<scalar_exp_op<Scalar> >
*/
template<typename Scalar> struct scalar_log_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op)
- inline const Scalar operator() (const Scalar& a) const { return internal::log(a); }
+ inline const Scalar operator() (const Scalar& a) const { using std::log; return log(a); }
typedef typename packet_traits<Scalar>::type Packet;
inline Packet packetOp(const Packet& a) const { return internal::plog(a); }
};
@@ -447,7 +455,7 @@ struct functor_traits<scalar_log_op<Scalar> >
* indeed it seems better to declare m_other as a Packet and do the pset1() once
* in the constructor. However, in practice:
* - GCC does not like m_other as a Packet and generate a load every time it needs it
- * - on the other hand GCC is able to moves the pset1() away the loop :)
+ * - on the other hand GCC is able to move the pset1() outside the loop :)
* - simpler code ;)
* (ICC and gcc 4.4 seems to perform well in both cases, the issue is visible with y = a*x + b*y)
*/
@@ -478,33 +486,6 @@ template<typename Scalar1,typename Scalar2>
struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> >
{ enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
-template<typename Scalar, bool IsInteger>
-struct scalar_quotient1_impl {
- typedef typename packet_traits<Scalar>::type Packet;
- // FIXME default copy constructors seems bugged with std::complex<>
- EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { }
- EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(static_cast<Scalar>(1) / other) {}
- EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
- EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
- { return internal::pmul(a, pset1<Packet>(m_other)); }
- const Scalar m_other;
-};
-template<typename Scalar>
-struct functor_traits<scalar_quotient1_impl<Scalar,false> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
-
-template<typename Scalar>
-struct scalar_quotient1_impl<Scalar,true> {
- // FIXME default copy constructors seems bugged with std::complex<>
- EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { }
- EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(other) {}
- EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
- typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
-};
-template<typename Scalar>
-struct functor_traits<scalar_quotient1_impl<Scalar,true> >
-{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
/** \internal
* \brief Template functor to divide a scalar by a fixed other one
*
@@ -514,14 +495,19 @@ struct functor_traits<scalar_quotient1_impl<Scalar,true> >
* \sa class CwiseUnaryOp, MatrixBase::operator/
*/
template<typename Scalar>
-struct scalar_quotient1_op : scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger > {
- EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other)
- : scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger >(other) {}
+struct scalar_quotient1_op {
+ typedef typename packet_traits<Scalar>::type Packet;
+ // FIXME default copy constructors seem bugged with std::complex<>
+ EIGEN_STRONG_INLINE scalar_quotient1_op(const scalar_quotient1_op& other) : m_other(other.m_other) { }
+ EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) : m_other(other) {}
+ EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+ { return internal::pdiv(a, pset1<Packet>(m_other)); }
+ typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
};
template<typename Scalar>
struct functor_traits<scalar_quotient1_op<Scalar> >
-: functor_traits<scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger> >
-{};
+{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
// nullary functors
@@ -555,20 +541,28 @@ template <typename Scalar, bool RandomAccess> struct linspaced_op_impl;
// linear access for packet ops:
// 1) initialization
// base = [low, ..., low] + ([step, ..., step] * [-size, ..., 0])
-// 2) each step
+// 2) each step (where size is 1 for coeff access or PacketSize for packet access)
// base += [size*step, ..., size*step]
+//
+// TODO: Perhaps it's better to initialize lazily (so not in the constructor but in packetOp)
+// in order to avoid the padd() in operator() ?
template <typename Scalar>
struct linspaced_op_impl<Scalar,false>
{
typedef typename packet_traits<Scalar>::type Packet;
- linspaced_op_impl(Scalar low, Scalar step) :
+ linspaced_op_impl(const Scalar& low, const Scalar& step) :
m_low(low), m_step(step),
m_packetStep(pset1<Packet>(packet_traits<Scalar>::size*step)),
- m_base(padd(pset1<Packet>(low),pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
+ m_base(padd(pset1<Packet>(low), pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
template<typename Index>
- EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
+ EIGEN_STRONG_INLINE const Scalar operator() (Index i) const
+ {
+ m_base = padd(m_base, pset1<Packet>(m_step));
+ return m_low+Scalar(i)*m_step;
+ }
+
template<typename Index>
EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); }
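
linspaced_op backs DenseBase::LinSpaced(); the sequential (RandomAccess == false) path above keeps a running packet m_base so each packetOp() is a single padd. Typical call, for reference:

    Eigen::VectorXf v = Eigen::VectorXf::LinSpaced(5, 0.0f, 1.0f);
    // v = [0, 0.25, 0.5, 0.75, 1]; per the constructor below, a single
    // step (num_steps == 1) yields 'high' rather than dividing by zero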
@@ -586,7 +580,7 @@ struct linspaced_op_impl<Scalar,true>
{
typedef typename packet_traits<Scalar>::type Packet;
- linspaced_op_impl(Scalar low, Scalar step) :
+ linspaced_op_impl(const Scalar& low, const Scalar& step) :
m_low(low), m_step(step),
m_lowPacket(pset1<Packet>(m_low)), m_stepPacket(pset1<Packet>(m_step)), m_interPacket(plset<Scalar>(0)) {}
@@ -615,7 +609,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
template <typename Scalar, bool RandomAccess> struct linspaced_op
{
typedef typename packet_traits<Scalar>::type Packet;
- linspaced_op(Scalar low, Scalar high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
+ linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
template<typename Index>
EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
@@ -654,12 +648,14 @@ template <typename Scalar, bool RandomAccess> struct linspaced_op
template<typename Functor> struct functor_has_linear_access { enum { ret = 1 }; };
template<typename Scalar> struct functor_has_linear_access<scalar_identity_op<Scalar> > { enum { ret = 0 }; };
-// in CwiseBinaryOp, we require the Lhs and Rhs to have the same scalar type, except for multiplication
-// where we only require them to have the same _real_ scalar type so one may multiply, say, float by complex<float>.
+// In Eigen, any binary op (Product, CwiseBinaryOp) require the Lhs and Rhs to have the same scalar type, except for multiplication
+// where the mixing of different types is handled by scalar_product_traits
+// In particular, real * complex<real> is allowed.
// FIXME move this to functor_traits adding a functor_default
-template<typename Functor> struct functor_allows_mixing_real_and_complex { enum { ret = 0 }; };
-template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
-template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename Functor> struct functor_is_product_like { enum { ret = 0 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_quotient_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
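
What the renamed trait permits, sketched: product-like binary ops (product, conjugate product, and now quotient) may mix a real scalar with a complex scalar of the same real type, while other binary ops still require identical scalar types:

    #include <complex>
    Eigen::VectorXf  r(3);  r.setOnes();
    Eigen::VectorXcf c(3);  c.setOnes();
    Eigen::VectorXcf p = r.cwiseProduct(c);   // OK: float * std::complex<float>
    Eigen::VectorXcf q = r.cwiseQuotient(c);  // OK now that quotient is
                                              // product-like as well
    // Eigen::VectorXcf s = r + c;  // still a compile error: mixed scalar types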
/** \internal
@@ -688,7 +684,7 @@ struct functor_traits<scalar_add_op<Scalar> >
*/
template<typename Scalar> struct scalar_sqrt_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)
- inline const Scalar operator() (const Scalar& a) const { return internal::sqrt(a); }
+ inline const Scalar operator() (const Scalar& a) const { using std::sqrt; return sqrt(a); }
typedef typename packet_traits<Scalar>::type Packet;
inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }
};
@@ -706,7 +702,7 @@ struct functor_traits<scalar_sqrt_op<Scalar> >
*/
template<typename Scalar> struct scalar_cos_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)
- inline Scalar operator() (const Scalar& a) const { return internal::cos(a); }
+ inline Scalar operator() (const Scalar& a) const { using std::cos; return cos(a); }
typedef typename packet_traits<Scalar>::type Packet;
inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }
};
@@ -725,7 +721,7 @@ struct functor_traits<scalar_cos_op<Scalar> >
*/
template<typename Scalar> struct scalar_sin_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)
- inline const Scalar operator() (const Scalar& a) const { return internal::sin(a); }
+ inline const Scalar operator() (const Scalar& a) const { using std::sin; return sin(a); }
typedef typename packet_traits<Scalar>::type Packet;
inline Packet packetOp(const Packet& a) const { return internal::psin(a); }
};
@@ -745,7 +741,7 @@ struct functor_traits<scalar_sin_op<Scalar> >
*/
template<typename Scalar> struct scalar_tan_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)
- inline const Scalar operator() (const Scalar& a) const { return internal::tan(a); }
+ inline const Scalar operator() (const Scalar& a) const { using std::tan; return tan(a); }
typedef typename packet_traits<Scalar>::type Packet;
inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }
};
@@ -764,7 +760,7 @@ struct functor_traits<scalar_tan_op<Scalar> >
*/
template<typename Scalar> struct scalar_acos_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)
- inline const Scalar operator() (const Scalar& a) const { return internal::acos(a); }
+ inline const Scalar operator() (const Scalar& a) const { using std::acos; return acos(a); }
typedef typename packet_traits<Scalar>::type Packet;
inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }
};
@@ -783,7 +779,7 @@ struct functor_traits<scalar_acos_op<Scalar> >
*/
template<typename Scalar> struct scalar_asin_op {
EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)
- inline const Scalar operator() (const Scalar& a) const { return internal::asin(a); }
+ inline const Scalar operator() (const Scalar& a) const { using std::asin; return asin(a); }
typedef typename packet_traits<Scalar>::type Packet;
inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }
};
@@ -805,7 +801,7 @@ struct scalar_pow_op {
// FIXME default copy constructors seems bugged with std::complex<>
inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { }
inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
- inline Scalar operator() (const Scalar& a) const { return internal::pow(a, m_exponent); }
+ inline Scalar operator() (const Scalar& a) const { return numext::pow(a, m_exponent); }
const Scalar m_exponent;
};
template<typename Scalar>
diff --git a/extern/Eigen3/Eigen/src/Core/Fuzzy.h b/extern/Eigen3/Eigen/src/Core/Fuzzy.h
index d74edcfdb9d..fe63bd2984d 100644
--- a/extern/Eigen3/Eigen/src/Core/Fuzzy.h
+++ b/extern/Eigen3/Eigen/src/Core/Fuzzy.h
@@ -19,7 +19,7 @@ namespace internal
template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
struct isApprox_selector
{
- static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec)
+ static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
{
using std::min;
typename internal::nested<Derived,2>::type nested(x);
@@ -31,7 +31,7 @@ struct isApprox_selector
template<typename Derived, typename OtherDerived>
struct isApprox_selector<Derived, OtherDerived, true>
{
- static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar)
+ static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&)
{
return x.matrix() == y.matrix();
}
@@ -40,16 +40,16 @@ struct isApprox_selector<Derived, OtherDerived, true>
template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
struct isMuchSmallerThan_object_selector
{
- static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec)
+ static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
{
- return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum();
+ return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum();
}
};
template<typename Derived, typename OtherDerived>
struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>
{
- static bool run(const Derived& x, const OtherDerived&, typename Derived::RealScalar)
+ static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&)
{
return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
}
@@ -58,16 +58,16 @@ struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>
template<typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
struct isMuchSmallerThan_scalar_selector
{
- static bool run(const Derived& x, const typename Derived::RealScalar& y, typename Derived::RealScalar prec)
+ static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec)
{
- return x.cwiseAbs2().sum() <= abs2(prec * y);
+ return x.cwiseAbs2().sum() <= numext::abs2(prec * y);
}
};
template<typename Derived>
struct isMuchSmallerThan_scalar_selector<Derived, true>
{
- static bool run(const Derived& x, const typename Derived::RealScalar&, typename Derived::RealScalar)
+ static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&)
{
return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
}
@@ -97,7 +97,7 @@ template<typename Derived>
template<typename OtherDerived>
bool DenseBase<Derived>::isApprox(
const DenseBase<OtherDerived>& other,
- RealScalar prec
+ const RealScalar& prec
) const
{
return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
@@ -119,7 +119,7 @@ bool DenseBase<Derived>::isApprox(
template<typename Derived>
bool DenseBase<Derived>::isMuchSmallerThan(
const typename NumTraits<Scalar>::Real& other,
- RealScalar prec
+ const RealScalar& prec
) const
{
return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);
@@ -139,7 +139,7 @@ template<typename Derived>
template<typename OtherDerived>
bool DenseBase<Derived>::isMuchSmallerThan(
const DenseBase<OtherDerived>& other,
- RealScalar prec
+ const RealScalar& prec
) const
{
return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
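
All fuzzy comparisons now take the precision by const reference, which avoids copies for expensive scalar types (e.g. multiprecision); call sites are unchanged:

    Eigen::Vector3d a(1.0, 2.0, 3.0);
    Eigen::Vector3d b = a + Eigen::Vector3d::Constant(1e-12);
    bool close = a.isApprox(b);         // default NumTraits precision
    bool tight = a.isApprox(b, 1e-15);  // explicit precision, bound by const&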
diff --git a/extern/Eigen3/Eigen/src/Core/GeneralProduct.h b/extern/Eigen3/Eigen/src/Core/GeneralProduct.h
index bfc2a67b12f..2a59d94645e 100644
--- a/extern/Eigen3/Eigen/src/Core/GeneralProduct.h
+++ b/extern/Eigen3/Eigen/src/Core/GeneralProduct.h
@@ -222,7 +222,29 @@ class GeneralProduct<Lhs, Rhs, InnerProduct>
***********************************************************************/
namespace internal {
-template<int StorageOrder> struct outer_product_selector;
+
+// Column major
+template<typename ProductType, typename Dest, typename Func>
+EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const false_type&)
+{
+ typedef typename Dest::Index Index;
+ // FIXME make sure lhs is sequentially stored
+ // FIXME not very good if rhs is real and lhs complex while alpha is real too
+ const Index cols = dest.cols();
+ for (Index j=0; j<cols; ++j)
+ func(dest.col(j), prod.rhs().coeff(j) * prod.lhs());
+}
+
+// Row major
+template<typename ProductType, typename Dest, typename Func>
+EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const true_type&) {
+ typedef typename Dest::Index Index;
+ // FIXME make sure rhs is sequentially stored
+ // FIXME not very good if lhs is real and rhs complex while alpha is real too
+ const Index rows = dest.rows();
+ for (Index i=0; i<rows; ++i)
+ func(dest.row(i), prod.lhs().coeff(i) * prod.rhs());
+}
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,OuterProduct> >
@@ -235,6 +257,8 @@ template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, OuterProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
{
+ template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
+
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
@@ -243,41 +267,39 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
}
-
- template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
- {
- internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, alpha);
+
+ struct set { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } };
+ struct add { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
+ struct sub { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };
+ struct adds {
+ Scalar m_scale;
+ adds(const Scalar& s) : m_scale(s) {}
+ template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const {
+ dst.const_cast_derived() += m_scale * src;
+ }
+ };
+
+ template<typename Dest>
+ inline void evalTo(Dest& dest) const {
+ internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
+ }
+
+ template<typename Dest>
+ inline void addTo(Dest& dest) const {
+ internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
}
-};
-
-namespace internal {
-template<> struct outer_product_selector<ColMajor> {
- template<typename ProductType, typename Dest>
- static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
- typedef typename Dest::Index Index;
- // FIXME make sure lhs is sequentially stored
- // FIXME not very good if rhs is real and lhs complex while alpha is real too
- const Index cols = dest.cols();
- for (Index j=0; j<cols; ++j)
- dest.col(j) += (alpha * prod.rhs().coeff(j)) * prod.lhs();
- }
-};
+ template<typename Dest>
+ inline void subTo(Dest& dest) const {
+ internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
+ }
-template<> struct outer_product_selector<RowMajor> {
- template<typename ProductType, typename Dest>
- static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
- typedef typename Dest::Index Index;
- // FIXME make sure rhs is sequentially stored
- // FIXME not very good if lhs is real and rhs complex while alpha is real too
- const Index rows = dest.rows();
- for (Index i=0; i<rows; ++i)
- dest.row(i) += (alpha * prod.lhs().coeff(i)) * prod.rhs();
- }
+ template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+ {
+ internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
+ }
};
-} // end namespace internal
-
/***********************************************************************
* Implementation of General Matrix Vector Product
***********************************************************************/
@@ -311,7 +333,7 @@ class GeneralProduct<Lhs, Rhs, GemvProduct>
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
- GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ GeneralProduct(const Lhs& a_lhs, const Rhs& a_rhs) : Base(a_lhs,a_rhs)
{
// EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::Scalar, typename Rhs::Scalar>::value),
// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
@@ -320,7 +342,7 @@ class GeneralProduct<Lhs, Rhs, GemvProduct>
enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };
typedef typename internal::conditional<int(Side)==OnTheRight,_LhsNested,_RhsNested>::type MatrixType;
- template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
{
eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols());
internal::gemv_selector<Side,(int(MatrixType::Flags)&RowMajorBit) ? RowMajor : ColMajor,
@@ -335,7 +357,7 @@ template<int StorageOrder, bool BlasCompatible>
struct gemv_selector<OnTheLeft,StorageOrder,BlasCompatible>
{
template<typename ProductType, typename Dest>
- static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
{
Transpose<Dest> destT(dest);
enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
@@ -384,7 +406,7 @@ struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
template<> struct gemv_selector<OnTheRight,ColMajor,true>
{
template<typename ProductType, typename Dest>
- static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ static inline void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
{
typedef typename ProductType::Index Index;
typedef typename ProductType::LhsScalar LhsScalar;
@@ -413,7 +435,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
- bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
+ bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
@@ -457,7 +479,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
template<> struct gemv_selector<OnTheRight,RowMajor,true>
{
template<typename ProductType, typename Dest>
- static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
{
typedef typename ProductType::LhsScalar LhsScalar;
typedef typename ProductType::RhsScalar RhsScalar;
@@ -508,7 +530,7 @@ template<> struct gemv_selector<OnTheRight,RowMajor,true>
template<> struct gemv_selector<OnTheRight,ColMajor,false>
{
template<typename ProductType, typename Dest>
- static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
{
typedef typename Dest::Index Index;
// TODO makes sure dest is sequentially stored in memory, otherwise use a temp
@@ -521,7 +543,7 @@ template<> struct gemv_selector<OnTheRight,ColMajor,false>
template<> struct gemv_selector<OnTheRight,RowMajor,false>
{
template<typename ProductType, typename Dest>
- static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
{
typedef typename Dest::Index Index;
// TODO makes sure rhs is sequentially stored in memory, otherwise use a temp
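
The outer-product rework above replaces a class-template selector with two function overloads picked by a true_type/false_type storage-order tag, and threads the update policy (set, add, sub, scaled add) through as a functor so evalTo/addTo/subTo/scaleAndAddTo share one loop. A toy sketch of the same dispatch pattern (illustrative names, not Eigen's types):

    #include <iostream>

    struct false_type {};   // stands in for the column-major tag
    struct true_type {};    // stands in for the row-major tag

    struct set  { void operator()(double& d, double s) const { d = s; } };
    struct adds {
        double m_scale;
        explicit adds(double s) : m_scale(s) {}
        void operator()(double& d, double s) const { d += m_scale * s; }
    };

    // Column-major walk: update one "column" (here, one element) at a time.
    template<typename Func>
    void outer_update(double* dest, int n, double lhs, const double* rhs,
                      const Func& func, const false_type&)
    {
        for (int j = 0; j < n; ++j) func(dest[j], rhs[j] * lhs);
    }

    int main()
    {
        double dest[3] = {1, 1, 1};
        const double rhs[3] = {1, 2, 3};
        outer_update(dest, 3, 2.0, rhs, adds(0.5), false_type()); // dest += 0.5*2*rhs
        std::cout << dest[0] << " " << dest[1] << " " << dest[2] << "\n"; // 2 3 4
    }
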
diff --git a/extern/Eigen3/Eigen/src/Core/GenericPacketMath.h b/extern/Eigen3/Eigen/src/Core/GenericPacketMath.h
index 858fb243ec8..5f783ebeee8 100644
--- a/extern/Eigen3/Eigen/src/Core/GenericPacketMath.h
+++ b/extern/Eigen3/Eigen/src/Core/GenericPacketMath.h
@@ -106,7 +106,7 @@ pnegate(const Packet& a) { return -a; }
/** \internal \returns conj(a) (coeff-wise) */
template<typename Packet> inline Packet
-pconj(const Packet& a) { return conj(a); }
+pconj(const Packet& a) { return numext::conj(a); }
/** \internal \returns a * b (coeff-wise) */
template<typename Packet> inline Packet
@@ -130,7 +130,7 @@ pmax(const Packet& a,
/** \internal \returns the absolute value of \a a */
template<typename Packet> inline Packet
-pabs(const Packet& a) { return abs(a); }
+pabs(const Packet& a) { using std::abs; return abs(a); }
/** \internal \returns the bitwise and of \a a and \a b */
template<typename Packet> inline Packet
@@ -156,7 +156,11 @@ pload(const typename unpacket_traits<Packet>::type* from) { return *from; }
template<typename Packet> inline Packet
ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }
-/** \internal \returns a packet with elements of \a *from duplicated, e.g.: (from[0],from[0],from[1],from[1]) */
+/** \internal \returns a packet with elements of \a *from duplicated.
+ * For instance, for a packet of 8 elements, 4 scalar will be read from \a *from and
+ * duplicated to form: {from[0],from[0],from[1],from[1],,from[2],from[2],,from[3],from[3]}
+ * Currently, this function is only used for scalar * complex products.
+ */
template<typename Packet> inline Packet
ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
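
A scalar sketch of the duplication pattern ploaddup documents above, for a hypothetical 8-wide packet (plain arrays standing in for SIMD registers):

    #include <iostream>

    // Scalar emulation of ploaddup: read 4 values and duplicate each one,
    // matching the doc comment above.
    void ploaddup8(const double* from, double (&out)[8])
    {
        for (int i = 0; i < 8; ++i) out[i] = from[i / 2];
    }

    int main()
    {
        const double src[4] = {1, 2, 3, 4};
        double p[8];
        ploaddup8(src, p);
        for (double x : p) std::cout << x << " ";  // 1 1 2 2 3 3 4 4
        std::cout << "\n";
    }
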
@@ -215,7 +219,12 @@ template<typename Packet> inline Packet preverse(const Packet& a)
/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */
template<typename Packet> inline Packet pcplxflip(const Packet& a)
-{ return Packet(imag(a),real(a)); }
+{
+ // FIXME: uncomment the following in case we drop the internal imag and real functions.
+// using std::imag;
+// using std::real;
+ return Packet(imag(a),real(a));
+}
/**************************
* Special math functions
@@ -223,35 +232,35 @@ template<typename Packet> inline Packet pcplxflip(const Packet& a)
/** \internal \returns the sine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet psin(const Packet& a) { return sin(a); }
+Packet psin(const Packet& a) { using std::sin; return sin(a); }
/** \internal \returns the cosine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pcos(const Packet& a) { return cos(a); }
+Packet pcos(const Packet& a) { using std::cos; return cos(a); }
/** \internal \returns the tan of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet ptan(const Packet& a) { return tan(a); }
+Packet ptan(const Packet& a) { using std::tan; return tan(a); }
/** \internal \returns the arc sine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pasin(const Packet& a) { return asin(a); }
+Packet pasin(const Packet& a) { using std::asin; return asin(a); }
/** \internal \returns the arc cosine of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pacos(const Packet& a) { return acos(a); }
+Packet pacos(const Packet& a) { using std::acos; return acos(a); }
/** \internal \returns the exp of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet pexp(const Packet& a) { return exp(a); }
+Packet pexp(const Packet& a) { using std::exp; return exp(a); }
/** \internal \returns the log of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet plog(const Packet& a) { return log(a); }
+Packet plog(const Packet& a) { using std::log; return log(a); }
/** \internal \returns the square-root of \a a (coeff-wise) */
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
-Packet psqrt(const Packet& a) { return sqrt(a); }
+Packet psqrt(const Packet& a) { using std::sqrt; return sqrt(a); }
/***************************************************************************
* The following functions might not have to be overwritten for vectorized types
@@ -302,8 +311,21 @@ struct palign_impl
static inline void run(PacketType&, const PacketType&) {}
};
-/** \internal update \a first using the concatenation of the \a Offset last elements
- * of \a first and packet_size minus \a Offset first elements of \a second */
+/** \internal update \a first using the concatenation of the last (packet_size minus \a Offset)
+ * elements of \a first and the first \a Offset elements of \a second.
+ *
+ * This function is currently only used to optimize matrix-vector products on unaligned matrices.
+ * It takes 2 packets that represent a contiguous memory array, and returns a packet starting
+ * at the position \a Offset. For instance, for packets of 4 elements, we have:
+ * Input:
+ * - first = {f0,f1,f2,f3}
+ * - second = {s0,s1,s2,s3}
+ * Output:
+ * - if Offset==0 then {f0,f1,f2,f3}
+ * - if Offset==1 then {f1,f2,f3,s0}
+ * - if Offset==2 then {f2,f3,s0,s1}
+ * - if Offset==3 then {f3,s0,s1,s2}
+ */
template<int Offset,typename PacketType>
inline void palign(PacketType& first, const PacketType& second)
{
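
A scalar emulation of the palign behaviour tabulated above, for 4-wide packets (plain arrays standing in for SIMD registers; names are illustrative):

    #include <iostream>

    // Shift in Offset elements from `second`, per the table above.
    template<int Offset>
    void palign4(double (&first)[4], const double (&second)[4])
    {
        double out[4];
        for (int i = 0; i < 4; ++i)
            out[i] = (i + Offset < 4) ? first[i + Offset] : second[i + Offset - 4];
        for (int i = 0; i < 4; ++i) first[i] = out[i];
    }

    int main()
    {
        double f[4] = {0, 1, 2, 3}, s[4] = {4, 5, 6, 7};
        palign4<2>(f, s);
        std::cout << f[0] << f[1] << f[2] << f[3] << "\n";  // 2345, i.e. {f2,f3,s0,s1}
    }
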
diff --git a/extern/Eigen3/Eigen/src/Core/GlobalFunctions.h b/extern/Eigen3/Eigen/src/Core/GlobalFunctions.h
index e63726c4735..2acf9772334 100644
--- a/extern/Eigen3/Eigen/src/Core/GlobalFunctions.h
+++ b/extern/Eigen3/Eigen/src/Core/GlobalFunctions.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -11,7 +11,7 @@
#ifndef EIGEN_GLOBAL_FUNCTIONS_H
#define EIGEN_GLOBAL_FUNCTIONS_H
-#define EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(NAME,FUNCTOR) \
+#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR) \
template<typename Derived> \
inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \
NAME(const Eigen::ArrayBase<Derived>& x) { \
@@ -35,20 +35,21 @@
};
-namespace std
+namespace Eigen
{
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(real,scalar_real_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(imag,scalar_imag_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sin,scalar_sin_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(cos,scalar_cos_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(asin,scalar_asin_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(acos,scalar_acos_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(tan,scalar_tan_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(exp,scalar_exp_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(log,scalar_log_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(abs,scalar_abs_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sqrt,scalar_sqrt_op)
-
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj,scalar_conjugate_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos,scalar_acos_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan,scalar_tan_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op)
+
template<typename Derived>
inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar>, const Derived>
pow(const Eigen::ArrayBase<Derived>& x, const typename Derived::Scalar& exponent) {
@@ -64,16 +65,13 @@ namespace std
exponents.derived()
);
}
-}
-
-namespace Eigen
-{
+
/**
* \brief Component-wise division of a scalar by array elements.
**/
template <typename Derived>
inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>, const Derived>
- operator/(typename Derived::Scalar s, const Eigen::ArrayBase<Derived>& a)
+ operator/(const typename Derived::Scalar& s, const Eigen::ArrayBase<Derived>& a)
{
return Eigen::CwiseUnaryOp<Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>, const Derived>(
a.derived(),
@@ -85,19 +83,10 @@ namespace Eigen
{
EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op)
EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sin,scalar_sin_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(cos,scalar_cos_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(asin,scalar_asin_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(acos,scalar_acos_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(tan,scalar_tan_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(exp,scalar_exp_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(log,scalar_log_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs,scalar_abs_op)
EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op)
- EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sqrt,scalar_sqrt_op)
}
}
-// TODO: cleanly disable those functions that are not supported on Array (internal::real_ref, internal::random, internal::isApprox...)
+// TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, internal::isApprox...)
#endif // EIGEN_GLOBAL_FUNCTIONS_H
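
With these functions now declared in namespace Eigen instead of std, coefficient-wise math on arrays is spelled as below (a minimal usage sketch, assuming an Eigen 3.2-era build):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
        Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(4, 0.0, 1.5);
        // Coefficient-wise sin and sqrt, found in namespace Eigen after this patch.
        Eigen::ArrayXd b = Eigen::sin(a) + Eigen::sqrt(a);
        std::cout << b.transpose() << "\n";
    }
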
diff --git a/extern/Eigen3/Eigen/src/Core/IO.h b/extern/Eigen3/Eigen/src/Core/IO.h
index cc8e18a0076..8d4bc59e9dd 100644
--- a/extern/Eigen3/Eigen/src/Core/IO.h
+++ b/extern/Eigen3/Eigen/src/Core/IO.h
@@ -55,9 +55,8 @@ struct IOFormat
const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="",
const std::string& _matPrefix="", const std::string& _matSuffix="")
: matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator),
- coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
+ rowSpacer(""), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
{
- rowSpacer = "";
int i = int(matSuffix.length())-1;
while (i>=0 && matSuffix[i]!='\n')
{
@@ -129,6 +128,7 @@ struct significant_decimals_default_impl
static inline int run()
{
using std::ceil;
+ using std::log;
return cast<RealScalar,int>(ceil(-log(NumTraits<RealScalar>::epsilon())/log(RealScalar(10))));
}
};
@@ -185,21 +185,22 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
explicit_precision = fmt.precision;
}
+ std::streamsize old_precision = 0;
+ if(explicit_precision) old_precision = s.precision(explicit_precision);
+
bool align_cols = !(fmt.flags & DontAlignCols);
if(align_cols)
{
// compute the largest width
- for(Index j = 1; j < m.cols(); ++j)
+ for(Index j = 0; j < m.cols(); ++j)
for(Index i = 0; i < m.rows(); ++i)
{
std::stringstream sstr;
- if(explicit_precision) sstr.precision(explicit_precision);
+ sstr.copyfmt(s);
sstr << m.coeff(i,j);
width = std::max<Index>(width, Index(sstr.str().length()));
}
}
- std::streamsize old_precision = 0;
- if(explicit_precision) old_precision = s.precision(explicit_precision);
s << fmt.matPrefix;
for(Index i = 0; i < m.rows(); ++i)
{
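
The reordering above makes the width pass inherit the stream's precision (via copyfmt) and measure column 0 as well, so alignment is computed from the same text that is printed. A minimal usage sketch:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
        Eigen::Matrix2d m;
        m << 1.0/3.0, 100.0,
             0.5,     7.0;
        Eigen::IOFormat fmt(4);             // explicit precision of 4 digits
        std::cout << m.format(fmt) << "\n"; // columns padded to a common width
    }
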
diff --git a/extern/Eigen3/Eigen/src/Core/Map.h b/extern/Eigen3/Eigen/src/Core/Map.h
index 15a19226e29..f804c89d63e 100644
--- a/extern/Eigen3/Eigen/src/Core/Map.h
+++ b/extern/Eigen3/Eigen/src/Core/Map.h
@@ -133,36 +133,36 @@ template<typename PlainObjectType, int MapOptions, typename StrideType> class Ma
/** Constructor in the fixed-size case.
*
- * \param data pointer to the array to map
- * \param stride optional Stride object, passing the strides.
+ * \param dataPtr pointer to the array to map
+ * \param a_stride optional Stride object, passing the strides.
*/
- inline Map(PointerArgType data, const StrideType& stride = StrideType())
- : Base(cast_to_pointer_type(data)), m_stride(stride)
+ inline Map(PointerArgType dataPtr, const StrideType& a_stride = StrideType())
+ : Base(cast_to_pointer_type(dataPtr)), m_stride(a_stride)
{
PlainObjectType::Base::_check_template_params();
}
/** Constructor in the dynamic-size vector case.
*
- * \param data pointer to the array to map
- * \param size the size of the vector expression
- * \param stride optional Stride object, passing the strides.
+ * \param dataPtr pointer to the array to map
+ * \param a_size the size of the vector expression
+ * \param a_stride optional Stride object, passing the strides.
*/
- inline Map(PointerArgType data, Index size, const StrideType& stride = StrideType())
- : Base(cast_to_pointer_type(data), size), m_stride(stride)
+ inline Map(PointerArgType dataPtr, Index a_size, const StrideType& a_stride = StrideType())
+ : Base(cast_to_pointer_type(dataPtr), a_size), m_stride(a_stride)
{
PlainObjectType::Base::_check_template_params();
}
/** Constructor in the dynamic-size matrix case.
*
- * \param data pointer to the array to map
- * \param rows the number of rows of the matrix expression
- * \param cols the number of columns of the matrix expression
- * \param stride optional Stride object, passing the strides.
+ * \param dataPtr pointer to the array to map
+ * \param nbRows the number of rows of the matrix expression
+ * \param nbCols the number of columns of the matrix expression
+ * \param a_stride optional Stride object, passing the strides.
*/
- inline Map(PointerArgType data, Index rows, Index cols, const StrideType& stride = StrideType())
- : Base(cast_to_pointer_type(data), rows, cols), m_stride(stride)
+ inline Map(PointerArgType dataPtr, Index nbRows, Index nbCols, const StrideType& a_stride = StrideType())
+ : Base(cast_to_pointer_type(dataPtr), nbRows, nbCols), m_stride(a_stride)
{
PlainObjectType::Base::_check_template_params();
}
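
For reference, the three constructor forms documented above in use (a minimal sketch):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
        double buf[6] = {1, 2, 3, 4, 5, 6};
        // Fixed-size, dynamic-size vector, and dynamic-size matrix views
        // over the same existing storage.
        Eigen::Map<Eigen::Vector3d> v(buf);
        Eigen::Map<Eigen::VectorXd> w(buf, 6);
        Eigen::Map<Eigen::MatrixXd> m(buf, 2, 3);
        std::cout << m << "\n" << v.transpose() << "\n" << w.sum() << "\n";
    }
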
diff --git a/extern/Eigen3/Eigen/src/Core/MapBase.h b/extern/Eigen3/Eigen/src/Core/MapBase.h
index a388d61ea92..6876de588c0 100644
--- a/extern/Eigen3/Eigen/src/Core/MapBase.h
+++ b/extern/Eigen3/Eigen/src/Core/MapBase.h
@@ -87,9 +87,9 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
*/
inline const Scalar* data() const { return m_data; }
- inline const Scalar& coeff(Index row, Index col) const
+ inline const Scalar& coeff(Index rowId, Index colId) const
{
- return m_data[col * colStride() + row * rowStride()];
+ return m_data[colId * colStride() + rowId * rowStride()];
}
inline const Scalar& coeff(Index index) const
@@ -98,9 +98,9 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
return m_data[index * innerStride()];
}
- inline const Scalar& coeffRef(Index row, Index col) const
+ inline const Scalar& coeffRef(Index rowId, Index colId) const
{
- return this->m_data[col * colStride() + row * rowStride()];
+ return this->m_data[colId * colStride() + rowId * rowStride()];
}
inline const Scalar& coeffRef(Index index) const
@@ -110,10 +110,10 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
}
template<int LoadMode>
- inline PacketScalar packet(Index row, Index col) const
+ inline PacketScalar packet(Index rowId, Index colId) const
{
return internal::ploadt<PacketScalar, LoadMode>
- (m_data + (col * colStride() + row * rowStride()));
+ (m_data + (colId * colStride() + rowId * rowStride()));
}
template<int LoadMode>
@@ -123,29 +123,29 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
}
- inline MapBase(PointerType data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
+ inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
checkSanity();
}
- inline MapBase(PointerType data, Index size)
- : m_data(data),
- m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)),
- m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime))
+ inline MapBase(PointerType dataPtr, Index vecSize)
+ : m_data(dataPtr),
+ m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),
+ m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime))
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- eigen_assert(size >= 0);
- eigen_assert(data == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
+ eigen_assert(vecSize >= 0);
+ eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize);
checkSanity();
}
- inline MapBase(PointerType data, Index rows, Index cols)
- : m_data(data), m_rows(rows), m_cols(cols)
+ inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols)
+ : m_data(dataPtr), m_rows(nbRows), m_cols(nbCols)
{
- eigen_assert( (data == 0)
- || ( rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
- && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));
+ eigen_assert( (dataPtr == 0)
+ || ( nbRows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows)
+ && nbCols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols)));
checkSanity();
}
@@ -210,23 +210,23 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>
}
template<int StoreMode>
- inline void writePacket(Index row, Index col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& val)
{
internal::pstoret<Scalar, PacketScalar, StoreMode>
- (this->m_data + (col * colStride() + row * rowStride()), x);
+ (this->m_data + (col * colStride() + row * rowStride()), val);
}
template<int StoreMode>
- inline void writePacket(Index index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& val)
{
EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
internal::pstoret<Scalar, PacketScalar, StoreMode>
- (this->m_data + index * innerStride(), x);
+ (this->m_data + index * innerStride(), val);
}
- explicit inline MapBase(PointerType data) : Base(data) {}
- inline MapBase(PointerType data, Index size) : Base(data, size) {}
- inline MapBase(PointerType data, Index rows, Index cols) : Base(data, rows, cols) {}
+ explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {}
+ inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {}
+ inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols) : Base(dataPtr, nbRows, nbCols) {}
Derived& operator=(const MapBase& other)
{
diff --git a/extern/Eigen3/Eigen/src/Core/MathFunctions.h b/extern/Eigen3/Eigen/src/Core/MathFunctions.h
index 05e913f2fec..2bfc5ebd98a 100644
--- a/extern/Eigen3/Eigen/src/Core/MathFunctions.h
+++ b/extern/Eigen3/Eigen/src/Core/MathFunctions.h
@@ -51,16 +51,15 @@ struct global_math_functions_filtering_base
typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type;
};
-#define EIGEN_MATHFUNC_IMPL(func, scalar) func##_impl<typename global_math_functions_filtering_base<scalar>::type>
-#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename func##_retval<typename global_math_functions_filtering_base<scalar>::type>::type
-
+#define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>
+#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>::type
/****************************************************************************
* Implementation of real *
****************************************************************************/
-template<typename Scalar>
-struct real_impl
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct real_default_impl
{
typedef typename NumTraits<Scalar>::Real RealScalar;
static inline RealScalar run(const Scalar& x)
@@ -69,34 +68,32 @@ struct real_impl
}
};
-template<typename RealScalar>
-struct real_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct real_default_impl<Scalar,true>
{
- static inline RealScalar run(const std::complex<RealScalar>& x)
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar run(const Scalar& x)
{
using std::real;
return real(x);
}
};
+template<typename Scalar> struct real_impl : real_default_impl<Scalar> {};
+
template<typename Scalar>
struct real_retval
{
typedef typename NumTraits<Scalar>::Real type;
};
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
-}
/****************************************************************************
* Implementation of imag *
****************************************************************************/
-template<typename Scalar>
-struct imag_impl
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct imag_default_impl
{
typedef typename NumTraits<Scalar>::Real RealScalar;
static inline RealScalar run(const Scalar&)
@@ -105,28 +102,25 @@ struct imag_impl
}
};
-template<typename RealScalar>
-struct imag_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct imag_default_impl<Scalar,true>
{
- static inline RealScalar run(const std::complex<RealScalar>& x)
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar run(const Scalar& x)
{
using std::imag;
return imag(x);
}
};
+template<typename Scalar> struct imag_impl : imag_default_impl<Scalar> {};
+
template<typename Scalar>
struct imag_retval
{
typedef typename NumTraits<Scalar>::Real type;
};
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
-}
-
/****************************************************************************
* Implementation of real_ref *
****************************************************************************/
@@ -151,18 +145,6 @@ struct real_ref_retval
typedef typename NumTraits<Scalar>::Real & type;
};
-template<typename Scalar>
-inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
-{
- return real_ref_impl<Scalar>::run(x);
-}
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
-}
-
/****************************************************************************
* Implementation of imag_ref *
****************************************************************************/
@@ -203,23 +185,11 @@ struct imag_ref_retval
typedef typename NumTraits<Scalar>::Real & type;
};
-template<typename Scalar>
-inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
-{
- return imag_ref_impl<Scalar>::run(x);
-}
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
-}
-
/****************************************************************************
* Implementation of conj *
****************************************************************************/
-template<typename Scalar>
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
struct conj_impl
{
static inline Scalar run(const Scalar& x)
@@ -228,10 +198,10 @@ struct conj_impl
}
};
-template<typename RealScalar>
-struct conj_impl<std::complex<RealScalar> >
+template<typename Scalar>
+struct conj_impl<Scalar,true>
{
- static inline std::complex<RealScalar> run(const std::complex<RealScalar>& x)
+ static inline Scalar run(const Scalar& x)
{
using std::conj;
return conj(x);
@@ -244,39 +214,6 @@ struct conj_retval
typedef Scalar type;
};
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of abs *
-****************************************************************************/
-
-template<typename Scalar>
-struct abs_impl
-{
- typedef typename NumTraits<Scalar>::Real RealScalar;
- static inline RealScalar run(const Scalar& x)
- {
- using std::abs;
- return abs(x);
- }
-};
-
-template<typename Scalar>
-struct abs_retval
-{
- typedef typename NumTraits<Scalar>::Real type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) abs(const Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(abs, Scalar)::run(x);
-}
-
/****************************************************************************
* Implementation of abs2 *
****************************************************************************/
@@ -306,12 +243,6 @@ struct abs2_retval
typedef typename NumTraits<Scalar>::Real type;
};
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
-}
-
/****************************************************************************
* Implementation of norm1 *
****************************************************************************/
@@ -322,6 +253,7 @@ struct norm1_default_impl
typedef typename NumTraits<Scalar>::Real RealScalar;
static inline RealScalar run(const Scalar& x)
{
+ using std::abs;
return abs(real(x)) + abs(imag(x));
}
};
@@ -331,6 +263,7 @@ struct norm1_default_impl<Scalar, false>
{
static inline Scalar run(const Scalar& x)
{
+ using std::abs;
return abs(x);
}
};
@@ -344,12 +277,6 @@ struct norm1_retval
typedef typename NumTraits<Scalar>::Real type;
};
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
-}
-
/****************************************************************************
* Implementation of hypot *
****************************************************************************/
@@ -362,9 +289,12 @@ struct hypot_impl
{
using std::max;
using std::min;
+ using std::abs;
+ using std::sqrt;
RealScalar _x = abs(x);
RealScalar _y = abs(y);
RealScalar p = (max)(_x, _y);
+ if(p==RealScalar(0)) return 0;
RealScalar q = (min)(_x, _y);
RealScalar qp = q/p;
return p * sqrt(RealScalar(1) + qp*qp);
@@ -377,12 +307,6 @@ struct hypot_retval
typedef typename NumTraits<Scalar>::Real type;
};
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
-{
- return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
-}
-
/****************************************************************************
* Implementation of cast *
****************************************************************************/
@@ -405,97 +329,29 @@ inline NewType cast(const OldType& x)
}
/****************************************************************************
-* Implementation of sqrt *
+* Implementation of atanh2 *
****************************************************************************/
template<typename Scalar, bool IsInteger>
-struct sqrt_default_impl
-{
- static inline Scalar run(const Scalar& x)
- {
- using std::sqrt;
- return sqrt(x);
- }
-};
-
-template<typename Scalar>
-struct sqrt_default_impl<Scalar, true>
-{
- static inline Scalar run(const Scalar&)
- {
-#ifdef EIGEN2_SUPPORT
- eigen_assert(!NumTraits<Scalar>::IsInteger);
-#else
- EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
-#endif
- return Scalar(0);
- }
-};
-
-template<typename Scalar>
-struct sqrt_impl : sqrt_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
-
-template<typename Scalar>
-struct sqrt_retval
-{
- typedef Scalar type;
-};
-
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x)
-{
- return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x);
-}
-
-/****************************************************************************
-* Implementation of standard unary real functions (exp, log, sin, cos, ... *
-****************************************************************************/
-
-// This macro instanciate all the necessary template mechanism which is common to all unary real functions.
-#define EIGEN_MATHFUNC_STANDARD_REAL_UNARY(NAME) \
- template<typename Scalar, bool IsInteger> struct NAME##_default_impl { \
- static inline Scalar run(const Scalar& x) { using std::NAME; return NAME(x); } \
- }; \
- template<typename Scalar> struct NAME##_default_impl<Scalar, true> { \
- static inline Scalar run(const Scalar&) { \
- EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) \
- return Scalar(0); \
- } \
- }; \
- template<typename Scalar> struct NAME##_impl \
- : NAME##_default_impl<Scalar, NumTraits<Scalar>::IsInteger> \
- {}; \
- template<typename Scalar> struct NAME##_retval { typedef Scalar type; }; \
- template<typename Scalar> \
- inline EIGEN_MATHFUNC_RETVAL(NAME, Scalar) NAME(const Scalar& x) { \
- return EIGEN_MATHFUNC_IMPL(NAME, Scalar)::run(x); \
- }
-
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(log)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(sin)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(cos)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(tan)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(asin)
-EIGEN_MATHFUNC_STANDARD_REAL_UNARY(acos)
-
-/****************************************************************************
-* Implementation of atan2 *
-****************************************************************************/
-
-template<typename Scalar, bool IsInteger>
-struct atan2_default_impl
+struct atanh2_default_impl
{
typedef Scalar retval;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
static inline Scalar run(const Scalar& x, const Scalar& y)
{
- using std::atan2;
- return atan2(x, y);
+ using std::abs;
+ using std::log;
+ using std::sqrt;
+ Scalar z = x / y;
+ if (y == Scalar(0) || abs(z) > sqrt(NumTraits<RealScalar>::epsilon()))
+ return RealScalar(0.5) * log((y + x) / (y - x));
+ else
+ return z + z*z*z / RealScalar(3);
}
};
template<typename Scalar>
-struct atan2_default_impl<Scalar, true>
+struct atanh2_default_impl<Scalar, true>
{
static inline Scalar run(const Scalar&, const Scalar&)
{
@@ -505,20 +361,14 @@ struct atan2_default_impl<Scalar, true>
};
template<typename Scalar>
-struct atan2_impl : atan2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+struct atanh2_impl : atanh2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
template<typename Scalar>
-struct atan2_retval
+struct atanh2_retval
{
typedef Scalar type;
};
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& y)
-{
- return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y);
-}
-
/****************************************************************************
* Implementation of pow *
****************************************************************************/
@@ -562,12 +412,6 @@ struct pow_retval
typedef Scalar type;
};
-template<typename Scalar>
-inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
-{
- return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
-}
-
/****************************************************************************
* Implementation of random *
****************************************************************************/
@@ -666,11 +510,10 @@ struct random_default_impl<Scalar, false, true>
#else
enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value,
scalar_bits = sizeof(Scalar) * CHAR_BIT,
- shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits))
+ shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)),
+ offset = NumTraits<Scalar>::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0
};
- Scalar x = Scalar(std::rand() >> shift);
- Scalar offset = NumTraits<Scalar>::IsSigned ? Scalar(1 << (rand_bits-1)) : Scalar(0);
- return x - offset;
+ return Scalar((std::rand() >> shift) - offset);
#endif
}
};
@@ -702,6 +545,97 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
}
+} // end namespace internal
+
+/****************************************************************************
+* Generic math function *
+****************************************************************************/
+
+namespace numext {
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
+{
+ return internal::real_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
+{
+ return internal::imag_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
+{
+ return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y)
+{
+ return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
+{
+ return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
+}
+
+// std::isfinite is non-standard, so let's define our own version,
+// even though it is not very efficient.
+template<typename T> bool (isfinite)(const T& x)
+{
+ return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
+}
+
+} // end namespace numext
+
+namespace internal {
+
/****************************************************************************
* Implementation of fuzzy comparisons *
****************************************************************************/
@@ -718,11 +652,13 @@ struct scalar_fuzzy_default_impl<Scalar, false, false>
template<typename OtherScalar>
static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
{
+ using std::abs;
return abs(x) <= abs(y) * prec;
}
static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
{
using std::min;
+ using std::abs;
return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
}
static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
@@ -757,12 +693,12 @@ struct scalar_fuzzy_default_impl<Scalar, true, false>
template<typename OtherScalar>
static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
{
- return abs2(x) <= abs2(y) * prec * prec;
+ return numext::abs2(x) <= numext::abs2(y) * prec * prec;
}
static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
{
using std::min;
- return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
+ return numext::abs2(x - y) <= (min)(numext::abs2(x), numext::abs2(y)) * prec * prec;
}
};
@@ -824,17 +760,7 @@ template<> struct scalar_fuzzy_impl<bool>
};
-/****************************************************************************
-* Special functions *
-****************************************************************************/
-
-// std::isfinite is non standard, so let's define our own version,
-// even though it is not very efficient.
-template<typename T> bool (isfinite)(const T& x)
-{
- return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
-}
-
+
} // end namespace internal
} // end namespace Eigen
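
The free functions gathered into the new numext namespace keep their previous semantics; the hypot change above also adds an early return when both inputs are zero (atanh2 above guards small arguments analogously with a truncated series). A standalone sketch of the overflow-safe hypot formula:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // Factor out the larger magnitude p so the intermediate (q/p)^2 stays
    // in range, and return 0 early when both inputs are zero.
    static double safe_hypot(double x, double y)
    {
        using std::abs; using std::sqrt;
        double ax = abs(x), ay = abs(y);
        double p = std::max(ax, ay);
        if (p == 0.0) return 0.0;
        double qp = std::min(ax, ay) / p;
        return p * sqrt(1.0 + qp * qp);
    }

    int main()
    {
        std::printf("%g\n", safe_hypot(3e300, 4e300)); // 5e300, no overflow
    }
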
diff --git a/extern/Eigen3/Eigen/src/Core/Matrix.h b/extern/Eigen3/Eigen/src/Core/Matrix.h
index 99160b591b0..d7d0b5b9a4f 100644
--- a/extern/Eigen3/Eigen/src/Core/Matrix.h
+++ b/extern/Eigen3/Eigen/src/Core/Matrix.h
@@ -200,16 +200,16 @@ class Matrix
*
* \sa resize(Index,Index)
*/
- EIGEN_STRONG_INLINE explicit Matrix() : Base()
+ EIGEN_STRONG_INLINE Matrix() : Base()
{
Base::_check_template_params();
- EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
// FIXME is it still needed
Matrix(internal::constructor_without_unaligned_array_assert)
: Base(internal::constructor_without_unaligned_array_assert())
- { Base::_check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED }
+ { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
/** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
*
@@ -224,7 +224,7 @@ class Matrix
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix)
eigen_assert(dim >= 0);
eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
- EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -304,7 +304,7 @@ class Matrix
: Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
{
Base::_check_template_params();
- Base::resize(other.rows(), other.cols());
+ Base::_resize_to_match(other);
// FIXME/CHECK: isn't *this = other.derived() more efficient. it allows to
// go for pure _set() implementations, right?
*this = other;
diff --git a/extern/Eigen3/Eigen/src/Core/MatrixBase.h b/extern/Eigen3/Eigen/src/Core/MatrixBase.h
index c1e0ed132cc..344b38f2fc7 100644
--- a/extern/Eigen3/Eigen/src/Core/MatrixBase.h
+++ b/extern/Eigen3/Eigen/src/Core/MatrixBase.h
@@ -162,6 +162,9 @@ template<typename Derived> class MatrixBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ProductDerived, typename Lhs, typename Rhs>
Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
+
+ template<typename MatrixPower, typename Lhs, typename Rhs>
+ Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
#endif // not EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
@@ -212,8 +215,8 @@ template<typename Derived> class MatrixBase
typedef Diagonal<Derived> DiagonalReturnType;
DiagonalReturnType diagonal();
- typedef const Diagonal<const Derived> ConstDiagonalReturnType;
- const ConstDiagonalReturnType diagonal() const;
+ typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
+ ConstDiagonalReturnType diagonal() const;
template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; };
@@ -224,11 +227,11 @@ template<typename Derived> class MatrixBase
// Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
// On the other hand they confuse MSVC8...
#if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
- typename MatrixBase::template DiagonalIndexReturnType<Dynamic>::Type diagonal(Index index);
- typename MatrixBase::template ConstDiagonalIndexReturnType<Dynamic>::Type diagonal(Index index) const;
+ typename MatrixBase::template DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
+ typename MatrixBase::template ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
#else
- typename DiagonalIndexReturnType<Dynamic>::Type diagonal(Index index);
- typename ConstDiagonalIndexReturnType<Dynamic>::Type diagonal(Index index) const;
+ typename DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
+ typename ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
#endif
#ifdef EIGEN2_SUPPORT
@@ -237,7 +240,7 @@ template<typename Derived> class MatrixBase
// huuuge hack. make Eigen2's matrix.part<Diagonal>() work in eigen3. Problem: Diagonal is now a class template instead
// of an integer constant. Solution: overload the part() method template wrt template parameters list.
- template<template<typename T, int n> class U>
+ template<template<typename T, int N> class U>
const DiagonalWrapper<ConstDiagonalReturnType> part() const
{ return diagonal().asDiagonal(); }
#endif // EIGEN2_SUPPORT
@@ -255,7 +258,7 @@ template<typename Derived> class MatrixBase
template<unsigned int UpLo> typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
const SparseView<Derived> sparseView(const Scalar& m_reference = Scalar(0),
- typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
+ const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
static const IdentityReturnType Identity();
static const IdentityReturnType Identity(Index rows, Index cols);
static const BasisReturnType Unit(Index size, Index i);
@@ -271,16 +274,16 @@ template<typename Derived> class MatrixBase
Derived& setIdentity();
Derived& setIdentity(Index rows, Index cols);
- bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isUpperTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isLowerTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived>
bool isOrthogonal(const MatrixBase<OtherDerived>& other,
- RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
- bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
/** \returns true if each coefficients of \c *this and \a other are all exactly equal.
* \warning When using floating point scalar values you probably should rather use a
@@ -314,7 +317,7 @@ template<typename Derived> class MatrixBase
MatrixBase<Derived>& matrix() { return *this; }
const MatrixBase<Derived>& matrix() const { return *this; }
- /** \returns an \link ArrayBase Array \endlink expression of this matrix
+ /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix
* \sa ArrayBase::matrix() */
ArrayWrapper<Derived> array() { return derived(); }
const ArrayWrapper<const Derived> array() const { return derived(); }
@@ -454,6 +457,7 @@ template<typename Derived> class MatrixBase
const MatrixFunctionReturnValue<Derived> sin() const;
const MatrixSquareRootReturnValue<Derived> sqrt() const;
const MatrixLogarithmReturnValue<Derived> log() const;
+ const MatrixPowerReturnValue<Derived> pow(const RealScalar& p) const;
#ifdef EIGEN2_SUPPORT
template<typename ProductDerived, typename Lhs, typename Rhs>
@@ -506,6 +510,51 @@ template<typename Derived> class MatrixBase
{EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
};
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** replaces \c *this by \c *this * \a other.
+ *
+ * \returns a reference to \c *this
+ *
+ * Example: \include MatrixBase_applyOnTheRight.cpp
+ * Output: \verbinclude MatrixBase_applyOnTheRight.out
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived&
+MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheRight(derived());
+ return derived();
+}
+
+/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
+ *
+ * Example: \include MatrixBase_applyOnTheRight.cpp
+ * Output: \verbinclude MatrixBase_applyOnTheRight.out
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheRight(derived());
+}
+
+/** replaces \c *this by \a other * \c *this.
+ *
+ * Example: \include MatrixBase_applyOnTheLeft.cpp
+ * Output: \verbinclude MatrixBase_applyOnTheLeft.out
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheLeft(derived());
+}
+
} // end namespace Eigen
#endif // EIGEN_MATRIXBASE_H
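
The newly documented in-place application helpers in use (a minimal sketch):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
        Eigen::Matrix2d a, b;
        a << 1, 2,
             3, 4;
        b << 0, 1,
             1, 0;
        a.applyOnTheRight(b);   // equivalent to a *= b
        std::cout << a << "\n";
    }
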
diff --git a/extern/Eigen3/Eigen/src/Core/NoAlias.h b/extern/Eigen3/Eigen/src/Core/NoAlias.h
index ecb3fa2850e..768bfb18ca4 100644
--- a/extern/Eigen3/Eigen/src/Core/NoAlias.h
+++ b/extern/Eigen3/Eigen/src/Core/NoAlias.h
@@ -80,8 +80,17 @@ class NoAlias
template<typename Lhs, typename Rhs, int NestingFlags>
EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
{ return m_expression.derived() -= CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+
+ template<typename OtherDerived>
+ ExpressionType& operator=(const ReturnByValue<OtherDerived>& func)
+ { return m_expression = func; }
#endif
+ ExpressionType& expression() const
+ {
+ return m_expression;
+ }
+
protected:
ExpressionType& m_expression;
};
diff --git a/extern/Eigen3/Eigen/src/Core/NumTraits.h b/extern/Eigen3/Eigen/src/Core/NumTraits.h
index c94ef026b42..bac9e50b857 100644
--- a/extern/Eigen3/Eigen/src/Core/NumTraits.h
+++ b/extern/Eigen3/Eigen/src/Core/NumTraits.h
@@ -140,6 +140,9 @@ struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
AddCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::AddCost,
MulCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::MulCost
};
+
+ static inline RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); }
+ static inline RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); }
};
} // end namespace Eigen
diff --git a/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h b/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h
index bc29f814205..1297b8413fb 100644
--- a/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h
@@ -105,13 +105,13 @@ class PermutationBase : public EigenBase<Derived>
#endif
/** \returns the number of rows */
- inline Index rows() const { return indices().size(); }
+ inline Index rows() const { return Index(indices().size()); }
/** \returns the number of columns */
- inline Index cols() const { return indices().size(); }
+ inline Index cols() const { return Index(indices().size()); }
/** \returns the size of a side of the respective square matrix, i.e., the number of indices */
- inline Index size() const { return indices().size(); }
+ inline Index size() const { return Index(indices().size()); }
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename DenseDerived>
@@ -139,9 +139,9 @@ class PermutationBase : public EigenBase<Derived>
/** Resizes to given size.
*/
- inline void resize(Index size)
+ inline void resize(Index newSize)
{
- indices().resize(size);
+ indices().resize(newSize);
}
/** Sets *this to be the identity permutation matrix */
@@ -153,9 +153,9 @@ class PermutationBase : public EigenBase<Derived>
/** Sets *this to be the identity permutation matrix of given size.
*/
- void setIdentity(Index size)
+ void setIdentity(Index newSize)
{
- resize(size);
+ resize(newSize);
setIdentity();
}
@@ -317,7 +317,7 @@ class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompile
* array's size.
*/
template<typename Other>
- explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices)
+ explicit inline PermutationMatrix(const MatrixBase<Other>& a_indices) : m_indices(a_indices)
{}
/** Convert the Transpositions \a tr to a permutation matrix */
@@ -406,12 +406,12 @@ class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,
typedef typename IndicesType::Scalar Index;
#endif
- inline Map(const Index* indices)
- : m_indices(indices)
+ inline Map(const Index* indicesPtr)
+ : m_indices(indicesPtr)
{}
- inline Map(const Index* indices, Index size)
- : m_indices(indices,size)
+ inline Map(const Index* indicesPtr, Index size)
+ : m_indices(indicesPtr,size)
{}
/** Copies the other permutation into *this */
@@ -490,8 +490,8 @@ class PermutationWrapper : public PermutationBase<PermutationWrapper<_IndicesTyp
typedef typename Traits::IndicesType IndicesType;
#endif
- inline PermutationWrapper(const IndicesType& indices)
- : m_indices(indices)
+ inline PermutationWrapper(const IndicesType& a_indices)
+ : m_indices(a_indices)
{}
/** const version of indices(). */
@@ -541,24 +541,26 @@ struct permut_matrix_product_retval
: public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
{
typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+ typedef typename MatrixType::Index Index;
permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
: m_permutation(perm), m_matrix(matrix)
{}
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
template<typename Dest> inline void evalTo(Dest& dst) const
{
- const int n = Side==OnTheLeft ? rows() : cols();
-
+ const Index n = Side==OnTheLeft ? rows() : cols();
+ // FIXME we need an is_same for expressions that is not sensitive to constness. For instance
+ // is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
{
// apply the permutation inplace
Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
mask.fill(false);
- int r = 0;
+ Index r = 0;
while(r < m_permutation.size())
{
// search for the next seed
@@ -566,10 +568,10 @@ struct permut_matrix_product_retval
if(r>=m_permutation.size())
break;
// we got one, let's follow it until we are back to the seed
- int k0 = r++;
- int kPrev = k0;
+ Index k0 = r++;
+ Index kPrev = k0;
mask.coeffRef(k0) = true;
- for(int k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
+ for(Index k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
{
Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)
.swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
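
The evalTo code above permutes the destination in place by following the cycles of the permutation, using a boolean mask to mark positions that are already final. A standalone sketch of the same strategy on a plain array, illustrative rather than the Eigen internals verbatim:

#include <utility>
#include <vector>

// Move element i to position perm[i], in place, walking each cycle once.
void apply_permutation_in_place(std::vector<double>& data,
                                const std::vector<int>& perm)
{
  const int n = static_cast<int>(perm.size());
  std::vector<bool> done(n, false);
  for (int seed = 0; seed < n; ++seed)
  {
    if (done[seed]) continue;          // position already handled
    done[seed] = true;
    for (int k = perm[seed]; k != seed; k = perm[k])
    {
      std::swap(data[seed], data[k]);  // rotate the cycle one step
      done[k] = true;
    }
  }
}
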
diff --git a/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h b/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
index 71c74309acc..dd34b59e541 100644
--- a/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
+++ b/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
@@ -11,30 +11,46 @@
#ifndef EIGEN_DENSESTORAGEBASE_H
#define EIGEN_DENSESTORAGEBASE_H
-#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
-# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
+#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO)
+# define EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
+#elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN)
+# define EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=std::numeric_limits<Scalar>::quiet_NaN();
#else
-# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+# undef EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#endif
namespace Eigen {
namespace internal {
-template<typename Index>
-EIGEN_ALWAYS_INLINE void check_rows_cols_for_overflow(Index rows, Index cols)
-{
- // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
- // we assume Index is signed
- Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed
- bool error = (rows < 0 || cols < 0) ? true
- : (rows == 0 || cols == 0) ? false
- : (rows > max_index / cols);
- if (error)
- throw_std_bad_alloc();
-}
-
-template <typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
+template<int MaxSizeAtCompileTime> struct check_rows_cols_for_overflow {
+ template<typename Index>
+ static EIGEN_ALWAYS_INLINE void run(Index, Index)
+ {
+ }
+};
+
+template<> struct check_rows_cols_for_overflow<Dynamic> {
+ template<typename Index>
+ static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols)
+ {
+ // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
+ // we assume Index is signed
+ Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed
+ bool error = (rows == 0 || cols == 0) ? false
+ : (rows > max_index / cols);
+ if (error)
+ throw_std_bad_alloc();
+ }
+};
+
+template <typename Derived,
+ typename OtherDerived = Derived,
+ bool IsVector = bool(Derived::IsVectorAtCompileTime) && bool(OtherDerived::IsVectorAtCompileTime)>
+struct conservative_resize_like_impl;
template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;
@@ -119,12 +135,12 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
- EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const
+ EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const
{
if(Flags & RowMajorBit)
- return m_storage.data()[col + row * m_storage.cols()];
+ return m_storage.data()[colId + rowId * m_storage.cols()];
else // column-major
- return m_storage.data()[row + col * m_storage.rows()];
+ return m_storage.data()[rowId + colId * m_storage.rows()];
}
EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
@@ -132,12 +148,12 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
return m_storage.data()[index];
}
- EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId)
{
if(Flags & RowMajorBit)
- return m_storage.data()[col + row * m_storage.cols()];
+ return m_storage.data()[colId + rowId * m_storage.cols()];
else // column-major
- return m_storage.data()[row + col * m_storage.rows()];
+ return m_storage.data()[rowId + colId * m_storage.rows()];
}
EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
@@ -145,12 +161,12 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
return m_storage.data()[index];
}
- EIGEN_STRONG_INLINE const Scalar& coeffRef(Index row, Index col) const
+ EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const
{
if(Flags & RowMajorBit)
- return m_storage.data()[col + row * m_storage.cols()];
+ return m_storage.data()[colId + rowId * m_storage.cols()];
else // column-major
- return m_storage.data()[row + col * m_storage.rows()];
+ return m_storage.data()[rowId + colId * m_storage.rows()];
}
EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const
@@ -160,12 +176,12 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
/** \internal */
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
{
return internal::ploadt<PacketScalar, LoadMode>
(m_storage.data() + (Flags & RowMajorBit
- ? col + row * m_storage.cols()
- : row + col * m_storage.rows()));
+ ? colId + rowId * m_storage.cols()
+ : rowId + colId * m_storage.rows()));
}
/** \internal */
@@ -177,19 +193,19 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
/** \internal */
template<int StoreMode>
- EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x)
+ EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val)
{
internal::pstoret<Scalar, PacketScalar, StoreMode>
(m_storage.data() + (Flags & RowMajorBit
- ? col + row * m_storage.cols()
- : row + col * m_storage.rows()), x);
+ ? colId + rowId * m_storage.cols()
+ : rowId + colId * m_storage.rows()), val);
}
/** \internal */
template<int StoreMode>
- EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x)
+ EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val)
{
- internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
+ internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, val);
}
/** \returns a const pointer to the data array of this matrix */
@@ -216,17 +232,22 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
*
* \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
*/
- EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
- {
- #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
- internal::check_rows_cols_for_overflow(rows, cols);
- Index size = rows*cols;
+ EIGEN_STRONG_INLINE void resize(Index nbRows, Index nbCols)
+ {
+ eigen_assert( EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,nbRows==RowsAtCompileTime)
+ && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,nbCols==ColsAtCompileTime)
+ && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,nbRows<=MaxRowsAtCompileTime)
+ && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,nbCols<=MaxColsAtCompileTime)
+ && nbRows>=0 && nbCols>=0 && "Invalid sizes when resizing a matrix or array.");
+ internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(nbRows, nbCols);
+ #ifdef EIGEN_INITIALIZE_COEFFS
+ Index size = nbRows*nbCols;
bool size_changed = size != this->size();
- m_storage.resize(size, rows, cols);
- if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ m_storage.resize(size, nbRows, nbCols);
+ if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#else
- internal::check_rows_cols_for_overflow(rows, cols);
- m_storage.resize(rows*cols, rows, cols);
+ internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(nbRows, nbCols);
+ m_storage.resize(nbRows*nbCols, nbRows, nbCols);
#endif
}
@@ -244,16 +265,16 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
inline void resize(Index size)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase)
- eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
- #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+ eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0);
+ #ifdef EIGEN_INITIALIZE_COEFFS
bool size_changed = size != this->size();
#endif
if(RowsAtCompileTime == 1)
m_storage.resize(size, 1, size);
else
m_storage.resize(size, size, 1);
- #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
- if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ #ifdef EIGEN_INITIALIZE_COEFFS
+ if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#endif
}
@@ -265,9 +286,9 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
*
* \sa resize(Index,Index)
*/
- inline void resize(NoChange_t, Index cols)
+ inline void resize(NoChange_t, Index nbCols)
{
- resize(rows(), cols);
+ resize(rows(), nbCols);
}
/** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange
@@ -278,9 +299,9 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
*
* \sa resize(Index,Index)
*/
- inline void resize(Index rows, NoChange_t)
+ inline void resize(Index nbRows, NoChange_t)
{
- resize(rows, cols());
+ resize(nbRows, cols());
}
/** Resizes \c *this to have the same dimensions as \a other.
@@ -294,7 +315,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
{
const OtherDerived& other = _other.derived();
- internal::check_rows_cols_for_overflow(other.rows(), other.cols());
+ internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.rows(), other.cols());
const Index othersize = other.rows()*other.cols();
if(RowsAtCompileTime == 1)
{
@@ -318,9 +339,9 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
* Matrices are resized relative to the top-left element. In case values need to be
* appended to the matrix they will be uninitialized.
*/
- EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols)
+ EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, Index nbCols)
{
- internal::conservative_resize_like_impl<Derived>::run(*this, rows, cols);
+ internal::conservative_resize_like_impl<Derived>::run(*this, nbRows, nbCols);
}
/** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
@@ -330,10 +351,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
*
* In case the matrix is growing, new rows will be uninitialized.
*/
- EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t)
+ EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, NoChange_t)
{
// Note: see the comment in conservativeResize(Index,Index)
- conservativeResize(rows, cols());
+ conservativeResize(nbRows, cols());
}
/** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
@@ -343,10 +364,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
*
* In case the matrix is growing, new columns will be uninitialized.
*/
- EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols)
+ EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index nbCols)
{
// Note: see the comment in conservativeResize(Index,Index)
- conservativeResize(rows(), cols);
+ conservativeResize(rows(), nbCols);
}
/** Resizes the vector to \a size while retaining old values.
@@ -400,10 +421,10 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
return Base::operator=(func);
}
- EIGEN_STRONG_INLINE explicit PlainObjectBase() : m_storage()
+ EIGEN_STRONG_INLINE PlainObjectBase() : m_storage()
{
// _check_template_params();
-// EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+// EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -412,15 +433,15 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
PlainObjectBase(internal::constructor_without_unaligned_array_assert)
: m_storage(internal::constructor_without_unaligned_array_assert())
{
-// _check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+// _check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
#endif
- EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols)
- : m_storage(size, rows, cols)
+ EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
+ : m_storage(a_size, nbRows, nbCols)
{
// _check_template_params();
-// EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+// EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
}
/** \copydoc MatrixBase::operator=(const EigenBase<OtherDerived>&)
@@ -439,7 +460,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
: m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
{
_check_template_params();
- internal::check_rows_cols_for_overflow(other.derived().rows(), other.derived().cols());
+ internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.derived().rows(), other.derived().cols());
Base::operator=(other.derived());
}
@@ -551,6 +572,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size())
: (rows() == other.rows() && cols() == other.cols())))
&& "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
+ EIGEN_ONLY_USED_FOR_DEBUG(other);
#else
resizeLike(other);
#endif
@@ -600,23 +622,19 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
}
template<typename T0, typename T1>
- EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
+ EIGEN_STRONG_INLINE void _init2(Index nbRows, Index nbCols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
{
EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&
bool(NumTraits<T1>::IsInteger),
FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
- eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
- && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
- internal::check_rows_cols_for_overflow(rows, cols);
- m_storage.resize(rows*cols,rows,cols);
- EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ resize(nbRows,nbCols);
}
template<typename T0, typename T1>
- EIGEN_STRONG_INLINE void _init2(const Scalar& x, const Scalar& y, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
+ EIGEN_STRONG_INLINE void _init2(const Scalar& val0, const Scalar& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
- m_storage.data()[0] = x;
- m_storage.data()[1] = y;
+ m_storage.data()[0] = val0;
+ m_storage.data()[1] = val1;
}
template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
@@ -653,8 +671,10 @@ private:
enum { ThisConstantIsPrivateInPlainObjectBase };
};
+namespace internal {
+
template <typename Derived, typename OtherDerived, bool IsVector>
-struct internal::conservative_resize_like_impl
+struct conservative_resize_like_impl
{
typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index rows, Index cols)
@@ -665,7 +685,7 @@ struct internal::conservative_resize_like_impl
if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
(!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns
{
- internal::check_rows_cols_for_overflow(rows, cols);
+ internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols);
_this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
}
else
@@ -714,11 +734,14 @@ struct internal::conservative_resize_like_impl
}
};
-namespace internal {
-
+// Here, the specialization for vectors inherits from the general matrix case
+// to allow calling .conservativeResize(rows,cols) on vectors.
template <typename Derived, typename OtherDerived>
struct conservative_resize_like_impl<Derived,OtherDerived,true>
+ : conservative_resize_like_impl<Derived,OtherDerived,false>
{
+ using conservative_resize_like_impl<Derived,OtherDerived,false>::run;
+
typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index size)
{
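
check_rows_cols_for_overflow is now a class template specialized on MaxSizeAtCompileTime: fixed-size objects get the empty no-op above, and only dynamically-sized objects pay for the test. A sketch of the guarded computation, assuming a signed Index as the comment in the diff states:

#include <cstddef>
#include <new>

// Throw before rows*cols can overflow a signed Index type.
template<typename Index>
void check_size_for_overflow(Index rows, Index cols)
{
  // largest value representable by a signed Index
  const Index max_index = static_cast<Index>(
      (static_cast<std::size_t>(1) << (8 * sizeof(Index) - 1)) - 1);
  if (rows != 0 && cols != 0 && rows > max_index / cols)
    throw std::bad_alloc();
}
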
diff --git a/extern/Eigen3/Eigen/src/Core/ProductBase.h b/extern/Eigen3/Eigen/src/Core/ProductBase.h
index ec12e5c9f6b..a494b5f8703 100644
--- a/extern/Eigen3/Eigen/src/Core/ProductBase.h
+++ b/extern/Eigen3/Eigen/src/Core/ProductBase.h
@@ -87,10 +87,10 @@ class ProductBase : public MatrixBase<Derived>
typedef typename Base::PlainObject PlainObject;
- ProductBase(const Lhs& lhs, const Rhs& rhs)
- : m_lhs(lhs), m_rhs(rhs)
+ ProductBase(const Lhs& a_lhs, const Rhs& a_rhs)
+ : m_lhs(a_lhs), m_rhs(a_rhs)
{
- eigen_assert(lhs.cols() == rhs.rows()
+ eigen_assert(a_lhs.cols() == a_rhs.rows()
&& "invalid matrix product"
&& "if you wanted a coeff-wise or a dot product use the respective explicit functions");
}
@@ -108,7 +108,7 @@ class ProductBase : public MatrixBase<Derived>
inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); }
template<typename Dest>
- inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); }
+ inline void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { derived().scaleAndAddTo(dst,alpha); }
const _LhsNested& lhs() const { return m_lhs; }
const _RhsNested& rhs() const { return m_rhs; }
@@ -195,25 +195,25 @@ class ScaledProduct;
// Also note that here we accept any compatible scalar types
template<typename Derived,typename Lhs,typename Rhs>
const ScaledProduct<Derived>
-operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::Scalar x)
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, const typename Derived::Scalar& x)
{ return ScaledProduct<Derived>(prod.derived(), x); }
template<typename Derived,typename Lhs,typename Rhs>
typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
const ScaledProduct<Derived> >::type
-operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::RealScalar x)
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, const typename Derived::RealScalar& x)
{ return ScaledProduct<Derived>(prod.derived(), x); }
template<typename Derived,typename Lhs,typename Rhs>
const ScaledProduct<Derived>
-operator*(typename Derived::Scalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
+operator*(const typename Derived::Scalar& x,const ProductBase<Derived,Lhs,Rhs>& prod)
{ return ScaledProduct<Derived>(prod.derived(), x); }
template<typename Derived,typename Lhs,typename Rhs>
typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
const ScaledProduct<Derived> >::type
-operator*(typename Derived::RealScalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
+operator*(const typename Derived::RealScalar& x,const ProductBase<Derived,Lhs,Rhs>& prod)
{ return ScaledProduct<Derived>(prod.derived(), x); }
namespace internal {
@@ -241,7 +241,7 @@ class ScaledProduct
typedef typename Base::PlainObject PlainObject;
// EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct)
- ScaledProduct(const NestedProduct& prod, Scalar x)
+ ScaledProduct(const NestedProduct& prod, const Scalar& x)
: Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {}
template<typename Dest>
@@ -254,7 +254,7 @@ class ScaledProduct
inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); }
template<typename Dest>
- inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha * m_alpha); }
+ inline void scaleAndAddTo(Dest& dst, const Scalar& a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); }
const Scalar& alpha() const { return m_alpha; }
diff --git a/extern/Eigen3/Eigen/src/Core/Random.h b/extern/Eigen3/Eigen/src/Core/Random.h
index a9f7f434666..480fea408d0 100644
--- a/extern/Eigen3/Eigen/src/Core/Random.h
+++ b/extern/Eigen3/Eigen/src/Core/Random.h
@@ -112,7 +112,7 @@ inline Derived& DenseBase<Derived>::setRandom()
return *this = Random(rows(), cols());
}
-/** Resizes to the given \a size, and sets all coefficients in this expression to random values.
+/** Resizes to the given \a newSize, and sets all coefficients in this expression to random values.
*
* \only_for_vectors
*
@@ -123,16 +123,16 @@ inline Derived& DenseBase<Derived>::setRandom()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setRandom(Index size)
+PlainObjectBase<Derived>::setRandom(Index newSize)
{
- resize(size);
+ resize(newSize);
return setRandom();
}
/** Resizes to the given size, and sets all coefficients in this expression to random values.
*
- * \param rows the new number of rows
- * \param cols the new number of columns
+ * \param nbRows the new number of rows
+ * \param nbCols the new number of columns
*
* Example: \include Matrix_setRandom_int_int.cpp
* Output: \verbinclude Matrix_setRandom_int_int.out
@@ -141,9 +141,9 @@ PlainObjectBase<Derived>::setRandom(Index size)
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
-PlainObjectBase<Derived>::setRandom(Index rows, Index cols)
+PlainObjectBase<Derived>::setRandom(Index nbRows, Index nbCols)
{
- resize(rows, cols);
+ resize(nbRows, nbCols);
return setRandom();
}
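
Both setRandom() overloads resize first and then fill, matching the renamed parameters. A short usage sketch:

#include <Eigen/Dense>

int main()
{
  Eigen::VectorXf v;
  v.setRandom(5);     // resizes v to 5 coefficients, then fills randomly

  Eigen::MatrixXf m;
  m.setRandom(3, 4);  // resizes m to 3x4, then fills randomly
}
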
diff --git a/extern/Eigen3/Eigen/src/Core/Redux.h b/extern/Eigen3/Eigen/src/Core/Redux.h
index b7ce7c658a2..50548fa9a0e 100644
--- a/extern/Eigen3/Eigen/src/Core/Redux.h
+++ b/extern/Eigen3/Eigen/src/Core/Redux.h
@@ -330,7 +330,8 @@ DenseBase<Derived>::redux(const Func& func) const
::run(derived(), func);
}
-/** \returns the minimum of all coefficients of *this
+/** \returns the minimum of all coefficients of \c *this.
+ * \warning the result is undefined if \c *this contains NaN.
*/
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
@@ -339,7 +340,8 @@ DenseBase<Derived>::minCoeff() const
return this->redux(Eigen::internal::scalar_min_op<Scalar>());
}
-/** \returns the maximum of all coefficients of *this
+/** \returns the maximum of all coefficients of \c *this.
+ * \warning the result is undefined if \c *this contains NaN.
*/
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
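
The new \warning notes make the NaN behaviour explicit: the comparison-based reduction gives an undefined result once a NaN is present. A small illustrative demonstration:

#include <Eigen/Dense>
#include <iostream>
#include <limits>

int main()
{
  Eigen::Vector3f v(1.f, std::numeric_limits<float>::quiet_NaN(), 3.f);
  // Undefined in the presence of NaN: the reduction may or may not
  // propagate it, depending on the evaluation order.
  std::cout << v.minCoeff() << "\n";
}
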
diff --git a/extern/Eigen3/Eigen/src/Core/Ref.h b/extern/Eigen3/Eigen/src/Core/Ref.h
new file mode 100644
index 00000000000..00d9e6d2b41
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Ref.h
@@ -0,0 +1,256 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REF_H
+#define EIGEN_REF_H
+
+namespace Eigen {
+
+template<typename Derived> class RefBase;
+template<typename PlainObjectType, int Options = 0,
+ typename StrideType = typename internal::conditional<PlainObjectType::IsVectorAtCompileTime,InnerStride<1>,OuterStride<> >::type > class Ref;
+
+/** \class Ref
+ * \ingroup Core_Module
+ *
+ * \brief A matrix or vector expression mapping an existing expression
+ *
+ * \tparam PlainObjectType the equivalent matrix type of the mapped data
+ * \tparam Options specifies whether the pointer is \c #Aligned or \c #Unaligned.
+ * The default is \c #Unaligned.
+ * \tparam StrideType optionally specifies strides. By default, Ref implies contiguous storage along the inner dimension (inner stride==1),
+ * but accepts a variable outer stride (leading dimension).
+ * This can be overridden by specifying strides.
+ * The type passed here must be a specialization of the Stride template, see examples below.
+ *
+ * This class makes it possible to write non-template functions taking Eigen objects as parameters while limiting the number of copies.
+ * A Ref<> object can represent either a const expression or an l-value:
+ * \code
+ * // in-out argument:
+ * void foo1(Ref<VectorXf> x);
+ *
+ * // read-only const argument:
+ * void foo2(const Ref<const VectorXf>& x);
+ * \endcode
+ *
+ * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation error will be triggered.
+ * By default, a Ref<VectorXf> can reference any dense vector expression of float having a contiguous memory layout.
+ * Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float whose columns' elements are stored
+ * contiguously, with the possibility of a constant gap between columns; i.e., the inner stride must be equal to 1, but the
+ * outer stride (or leading dimension) can be greater than the number of rows.
+ *
+ * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function.
+ * Here are some examples:
+ * \code
+ * MatrixXf A;
+ * VectorXf a;
+ * foo1(a.head()); // OK
+ * foo1(A.col()); // OK
+ * foo1(A.row()); // compilation error because here innerstride!=1
+ * foo2(A.row()); // The row is copied into a contiguous temporary
+ * foo2(2*a); // The expression is evaluated into a temporary
+ * foo2(A.col().segment(2,4)); // No temporary
+ * \endcode
+ *
+ * The range of inputs that can be referenced without a temporary can be enlarged using the last two template parameters.
+ * Here is an example accepting an innerstride!=1:
+ * \code
+ * // in-out argument:
+ * void foo3(Ref<VectorXf,0,InnerStride<> > x);
+ * foo3(A.row()); // OK
+ * \endcode
+ * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more
+ * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might provide overloads that internally call a
+ * template function, e.g.:
+ * \code
+ * // in the .h:
+ * void foo(const Ref<MatrixXf>& A);
+ * void foo(const Ref<MatrixXf,0,Stride<> >& A);
+ *
+ * // in the .cpp:
+ * template<typename TypeOfA> void foo_impl(const TypeOfA& A) {
+ * ... // crazy code goes here
+ * }
+ * void foo(const Ref<MatrixXf>& A) { foo_impl(A); }
+ * void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }
+ * \endcode
+ *
+ *
+ * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
+ */
+
+namespace internal {
+
+template<typename _PlainObjectType, int _Options, typename _StrideType>
+struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
+ : public traits<Map<_PlainObjectType, _Options, _StrideType> >
+{
+ typedef _PlainObjectType PlainObjectType;
+ typedef _StrideType StrideType;
+ enum {
+ Options = _Options,
+ Flags = traits<Map<_PlainObjectType, _Options, _StrideType> >::Flags | NestByRefBit
+ };
+
+ template<typename Derived> struct match {
+ enum {
+ HasDirectAccess = internal::has_direct_access<Derived>::ret,
+ StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+ InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
+ || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
+ || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
+ OuterStrideMatch = Derived::IsVectorAtCompileTime
+ || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
+ AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
+ MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
+ };
+ typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
+ };
+
+};
+
+template<typename Derived>
+struct traits<RefBase<Derived> > : public traits<Derived> {};
+
+}
+
+template<typename Derived> class RefBase
+ : public MapBase<Derived>
+{
+ typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;
+ typedef typename internal::traits<Derived>::StrideType StrideType;
+
+public:
+
+ typedef MapBase<Derived> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)
+
+ inline Index innerStride() const
+ {
+ return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
+ }
+
+ inline Index outerStride() const
+ {
+ return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
+ : IsVectorAtCompileTime ? this->size()
+ : int(Flags)&RowMajorBit ? this->cols()
+ : this->rows();
+ }
+
+ RefBase()
+ : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime),
+ // Stride<> does not allow a default ctor for Dynamic strides, so let's initialize it with dummy values:
+ m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime,
+ StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime)
+ {}
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)
+
+protected:
+
+ typedef Stride<StrideType::OuterStrideAtCompileTime,StrideType::InnerStrideAtCompileTime> StrideBase;
+
+ template<typename Expression>
+ void construct(Expression& expr)
+ {
+ if(PlainObjectType::RowsAtCompileTime==1)
+ {
+ eigen_assert(expr.rows()==1 || expr.cols()==1);
+ ::new (static_cast<Base*>(this)) Base(expr.data(), 1, expr.size());
+ }
+ else if(PlainObjectType::ColsAtCompileTime==1)
+ {
+ eigen_assert(expr.rows()==1 || expr.cols()==1);
+ ::new (static_cast<Base*>(this)) Base(expr.data(), expr.size(), 1);
+ }
+ else
+ ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
+ ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
+ StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
+ }
+
+ StrideBase m_stride;
+};
+
+
+template<typename PlainObjectType, int Options, typename StrideType> class Ref
+ : public RefBase<Ref<PlainObjectType, Options, StrideType> >
+{
+ typedef internal::traits<Ref> Traits;
+ public:
+
+ typedef RefBase<Ref> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
+
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Derived>
+ inline Ref(PlainObjectBase<Derived>& expr,
+ typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
+ {
+ Base::construct(expr);
+ }
+ template<typename Derived>
+ inline Ref(const DenseBase<Derived>& expr,
+ typename internal::enable_if<bool(internal::is_lvalue<Derived>::value&&bool(Traits::template match<Derived>::MatchAtCompileTime)),Derived>::type* = 0,
+ int = Derived::ThisConstantIsPrivateInPlainObjectBase)
+ #else
+ template<typename Derived>
+ inline Ref(DenseBase<Derived>& expr)
+ #endif
+ {
+ Base::construct(expr.const_cast_derived());
+ }
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)
+
+};
+
+// this is the const ref version
+template<typename TPlainObjectType, int Options, typename StrideType> class Ref<const TPlainObjectType, Options, StrideType>
+ : public RefBase<Ref<const TPlainObjectType, Options, StrideType> >
+{
+ typedef internal::traits<Ref> Traits;
+ public:
+
+ typedef RefBase<Ref> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
+
+ template<typename Derived>
+ inline Ref(const DenseBase<Derived>& expr)
+ {
+// std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
+// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
+// std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
+ construct(expr.derived(), typename Traits::template match<Derived>::type());
+ }
+
+ protected:
+
+ template<typename Expression>
+ void construct(const Expression& expr,internal::true_type)
+ {
+ Base::construct(expr);
+ }
+
+ template<typename Expression>
+ void construct(const Expression& expr, internal::false_type)
+ {
+ m_object.lazyAssign(expr);
+ Base::construct(m_object);
+ }
+
+ protected:
+ TPlainObjectType m_object;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_REF_H
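
A compilable sketch of the foo1/foo2 pattern documented above; the function names are illustrative:

#include <Eigen/Dense>

// Writable argument: the caller's data is modified in place, no copy.
void scale_in_place(Eigen::Ref<Eigen::VectorXf> x) { x *= 2.f; }

// Read-only argument: non-matching expressions are evaluated into a
// temporary before the call, as described above.
float sum_of(const Eigen::Ref<const Eigen::VectorXf>& x) { return x.sum(); }

int main()
{
  Eigen::MatrixXf a = Eigen::MatrixXf::Random(4, 4);
  Eigen::VectorXf v = Eigen::VectorXf::Random(4);

  scale_in_place(v);          // plain vector: referenced directly
  scale_in_place(a.col(1));   // column: inner stride 1, referenced directly
  float s = sum_of(2.f * v);  // expression: evaluated into a temporary
  (void)s;
}
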
diff --git a/extern/Eigen3/Eigen/src/Core/Replicate.h b/extern/Eigen3/Eigen/src/Core/Replicate.h
index b61fdc29e2f..dde86a8349b 100644
--- a/extern/Eigen3/Eigen/src/Core/Replicate.h
+++ b/extern/Eigen3/Eigen/src/Core/Replicate.h
@@ -70,8 +70,8 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
template<typename OriginalMatrixType>
- inline explicit Replicate(const OriginalMatrixType& matrix)
- : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
+ inline explicit Replicate(const OriginalMatrixType& a_matrix)
+ : m_matrix(a_matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
{
EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
@@ -79,8 +79,8 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
}
template<typename OriginalMatrixType>
- inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor)
- : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
+ inline Replicate(const OriginalMatrixType& a_matrix, Index rowFactor, Index colFactor)
+ : m_matrix(a_matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
{
EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
@@ -89,27 +89,27 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
- inline Scalar coeff(Index row, Index col) const
+ inline Scalar coeff(Index rowId, Index colId) const
{
// try to avoid using modulo; this is a pure optimization strategy
const Index actual_row = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
- : RowFactor==1 ? row
- : row%m_matrix.rows();
+ : RowFactor==1 ? rowId
+ : rowId%m_matrix.rows();
const Index actual_col = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
- : ColFactor==1 ? col
- : col%m_matrix.cols();
+ : ColFactor==1 ? colId
+ : colId%m_matrix.cols();
return m_matrix.coeff(actual_row, actual_col);
}
template<int LoadMode>
- inline PacketScalar packet(Index row, Index col) const
+ inline PacketScalar packet(Index rowId, Index colId) const
{
const Index actual_row = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
- : RowFactor==1 ? row
- : row%m_matrix.rows();
+ : RowFactor==1 ? rowId
+ : rowId%m_matrix.rows();
const Index actual_col = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
- : ColFactor==1 ? col
- : col%m_matrix.cols();
+ : ColFactor==1 ? colId
+ : colId%m_matrix.cols();
return m_matrix.template packet<LoadMode>(actual_row, actual_col);
}
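
The coeff()/packet() members above map a coefficient of the replicated expression back into the base matrix, applying modulo only when the corresponding factor is not 1. A short usage sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf m(1, 2);
  m << 1, 2;
  // Tile m 2x vertically and 3x horizontally; coefficient (i,j) of the
  // result reads m(i % m.rows(), j % m.cols()).
  std::cout << m.replicate(2, 3) << "\n";
}
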
diff --git a/extern/Eigen3/Eigen/src/Core/ReturnByValue.h b/extern/Eigen3/Eigen/src/Core/ReturnByValue.h
index 613912ffa8c..d66c24ba0f8 100644
--- a/extern/Eigen3/Eigen/src/Core/ReturnByValue.h
+++ b/extern/Eigen3/Eigen/src/Core/ReturnByValue.h
@@ -48,7 +48,7 @@ struct nested<ReturnByValue<Derived>, n, PlainObject>
} // end namespace internal
template<typename Derived> class ReturnByValue
- : public internal::dense_xpr_base< ReturnByValue<Derived> >::type
+ : internal::no_assignment_operator, public internal::dense_xpr_base< ReturnByValue<Derived> >::type
{
public:
typedef typename internal::traits<Derived>::ReturnType ReturnType;
diff --git a/extern/Eigen3/Eigen/src/Core/Select.h b/extern/Eigen3/Eigen/src/Core/Select.h
index 2bf6e91d0aa..87993bbb553 100644
--- a/extern/Eigen3/Eigen/src/Core/Select.h
+++ b/extern/Eigen3/Eigen/src/Core/Select.h
@@ -60,10 +60,10 @@ class Select : internal::no_assignment_operator,
typedef typename internal::dense_xpr_base<Select>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Select)
- Select(const ConditionMatrixType& conditionMatrix,
- const ThenMatrixType& thenMatrix,
- const ElseMatrixType& elseMatrix)
- : m_condition(conditionMatrix), m_then(thenMatrix), m_else(elseMatrix)
+ Select(const ConditionMatrixType& a_conditionMatrix,
+ const ThenMatrixType& a_thenMatrix,
+ const ElseMatrixType& a_elseMatrix)
+ : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix)
{
eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
@@ -136,7 +136,7 @@ template<typename Derived>
template<typename ThenDerived>
inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
- typename ThenDerived::Scalar elseScalar) const
+ const typename ThenDerived::Scalar& elseScalar) const
{
return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>(
derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));
@@ -150,8 +150,8 @@ DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
template<typename Derived>
template<typename ElseDerived>
inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
-DenseBase<Derived>::select(typename ElseDerived::Scalar thenScalar,
- const DenseBase<ElseDerived>& elseMatrix) const
+DenseBase<Derived>::select(const typename ElseDerived::Scalar& thenScalar,
+ const DenseBase<ElseDerived>& elseMatrix) const
{
return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>(
derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());
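
The scalar overloads of select() now take their scalar by const reference; the behaviour is unchanged. A typical illustrative use:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf m(2, 2);
  m << -1, 2, 3, -4;
  // Coefficient-wise ternary: keep positive entries, replace the rest
  // by the scalar 0 (broadcast via ThenDerived::Constant as above).
  Eigen::MatrixXf r = (m.array() > 0.f).select(m, 0.f);
  std::cout << r << "\n";
}
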
diff --git a/extern/Eigen3/Eigen/src/Core/SelfAdjointView.h b/extern/Eigen3/Eigen/src/Core/SelfAdjointView.h
index 82cc4da736a..6fa7cd15e6f 100644
--- a/extern/Eigen3/Eigen/src/Core/SelfAdjointView.h
+++ b/extern/Eigen3/Eigen/src/Core/SelfAdjointView.h
@@ -132,7 +132,7 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
* \sa rankUpdate(const MatrixBase<DerivedU>&, Scalar)
*/
template<typename DerivedU, typename DerivedV>
- SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, Scalar alpha = Scalar(1));
+ SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha = Scalar(1));
/** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
* \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
@@ -145,7 +145,7 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
* \sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar)
*/
template<typename DerivedU>
- SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+ SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
/////////// Cholesky module ///////////
@@ -214,9 +214,9 @@ struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), U
triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount-1, ClearOpposite>::run(dst, src);
if(row == col)
- dst.coeffRef(row, col) = real(src.coeff(row, col));
+ dst.coeffRef(row, col) = numext::real(src.coeff(row, col));
else if(row < col)
- dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
+ dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col));
}
};
@@ -239,9 +239,9 @@ struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), U
triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount-1, ClearOpposite>::run(dst, src);
if(row == col)
- dst.coeffRef(row, col) = real(src.coeff(row, col));
+ dst.coeffRef(row, col) = numext::real(src.coeff(row, col));
else if(row > col)
- dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
+ dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col));
}
};
@@ -262,7 +262,7 @@ struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, Dyn
for(Index i = 0; i < j; ++i)
{
dst.copyCoeff(i, j, src);
- dst.coeffRef(j,i) = conj(dst.coeff(i,j));
+ dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j));
}
dst.copyCoeff(j, j, src);
}
@@ -280,7 +280,7 @@ struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, Dyn
for(Index j = 0; j < i; ++j)
{
dst.copyCoeff(i, j, src);
- dst.coeffRef(j,i) = conj(dst.coeff(i,j));
+ dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j));
}
dst.copyCoeff(i, i, src);
}
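
rankUpdate() likewise takes alpha by const reference now, and the scalar helpers move from internal:: to numext::. For reference, a rank-1 update sketch:

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Zero(3, 3);
  Eigen::VectorXd u = Eigen::VectorXd::Random(3);
  // m += 2 * u * u^T, touching only the stored lower triangle.
  m.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0);
}
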
diff --git a/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
index 0caf2bab1d8..22f3047b43f 100644
--- a/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
+++ b/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -185,7 +185,10 @@ inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
internal::scalar_product_op<Scalar> >::type BinOp;
typedef typename Derived::PlainObject PlainObject;
SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
- tmp = PlainObject::Constant(rows(),cols(), NumTraits<Scalar>::IsInteger ? other : Scalar(1)/other);
+ Scalar actual_other;
+ if(NumTraits<Scalar>::IsInteger) actual_other = other;
+ else actual_other = Scalar(1)/other;
+ tmp = PlainObject::Constant(rows(),cols(), actual_other);
return derived();
}
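
The rewritten operator/= keeps the old semantics: a true coefficient-wise division for integer scalars (via the quotient functor selected above), and a multiplication by the reciprocal otherwise. A behaviour sketch:

#include <Eigen/Dense>

int main()
{
  Eigen::VectorXf vf = Eigen::VectorXf::Constant(4, 10.f);
  vf /= 4.f;  // floating point: implemented as vf *= (1.f / 4.f)

  Eigen::VectorXi vi = Eigen::VectorXi::Constant(4, 10);
  vi /= 4;    // integer: true division, each coefficient becomes 2
}
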
diff --git a/extern/Eigen3/Eigen/src/Core/StableNorm.h b/extern/Eigen3/Eigen/src/Core/StableNorm.h
index d8bf7db70e4..389d9427539 100644
--- a/extern/Eigen3/Eigen/src/Core/StableNorm.h
+++ b/extern/Eigen3/Eigen/src/Core/StableNorm.h
@@ -13,131 +13,105 @@
namespace Eigen {
namespace internal {
+
template<typename ExpressionType, typename Scalar>
inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
{
- Scalar max = bl.cwiseAbs().maxCoeff();
- if (max>scale)
+ using std::max;
+ Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
+
+ if (maxCoeff>scale)
{
- ssq = ssq * abs2(scale/max);
- scale = max;
- invScale = Scalar(1)/scale;
+ ssq = ssq * numext::abs2(scale/maxCoeff);
+ Scalar tmp = Scalar(1)/maxCoeff;
+ if(tmp > NumTraits<Scalar>::highest())
+ {
+ invScale = NumTraits<Scalar>::highest();
+ scale = Scalar(1)/invScale;
+ }
+ else
+ {
+ scale = maxCoeff;
+ invScale = tmp;
+ }
}
- // TODO if the max is much much smaller than the current scale,
+
+ // TODO if the maxCoeff is much much smaller than the current scale,
// then we can neglect this sub vector
- ssq += (bl*invScale).squaredNorm();
-}
-}
-
-/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
- * This version use a blockwise two passes algorithm:
- * 1 - find the absolute largest coefficient \c s
- * 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
- *
- * For architecture/scalar types supporting vectorization, this version
- * is faster than blueNorm(). Otherwise the blueNorm() is much faster.
- *
- * \sa norm(), blueNorm(), hypotNorm()
- */
-template<typename Derived>
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
-MatrixBase<Derived>::stableNorm() const
-{
- using std::min;
- const Index blockSize = 4096;
- RealScalar scale(0);
- RealScalar invScale(1);
- RealScalar ssq(0); // sum of square
- enum {
- Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
- };
- Index n = size();
- Index bi = internal::first_aligned(derived());
- if (bi>0)
- internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
- for (; bi<n; bi+=blockSize)
- internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
- return scale * internal::sqrt(ssq);
+ if(scale>Scalar(0)) // if scale==0, then bl is 0
+ ssq += (bl*invScale).squaredNorm();
}
-/** \returns the \em l2 norm of \c *this using the Blue's algorithm.
- * A Portable Fortran Program to Find the Euclidean Norm of a Vector,
- * ACM TOMS, Vol 4, Issue 1, 1978.
- *
- * For architecture/scalar types without vectorization, this version
- * is much faster than stableNorm(). Otherwise the stableNorm() is faster.
- *
- * \sa norm(), stableNorm(), hypotNorm()
- */
template<typename Derived>
-inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
-MatrixBase<Derived>::blueNorm() const
+inline typename NumTraits<typename traits<Derived>::Scalar>::Real
+blueNorm_impl(const EigenBase<Derived>& _vec)
{
+ typedef typename Derived::RealScalar RealScalar;
+ typedef typename Derived::Index Index;
using std::pow;
using std::min;
using std::max;
- static Index nmax = -1;
+ using std::sqrt;
+ using std::abs;
+ const Derived& vec(_vec.derived());
+ static bool initialized = false;
static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
- if(nmax <= 0)
+ if(!initialized)
{
- int nbig, ibeta, it, iemin, iemax, iexp;
- RealScalar abig, eps;
+ int ibeta, it, iemin, iemax, iexp;
+ RealScalar eps;
// This program calculates the machine-dependent constants
- // bl, b2, slm, s2m, relerr overfl, nmax
+ // bl, b2, slm, s2m, relerr overfl
// from the "basic" machine-dependent numbers
// nbig, ibeta, it, iemin, iemax, rbig.
// The following define the basic machine-dependent constants.
// For portability, the PORT subprograms "ilmaeh" and "rlmach"
// are used. For any specific computer, each of the assignment
// statements can be replaced
- nbig = (std::numeric_limits<Index>::max)(); // largest integer
- ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
- it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
- iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
- iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
- rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
+ ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
+ it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
+ iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
+ iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
+ rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
iexp = -((1-iemin)/2);
- b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
+ b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
iexp = (iemax + 1 - it)/2;
- b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange
+ b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange
iexp = (2-iemin)/2;
- s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range
+ s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range
iexp = - ((iemax+it)/2);
- s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
+ s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
- overfl = rbig*s2m; // overflow boundary for abig
+ overfl = rbig*s2m; // overflow boundary for abig
eps = RealScalar(pow(double(ibeta), 1-it));
- relerr = internal::sqrt(eps); // tolerance for neglecting asml
- abig = RealScalar(1.0/eps - 1.0);
- if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n
- else nmax = nbig;
+ relerr = sqrt(eps); // tolerance for neglecting asml
+ initialized = true;
}
- Index n = size();
+ Index n = vec.size();
RealScalar ab2 = b2 / RealScalar(n);
RealScalar asml = RealScalar(0);
RealScalar amed = RealScalar(0);
RealScalar abig = RealScalar(0);
- for(Index j=0; j<n; ++j)
+ for(typename Derived::InnerIterator it(vec, 0); it; ++it)
{
- RealScalar ax = internal::abs(coeff(j));
- if(ax > ab2) abig += internal::abs2(ax*s2m);
- else if(ax < b1) asml += internal::abs2(ax*s1m);
- else amed += internal::abs2(ax);
+ RealScalar ax = abs(it.value());
+ if(ax > ab2) abig += numext::abs2(ax*s2m);
+ else if(ax < b1) asml += numext::abs2(ax*s1m);
+ else amed += numext::abs2(ax);
}
if(abig > RealScalar(0))
{
- abig = internal::sqrt(abig);
+ abig = sqrt(abig);
if(abig > overfl)
{
- eigen_assert(false && "overflow");
return rbig;
}
if(amed > RealScalar(0))
{
abig = abig/s2m;
- amed = internal::sqrt(amed);
+ amed = sqrt(amed);
}
else
return abig/s2m;
@@ -146,20 +120,70 @@ MatrixBase<Derived>::blueNorm() const
{
if (amed > RealScalar(0))
{
- abig = internal::sqrt(amed);
- amed = internal::sqrt(asml) / s1m;
+ abig = sqrt(amed);
+ amed = sqrt(asml) / s1m;
}
else
- return internal::sqrt(asml)/s1m;
+ return sqrt(asml)/s1m;
}
else
- return internal::sqrt(amed);
+ return sqrt(amed);
asml = (min)(abig, amed);
abig = (max)(abig, amed);
if(asml <= abig*relerr)
return abig;
else
- return abig * internal::sqrt(RealScalar(1) + internal::abs2(asml/abig));
+ return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig));
+}
+
+} // end namespace internal
+
+/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
+ * This version uses a blockwise two-pass algorithm:
+ * 1 - find the absolute largest coefficient \c s
+ * 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
+ *
+ * For architecture/scalar types supporting vectorization, this version
+ * is faster than blueNorm(). Otherwise blueNorm() is much faster.
+ *
+ * \sa norm(), blueNorm(), hypotNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::stableNorm() const
+{
+ using std::min;
+ using std::sqrt;
+ const Index blockSize = 4096;
+ RealScalar scale(0);
+ RealScalar invScale(1);
+ RealScalar ssq(0); // sum of square
+ enum {
+ Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
+ };
+ Index n = size();
+ Index bi = internal::first_aligned(derived());
+ if (bi>0)
+ internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
+ for (; bi<n; bi+=blockSize)
+ internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
+ return scale * sqrt(ssq);
+}
+
+/** \returns the \em l2 norm of \c *this using Blue's algorithm.
+ * A Portable Fortran Program to Find the Euclidean Norm of a Vector,
+ * ACM TOMS, Vol 4, Issue 1, 1978.
+ *
+ * For architecture/scalar types without vectorization, this version
+ * is much faster than stableNorm(). Otherwise stableNorm() is faster.
+ *
+ * \sa norm(), stableNorm(), hypotNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::blueNorm() const
+{
+ return internal::blueNorm_impl(*this);
}
/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
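
Both stableNorm() and the kernel it calls rely on the classic rescaled sum of squares: track the largest magnitude seen so far as the scale, and accumulate (x/scale)^2. A standalone sketch of the idea, illustrative and without the blocking and alignment handling:

#include <cmath>
#include <cstddef>
#include <vector>

double stable_norm(const std::vector<double>& x)
{
  double scale = 0.0, ssq = 0.0;
  for (std::size_t i = 0; i < x.size(); ++i)
  {
    const double a = std::abs(x[i]);
    if (a == 0.0) continue;
    if (a > scale)
    {
      if (scale > 0.0)
        ssq *= (scale / a) * (scale / a);  // re-express ssq in the new scale
      scale = a;
    }
    ssq += (a / scale) * (a / scale);
  }
  return scale * std::sqrt(ssq);  // the l2 norm, without overflow/underflow
}
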
diff --git a/extern/Eigen3/Eigen/src/Core/Swap.h b/extern/Eigen3/Eigen/src/Core/Swap.h
index fd73cf3ad7e..bf58bd5997d 100644
--- a/extern/Eigen3/Eigen/src/Core/Swap.h
+++ b/extern/Eigen3/Eigen/src/Core/Swap.h
@@ -49,9 +49,9 @@ template<typename ExpressionType> class SwapWrapper
inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
inline const Scalar* data() const { return m_expression.data(); }
- inline Scalar& coeffRef(Index row, Index col)
+ inline Scalar& coeffRef(Index rowId, Index colId)
{
- return m_expression.const_cast_derived().coeffRef(row, col);
+ return m_expression.const_cast_derived().coeffRef(rowId, colId);
}
inline Scalar& coeffRef(Index index)
@@ -59,9 +59,9 @@ template<typename ExpressionType> class SwapWrapper
return m_expression.const_cast_derived().coeffRef(index);
}
- inline Scalar& coeffRef(Index row, Index col) const
+ inline Scalar& coeffRef(Index rowId, Index colId) const
{
- return m_expression.coeffRef(row, col);
+ return m_expression.coeffRef(rowId, colId);
}
inline Scalar& coeffRef(Index index) const
@@ -70,14 +70,14 @@ template<typename ExpressionType> class SwapWrapper
}
template<typename OtherDerived>
- void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
+ void copyCoeff(Index rowId, Index colId, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
- eigen_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- Scalar tmp = m_expression.coeff(row, col);
- m_expression.coeffRef(row, col) = _other.coeff(row, col);
- _other.coeffRef(row, col) = tmp;
+ eigen_internal_assert(rowId >= 0 && rowId < rows()
+ && colId >= 0 && colId < cols());
+ Scalar tmp = m_expression.coeff(rowId, colId);
+ m_expression.coeffRef(rowId, colId) = _other.coeff(rowId, colId);
+ _other.coeffRef(rowId, colId) = tmp;
}
template<typename OtherDerived>
@@ -91,16 +91,16 @@ template<typename ExpressionType> class SwapWrapper
}
template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
+ void copyPacket(Index rowId, Index colId, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
- eigen_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- Packet tmp = m_expression.template packet<StoreMode>(row, col);
- m_expression.template writePacket<StoreMode>(row, col,
- _other.template packet<LoadMode>(row, col)
+ eigen_internal_assert(rowId >= 0 && rowId < rows()
+ && colId >= 0 && colId < cols());
+ Packet tmp = m_expression.template packet<StoreMode>(rowId, colId);
+ m_expression.template writePacket<StoreMode>(rowId, colId,
+ _other.template packet<LoadMode>(rowId, colId)
);
- _other.template writePacket<LoadMode>(row, col, tmp);
+ _other.template writePacket<LoadMode>(rowId, colId, tmp);
}
template<typename OtherDerived, int StoreMode, int LoadMode>
diff --git a/extern/Eigen3/Eigen/src/Core/Transpose.h b/extern/Eigen3/Eigen/src/Core/Transpose.h
index 045a1cce671..22096ea2fde 100644
--- a/extern/Eigen3/Eigen/src/Core/Transpose.h
+++ b/extern/Eigen3/Eigen/src/Core/Transpose.h
@@ -62,7 +62,7 @@ template<typename MatrixType> class Transpose
typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
- inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
+ inline Transpose(MatrixType& a_matrix) : m_matrix(a_matrix) {}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
@@ -104,6 +104,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)
inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
@@ -117,10 +118,10 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
inline const Scalar* data() const { return derived().nestedExpression().data(); }
- inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
+ inline ScalarWithConstIfNotLvalue& coeffRef(Index rowId, Index colId)
{
EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
- return derived().nestedExpression().const_cast_derived().coeffRef(col, row);
+ return derived().nestedExpression().const_cast_derived().coeffRef(colId, rowId);
}
inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
@@ -129,9 +130,9 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
return derived().nestedExpression().const_cast_derived().coeffRef(index);
}
- inline const Scalar& coeffRef(Index row, Index col) const
+ inline const Scalar& coeffRef(Index rowId, Index colId) const
{
- return derived().nestedExpression().coeffRef(col, row);
+ return derived().nestedExpression().coeffRef(colId, rowId);
}
inline const Scalar& coeffRef(Index index) const
@@ -139,9 +140,9 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
return derived().nestedExpression().coeffRef(index);
}
- inline CoeffReturnType coeff(Index row, Index col) const
+ inline CoeffReturnType coeff(Index rowId, Index colId) const
{
- return derived().nestedExpression().coeff(col, row);
+ return derived().nestedExpression().coeff(colId, rowId);
}
inline CoeffReturnType coeff(Index index) const
@@ -150,15 +151,15 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
}
template<int LoadMode>
- inline const PacketScalar packet(Index row, Index col) const
+ inline const PacketScalar packet(Index rowId, Index colId) const
{
- return derived().nestedExpression().template packet<LoadMode>(col, row);
+ return derived().nestedExpression().template packet<LoadMode>(colId, rowId);
}
template<int LoadMode>
- inline void writePacket(Index row, Index col, const PacketScalar& x)
+ inline void writePacket(Index rowId, Index colId, const PacketScalar& x)
{
- derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(col, row, x);
+ derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(colId, rowId, x);
}
template<int LoadMode>
@@ -206,7 +207,7 @@ DenseBase<Derived>::transpose()
*
* \sa transposeInPlace(), adjoint() */
template<typename Derived>
-inline const typename DenseBase<Derived>::ConstTransposeReturnType
+inline typename DenseBase<Derived>::ConstTransposeReturnType
DenseBase<Derived>::transpose() const
{
return ConstTransposeReturnType(derived());
@@ -252,7 +253,7 @@ struct inplace_transpose_selector;
template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,true> { // square matrix
static void run(MatrixType& m) {
- m.template triangularView<StrictlyUpper>().swap(m.transpose());
+ m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
}
};
@@ -260,7 +261,7 @@ template<typename MatrixType>
struct inplace_transpose_selector<MatrixType,false> { // non square matrix
static void run(MatrixType& m) {
if (m.rows()==m.cols())
- m.template triangularView<StrictlyUpper>().swap(m.transpose());
+ m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
else
m = m.transpose().eval();
}
@@ -278,17 +279,20 @@ struct inplace_transpose_selector<MatrixType,false> { // non square matrix
* m = m.transpose().eval();
* \endcode
* and is faster and also safer because in the latter line of code, forgetting the eval() results
- * in a bug caused by aliasing.
+ * in a bug caused by \ref TopicAliasing "aliasing".
*
* Notice however that this method is only useful if you want to replace a matrix by its own transpose.
* If you just need the transpose of a matrix, use transpose().
*
- * \note if the matrix is not square, then \c *this must be a resizable matrix.
+ * \note if the matrix is not square, then \c *this must be a resizable matrix.
+ * This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), adjointInPlace() */
template<typename Derived>
inline void DenseBase<Derived>::transposeInPlace()
{
+ eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic))
+ && "transposeInPlace() called on a non-square non-resizable matrix");
internal::inplace_transpose_selector<Derived>::run(derived());
}
@@ -312,6 +316,7 @@ inline void DenseBase<Derived>::transposeInPlace()
* If you just need the adjoint of a matrix, use adjoint().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
+ * This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), transposeInPlace() */
template<typename Derived>
@@ -353,7 +358,7 @@ struct check_transpose_aliasing_run_time_selector
{
static bool run(const Scalar* dest, const OtherDerived& src)
{
- return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src));
+ return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src));
}
};
@@ -362,8 +367,8 @@ struct check_transpose_aliasing_run_time_selector<Scalar,DestIsTransposed,CwiseB
{
static bool run(const Scalar* dest, const CwiseBinaryOp<BinOp,DerivedA,DerivedB>& src)
{
- return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src.lhs())))
- || ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src.rhs())));
+ return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.lhs())))
+ || ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.rhs())));
}
};
@@ -385,7 +390,7 @@ struct checkTransposeAliasing_impl
eigen_assert((!check_transpose_aliasing_run_time_selector
<typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
::run(extract_data(dst), other))
- && "aliasing detected during tranposition, use transposeInPlace() "
+ && "aliasing detected during transposition, use transposeInPlace() "
"or evaluate the rhs into a temporary using .eval()");
}
diff --git a/extern/Eigen3/Eigen/src/Core/Transpositions.h b/extern/Eigen3/Eigen/src/Core/Transpositions.h
index 2cd268a5fa0..e4ba0756fa6 100644
--- a/extern/Eigen3/Eigen/src/Core/Transpositions.h
+++ b/extern/Eigen3/Eigen/src/Core/Transpositions.h
@@ -99,9 +99,9 @@ class TranspositionsBase
IndicesType& indices() { return derived().indices(); }
/** Resizes to given size. */
- inline void resize(int size)
+ inline void resize(int newSize)
{
- indices().resize(size);
+ indices().resize(newSize);
}
/** Sets \c *this to represents an identity transformation */
@@ -177,7 +177,7 @@ class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTim
/** Generic constructor from expression of the transposition indices. */
template<typename Other>
- explicit inline Transpositions(const MatrixBase<Other>& indices) : m_indices(indices)
+ explicit inline Transpositions(const MatrixBase<Other>& a_indices) : m_indices(a_indices)
{}
/** Copies the \a other transpositions into \c *this */
@@ -234,12 +234,12 @@ class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,Packe
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar Index;
- inline Map(const Index* indices)
- : m_indices(indices)
+ inline Map(const Index* indicesPtr)
+ : m_indices(indicesPtr)
{}
- inline Map(const Index* indices, Index size)
- : m_indices(indices,size)
+ inline Map(const Index* indicesPtr, Index size)
+ : m_indices(indicesPtr,size)
{}
/** Copies the \a other transpositions into \c *this */
@@ -291,8 +291,8 @@ class TranspositionsWrapper
typedef typename Traits::IndicesType IndicesType;
typedef typename IndicesType::Scalar Index;
- inline TranspositionsWrapper(IndicesType& indices)
- : m_indices(indices)
+ inline TranspositionsWrapper(IndicesType& a_indices)
+ : m_indices(a_indices)
{}
/** Copies the \a other transpositions into \c *this */
diff --git a/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h b/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h
index de9540063c2..fba07365f6f 100644
--- a/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h
@@ -511,6 +511,7 @@ template<typename Derived1, typename Derived2, bool ClearOpposite>
struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
{
typedef typename Derived1::Index Index;
+ typedef typename Derived1::Scalar Scalar;
static inline void run(Derived1 &dst, const Derived2 &src)
{
for(Index j = 0; j < dst.cols(); ++j)
@@ -520,7 +521,7 @@ struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic
dst.copyCoeff(i, j, src);
if (ClearOpposite)
for(Index i = maxi; i < dst.rows(); ++i)
- dst.coeffRef(i, j) = 0;
+ dst.coeffRef(i, j) = Scalar(0);
}
}
};
@@ -778,22 +779,23 @@ MatrixBase<Derived>::triangularView() const
* \sa isLowerTriangular()
*/
template<typename Derived>
-bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
+bool MatrixBase<Derived>::isUpperTriangular(const RealScalar& prec) const
{
+ using std::abs;
RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
for(Index j = 0; j < cols(); ++j)
{
Index maxi = (std::min)(j, rows()-1);
for(Index i = 0; i <= maxi; ++i)
{
- RealScalar absValue = internal::abs(coeff(i,j));
+ RealScalar absValue = abs(coeff(i,j));
if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
}
}
RealScalar threshold = maxAbsOnUpperPart * prec;
for(Index j = 0; j < cols(); ++j)
for(Index i = j+1; i < rows(); ++i)
- if(internal::abs(coeff(i, j)) > threshold) return false;
+ if(abs(coeff(i, j)) > threshold) return false;
return true;
}
@@ -803,13 +805,14 @@ bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
* \sa isUpperTriangular()
*/
template<typename Derived>
-bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
+bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const
{
+ using std::abs;
RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
for(Index j = 0; j < cols(); ++j)
for(Index i = j; i < rows(); ++i)
{
- RealScalar absValue = internal::abs(coeff(i,j));
+ RealScalar absValue = abs(coeff(i,j));
if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
}
RealScalar threshold = maxAbsOnLowerPart * prec;
@@ -817,7 +820,7 @@ bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
{
Index maxi = (std::min)(j, rows()-1);
for(Index i = 0; i < maxi; ++i)
- if(internal::abs(coeff(i, j)) > threshold) return false;
+ if(abs(coeff(i, j)) > threshold) return false;
}
return true;
}
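The check is relative, not absolute: the threshold is the largest magnitude found on the upper (resp. lower) part times prec, so off-triangular noise is tolerated in proportion to the matrix scale. A short sketch:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix3d m;
      m << 1.0,   2.0,   3.0,
           1e-14, 4.0,   5.0,
           0.0,   1e-14, 6.0;
      std::cout << m.isUpperTriangular(1e-12) << "\n";  // 1: noise is below 6.0 * 1e-12
      std::cout << m.isUpperTriangular(1e-16) << "\n";  // 0: threshold 6.0 * 1e-16 is stricter
      return 0;
    }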
diff --git a/extern/Eigen3/Eigen/src/Core/VectorBlock.h b/extern/Eigen3/Eigen/src/Core/VectorBlock.h
index 6f4effca055..1a7330f3cfc 100644
--- a/extern/Eigen3/Eigen/src/Core/VectorBlock.h
+++ b/extern/Eigen3/Eigen/src/Core/VectorBlock.h
@@ -90,195 +90,6 @@ template<typename VectorType, int Size> class VectorBlock
};
-/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
- *
- * \only_for_vectors
- *
- * \param start the first coefficient in the segment
- * \param size the number of coefficients in the segment
- *
- * Example: \include MatrixBase_segment_int_int.cpp
- * Output: \verbinclude MatrixBase_segment_int_int.out
- *
- * \note Even though the returned expression has dynamic size, in the case
- * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
- * which means that evaluating it does not cause a dynamic memory allocation.
- *
- * \sa class Block, segment(Index)
- */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::segment(Index start, Index size)
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return SegmentReturnType(derived(), start, size);
-}
-
-/** This is the const version of segment(Index,Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::segment(Index start, Index size) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return ConstSegmentReturnType(derived(), start, size);
-}
-
-/** \returns a dynamic-size expression of the first coefficients of *this.
- *
- * \only_for_vectors
- *
- * \param size the number of coefficients in the block
- *
- * Example: \include MatrixBase_start_int.cpp
- * Output: \verbinclude MatrixBase_start_int.out
- *
- * \note Even though the returned expression has dynamic size, in the case
- * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
- * which means that evaluating it does not cause a dynamic memory allocation.
- *
- * \sa class Block, block(Index,Index)
- */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::head(Index size)
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return SegmentReturnType(derived(), 0, size);
-}
-
-/** This is the const version of head(Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::head(Index size) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return ConstSegmentReturnType(derived(), 0, size);
-}
-
-/** \returns a dynamic-size expression of the last coefficients of *this.
- *
- * \only_for_vectors
- *
- * \param size the number of coefficients in the block
- *
- * Example: \include MatrixBase_end_int.cpp
- * Output: \verbinclude MatrixBase_end_int.out
- *
- * \note Even though the returned expression has dynamic size, in the case
- * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
- * which means that evaluating it does not cause a dynamic memory allocation.
- *
- * \sa class Block, block(Index,Index)
- */
-template<typename Derived>
-inline typename DenseBase<Derived>::SegmentReturnType
-DenseBase<Derived>::tail(Index size)
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return SegmentReturnType(derived(), this->size() - size, size);
-}
-
-/** This is the const version of tail(Index).*/
-template<typename Derived>
-inline typename DenseBase<Derived>::ConstSegmentReturnType
-DenseBase<Derived>::tail(Index size) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return ConstSegmentReturnType(derived(), this->size() - size, size);
-}
-
-/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
- *
- * \only_for_vectors
- *
- * The template parameter \a Size is the number of coefficients in the block
- *
- * \param start the index of the first element of the sub-vector
- *
- * Example: \include MatrixBase_template_int_segment.cpp
- * Output: \verbinclude MatrixBase_template_int_segment.out
- *
- * \sa class Block
- */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::segment(Index start)
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return typename FixedSegmentReturnType<Size>::Type(derived(), start);
-}
-
-/** This is the const version of segment<int>(Index).*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::segment(Index start) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return typename ConstFixedSegmentReturnType<Size>::Type(derived(), start);
-}
-
-/** \returns a fixed-size expression of the first coefficients of *this.
- *
- * \only_for_vectors
- *
- * The template parameter \a Size is the number of coefficients in the block
- *
- * Example: \include MatrixBase_template_int_start.cpp
- * Output: \verbinclude MatrixBase_template_int_start.out
- *
- * \sa class Block
- */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::head()
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return typename FixedSegmentReturnType<Size>::Type(derived(), 0);
-}
-
-/** This is the const version of head<int>().*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::head() const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return typename ConstFixedSegmentReturnType<Size>::Type(derived(), 0);
-}
-
-/** \returns a fixed-size expression of the last coefficients of *this.
- *
- * \only_for_vectors
- *
- * The template parameter \a Size is the number of coefficients in the block
- *
- * Example: \include MatrixBase_template_int_end.cpp
- * Output: \verbinclude MatrixBase_template_int_end.out
- *
- * \sa class Block
- */
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::tail()
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return typename FixedSegmentReturnType<Size>::Type(derived(), size() - Size);
-}
-
-/** This is the const version of tail<int>.*/
-template<typename Derived>
-template<int Size>
-inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
-DenseBase<Derived>::tail() const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return typename ConstFixedSegmentReturnType<Size>::Type(derived(), size() - Size);
-}
-
} // end namespace Eigen
#endif // EIGEN_VECTORBLOCK_H
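The segment/head/tail documentation deleted above is relocated rather than retired: the accessors remain part of the public dense API in this Eigen version; only their definition site changes. A minimal usage sketch:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::VectorXd v(6);
      v << 0, 1, 2, 3, 4, 5;
      std::cout << v.segment(1, 3).transpose() << "\n";  // 1 2 3 (dynamic size)
      std::cout << v.head(2).transpose()       << "\n";  // 0 1
      std::cout << v.tail(2).transpose()       << "\n";  // 4 5
      std::cout << v.segment<3>(2).transpose() << "\n";  // 2 3 4 (fixed size 3)
      return 0;
    }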
diff --git a/extern/Eigen3/Eigen/src/Core/VectorwiseOp.h b/extern/Eigen3/Eigen/src/Core/VectorwiseOp.h
index 862c0f33608..d5ab036647e 100644
--- a/extern/Eigen3/Eigen/src/Core/VectorwiseOp.h
+++ b/extern/Eigen3/Eigen/src/Core/VectorwiseOp.h
@@ -50,7 +50,7 @@ struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime,
Flags0 = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits,
Flags = (Flags0 & ~RowMajorBit) | (RowsAtCompileTime == 1 ? RowMajorBit : 0),
- TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime
+ TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime : MatrixType::ColsAtCompileTime
};
#if EIGEN_GNUC_AT_LEAST(3,4)
typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
@@ -58,7 +58,8 @@ struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
typedef typename MemberOp::template Cost<InputScalar,TraversalSize> CostOpType;
#endif
enum {
- CoeffReadCost = TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
+ CoeffReadCost = TraversalSize==Dynamic ? Dynamic
+ : TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
};
};
}
@@ -103,8 +104,8 @@ class PartialReduxExpr : internal::no_assignment_operator,
#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \
template <typename ResultType> \
- struct member_##MEMBER { \
- EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \
+ struct member_##MEMBER { \
+ EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \
typedef ResultType result_type; \
template<typename Scalar, int Size> struct Cost \
{ enum { value = COST }; }; \
@@ -233,6 +234,28 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
Direction==Vertical ? 1 : m_matrix.rows(),
Direction==Horizontal ? 1 : m_matrix.cols());
}
+
+ template<typename OtherDerived> struct OppositeExtendedType {
+ typedef Replicate<OtherDerived,
+ Direction==Horizontal ? 1 : ExpressionType::RowsAtCompileTime,
+ Direction==Vertical ? 1 : ExpressionType::ColsAtCompileTime> Type;
+ };
+
+ /** \internal
+ * Replicates a vector in the opposite direction to match the size of \c *this */
+ template<typename OtherDerived>
+ typename OppositeExtendedType<OtherDerived>::Type
+ extendedToOpposite(const DenseBase<OtherDerived>& other) const
+ {
+ EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Horizontal, OtherDerived::MaxColsAtCompileTime==1),
+ YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)
+ EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Vertical, OtherDerived::MaxRowsAtCompileTime==1),
+ YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)
+ return typename OppositeExtendedType<OtherDerived>::Type
+ (other.derived(),
+ Direction==Horizontal ? 1 : m_matrix.rows(),
+ Direction==Vertical ? 1 : m_matrix.cols());
+ }
public:
@@ -255,6 +278,8 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** \returns a row (or column) vector expression of the smallest coefficient
* of each column (or row) of the referenced expression.
+ *
+ * \warning the result is undefined if \c *this contains NaN.
*
* Example: \include PartialRedux_minCoeff.cpp
* Output: \verbinclude PartialRedux_minCoeff.out
@@ -265,6 +290,8 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
/** \returns a row (or column) vector expression of the largest coefficient
* of each column (or row) of the referenced expression.
+ *
+ * \warning the result is undefined if \c *this contains NaN.
*
* Example: \include PartialRedux_maxCoeff.cpp
* Output: \verbinclude PartialRedux_maxCoeff.out
@@ -504,6 +531,23 @@ template<typename ExpressionType, int Direction> class VectorwiseOp
EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
return m_matrix / extendedTo(other.derived());
}
+
+ /** \returns an expression where each column (or row) of the referenced matrix is normalized.
+ * The referenced matrix is \b not modified.
+ * \sa MatrixBase::normalized(), normalize()
+ */
+ CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,
+ const ExpressionTypeNestedCleaned,
+ const typename OppositeExtendedType<typename ReturnType<internal::member_norm,RealScalar>::Type>::Type>
+ normalized() const { return m_matrix.cwiseQuotient(extendedToOpposite(this->norm())); }
+
+
+ /** Normalizes in-place each row (or column) of the referenced matrix.
+ * \sa MatrixBase::normalize(), normalized()
+ */
+ void normalize() {
+ m_matrix = this->normalized();
+ }
/////////// Geometry module ///////////
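A short sketch of the two vectorwise entry points this hunk adds:

    #include <Eigen/Dense>

    int main() {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 4);
      Eigen::MatrixXd unit_cols = m.colwise().normalized();  // expression; m is untouched
      m.rowwise().normalize();                               // in place; each row now has unit norm
      return 0;
    }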
diff --git a/extern/Eigen3/Eigen/src/Core/Visitor.h b/extern/Eigen3/Eigen/src/Core/Visitor.h
index 916bfd096a9..64867b7a2cb 100644
--- a/extern/Eigen3/Eigen/src/Core/Visitor.h
+++ b/extern/Eigen3/Eigen/src/Core/Visitor.h
@@ -164,25 +164,25 @@ struct functor_traits<max_coeff_visitor<Scalar> > {
} // end namespace internal
-/** \returns the minimum of all coefficients of *this
- * and puts in *row and *col its location.
+/** \returns the minimum of all coefficients of *this and puts in *row and *col its location.
+ * \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visitor(), DenseBase::minCoeff()
*/
template<typename Derived>
template<typename IndexType>
typename internal::traits<Derived>::Scalar
-DenseBase<Derived>::minCoeff(IndexType* row, IndexType* col) const
+DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
{
internal::min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
- *row = minVisitor.row;
- if (col) *col = minVisitor.col;
+ *rowId = minVisitor.row;
+ if (colId) *colId = minVisitor.col;
return minVisitor.res;
}
-/** \returns the minimum of all coefficients of *this
- * and puts in *index its location.
+/** \returns the minimum of all coefficients of *this and puts in *index its location.
+ * \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::minCoeff()
*/
@@ -198,25 +198,25 @@ DenseBase<Derived>::minCoeff(IndexType* index) const
return minVisitor.res;
}
-/** \returns the maximum of all coefficients of *this
- * and puts in *row and *col its location.
+/** \returns the maximum of all coefficients of *this and puts in *row and *col its location.
+ * \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
*/
template<typename Derived>
template<typename IndexType>
typename internal::traits<Derived>::Scalar
-DenseBase<Derived>::maxCoeff(IndexType* row, IndexType* col) const
+DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
{
internal::max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor);
- *row = maxVisitor.row;
- if (col) *col = maxVisitor.col;
+ *rowPtr = maxVisitor.row;
+ if (colPtr) *colPtr = maxVisitor.col;
return maxVisitor.res;
}
-/** \returns the maximum of all coefficients of *this
- * and puts in *index its location.
+/** \returns the maximum of all coefficients of *this and puts in *index its location.
+ * \warning the result is undefined if \c *this contains NaN.
*
* \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()
*/
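Usage of the location-returning reductions, subject to the caveat in the new warnings that NaN inputs make the result undefined:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::Matrix2d m;
      m << 3, 1,
           4, 2;
      Eigen::Matrix2d::Index row, col;
      double lo = m.minCoeff(&row, &col);  // lo == 1, row == 0, col == 1
      double hi = m.maxCoeff(&row, &col);  // hi == 4, row == 1, col == 0
      std::cout << lo << " " << hi << "\n";
      return 0;
    }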
diff --git a/extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h b/extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h
index 75de1931198..e4089962dea 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -173,6 +173,9 @@ template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return psub<Packet4f>(p4f_ZERO, a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return psub<Packet4i>(p4i_ZERO, a); }
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b,p4f_ZERO); }
/* Commented out: it's actually slower than processing it scalar
*
diff --git a/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h b/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h
index 795b4be7303..f183d31de2a 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h
@@ -68,7 +68,6 @@ template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
{
Packet4f v1, v2;
- float32x2_t a_lo, a_hi;
// Get the real values of a | a1_re | a1_re | a2_re | a2_re |
v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0));
@@ -81,9 +80,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, con
// Conjugate v2
v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR));
// Swap real/imag elements in v2.
- a_lo = vrev64_f32(vget_low_f32(v2));
- a_hi = vrev64_f32(vget_high_f32(v2));
- v2 = vcombine_f32(a_lo, a_hi);
+ v2 = vrev64q_f32(v2);
// Add and return the result
return Packet2cf(vaddq_f32(v1, v2));
}
@@ -241,13 +238,10 @@ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, con
// TODO optimize it for AltiVec
Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
Packet4f s, rev_s;
- float32x2_t a_lo, a_hi;
// this computes the squared norm
s = vmulq_f32(b.v, b.v);
- a_lo = vrev64_f32(vget_low_f32(s));
- a_hi = vrev64_f32(vget_high_f32(s));
- rev_s = vcombine_f32(a_lo, a_hi);
+ rev_s = vrev64q_f32(s);
return Packet2cf(pdiv(res.v, vaddq_f32(s,rev_s)));
}
diff --git a/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h b/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
index a20250f7c65..163bac215e6 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -115,6 +115,9 @@ template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
@@ -188,15 +191,15 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EI
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
float32x2_t lo, hi;
- lo = vdup_n_f32(*from);
- hi = vdup_n_f32(*(from+1));
+ lo = vld1_dup_f32(from);
+ hi = vld1_dup_f32(from+1);
return vcombine_f32(lo, hi);
}
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
int32x2_t lo, hi;
- lo = vdup_n_s32(*from);
- hi = vdup_n_s32(*(from+1));
+ lo = vld1_dup_s32(from);
+ hi = vld1_dup_s32(from+1);
return vcombine_s32(lo, hi);
}
@@ -237,15 +240,12 @@ template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
float32x2_t a_lo, a_hi, sum;
- float s[2];
a_lo = vget_low_f32(a);
a_hi = vget_high_f32(a);
sum = vpadd_f32(a_lo, a_hi);
sum = vpadd_f32(sum, sum);
- vst1_f32(s, sum);
-
- return s[0];
+ return vget_lane_f32(sum, 0);
}
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
@@ -271,15 +271,12 @@ template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
int32x2_t a_lo, a_hi, sum;
- int32_t s[2];
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
sum = vpadd_s32(a_lo, a_hi);
sum = vpadd_s32(sum, sum);
- vst1_s32(s, sum);
-
- return s[0];
+ return vget_lane_s32(sum, 0);
}
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
@@ -307,7 +304,6 @@ template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
float32x2_t a_lo, a_hi, prod;
- float s[2];
// Get a_lo = |a1|a2| and a_hi = |a3|a4|
a_lo = vget_low_f32(a);
@@ -316,14 +312,12 @@ template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
prod = vmul_f32(a_lo, a_hi);
// Multiply prod with its swapped value |a2*a4|a1*a3|
prod = vmul_f32(prod, vrev64_f32(prod));
- vst1_f32(s, prod);
- return s[0];
+ return vget_lane_f32(prod, 0);
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
int32x2_t a_lo, a_hi, prod;
- int32_t s[2];
// Get a_lo = |a1|a2| and a_hi = |a3|a4|
a_lo = vget_low_s32(a);
@@ -332,65 +326,57 @@ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
prod = vmul_s32(a_lo, a_hi);
// Multiply prod with its swapped value |a2*a4|a1*a3|
prod = vmul_s32(prod, vrev64_s32(prod));
- vst1_s32(s, prod);
- return s[0];
+ return vget_lane_s32(prod, 0);
}
// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
float32x2_t a_lo, a_hi, min;
- float s[2];
a_lo = vget_low_f32(a);
a_hi = vget_high_f32(a);
min = vpmin_f32(a_lo, a_hi);
min = vpmin_f32(min, min);
- vst1_f32(s, min);
- return s[0];
+ return vget_lane_f32(min, 0);
}
+
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
int32x2_t a_lo, a_hi, min;
- int32_t s[2];
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
min = vpmin_s32(a_lo, a_hi);
min = vpmin_s32(min, min);
- vst1_s32(s, min);
-
- return s[0];
+
+ return vget_lane_s32(min, 0);
}
// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
float32x2_t a_lo, a_hi, max;
- float s[2];
a_lo = vget_low_f32(a);
a_hi = vget_high_f32(a);
max = vpmax_f32(a_lo, a_hi);
max = vpmax_f32(max, max);
- vst1_f32(s, max);
- return s[0];
+ return vget_lane_f32(max, 0);
}
+
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
int32x2_t a_lo, a_hi, max;
- int32_t s[2];
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
max = vpmax_s32(a_lo, a_hi);
max = vpmax_s32(max, max);
- vst1_s32(s, max);
- return s[0];
+ return vget_lane_s32(max, 0);
}
// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
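The predux rewrites all follow one pattern: the old code stored the two-lane result to a stack array and read element 0 back; the new code reads lane 0 directly. A standalone sketch of the pattern, assuming an ARM NEON toolchain:

    #include <arm_neon.h>

    // Horizontal sum in the style of the rewritten predux<Packet4f>:
    // pairwise-add twice, then extract lane 0 without a memory round trip.
    static inline float horizontal_add(float32x4_t a) {
      float32x2_t sum = vpadd_f32(vget_low_f32(a), vget_high_f32(a));
      sum = vpadd_f32(sum, sum);
      return vget_lane_f32(sum, 0);
    }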
diff --git a/extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h b/extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h
index 12df987754c..91bba5e38fe 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h
@@ -81,25 +81,31 @@ template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a,
template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); }
-template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&real_ref(*from))); }
-template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&real_ref(*from))); }
+template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&numext::real_ref(*from))); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&numext::real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
Packet2cf res;
- #if EIGEN_GNUC_AT_MOST(4,2)
- // workaround annoying "may be used uninitialized in this function" warning with gcc 4.2
+#if EIGEN_GNUC_AT_MOST(4,2)
+ // Workaround annoying "may be used uninitialized in this function" warning with gcc 4.2
res.v = _mm_loadl_pi(_mm_set1_ps(0.0f), reinterpret_cast<const __m64*>(&from));
- #else
+#elif EIGEN_GNUC_AT_LEAST(4,6)
+ // Suppress annoying "may be used uninitialized in this function" warning with gcc >= 4.6
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wuninitialized"
res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
- #endif
+ #pragma GCC diagnostic pop
+#else
+ res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
+#endif
return Packet2cf(_mm_movelh_ps(res.v,res.v));
}
template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
-template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&real_ref(*to), from.v); }
-template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&real_ref(*to), from.v); }
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }
template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
diff --git a/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h b/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
index 3f41a4e2600..99cbd0d95be 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -31,7 +31,8 @@ Packet4f plog<Packet4f>(const Packet4f& _x)
/* the smallest non denormalized float number */
_EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
-
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000);//-1.f/0.f);
+
/* natural logarithm computed for 4 simultaneous float
return NaN for x <= 0
*/
@@ -51,7 +52,8 @@ Packet4f plog<Packet4f>(const Packet4f& _x)
Packet4i emm0;
- Packet4f invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());
+ Packet4f invalid_mask = _mm_cmplt_ps(x, _mm_setzero_ps());
+ Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());
x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
@@ -96,7 +98,9 @@ Packet4f plog<Packet4f>(const Packet4f& _x)
y2 = pmul(e, p4f_cephes_log_q2);
x = padd(x, y);
x = padd(x, y2);
- return _mm_or_ps(x, invalid_mask); // negative arg will be NAN
+ // negative arg will be NAN, 0 will be -INF
+ return _mm_or_ps(_mm_andnot_ps(iszero_mask, _mm_or_ps(x, invalid_mask)),
+ _mm_and_ps(iszero_mask, p4f_minus_inf));
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
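The plog change brings the vectorized log in line with IEEE std::log on the edge cases: strictly negative inputs produce NaN and exact zeros produce -inf, selected by masks rather than branches. A scalar model of that selection (hypothetical helper, not Eigen API):

    #include <cmath>
    #include <limits>

    // Scalar model of the masked selection above.
    float log_model(float x) {
      if (x < 0.0f)  return std::numeric_limits<float>::quiet_NaN();  // invalid_mask path
      if (x == 0.0f) return -std::numeric_limits<float>::infinity();  // iszero_mask path
      return std::log(x);  // stands in for the Cephes polynomial evaluation
    }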
@@ -131,13 +135,16 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
/* express exp(x) as exp(g + n*log(2)) */
fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
- /* how to perform a floorf with SSE: just below */
+#ifdef EIGEN_VECTORIZE_SSE4_1
+ fx = _mm_floor_ps(fx);
+#else
emm0 = _mm_cvttps_epi32(fx);
tmp = _mm_cvtepi32_ps(emm0);
/* if greater, subtract 1 */
Packet4f mask = _mm_cmpgt_ps(tmp, fx);
mask = _mm_and_ps(mask, p4f_1);
fx = psub(tmp, mask);
+#endif
tmp = pmul(fx, p4f_cephes_exp_C1);
Packet4f z = pmul(fx, p4f_cephes_exp_C2);
@@ -161,6 +168,79 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
emm0 = _mm_slli_epi32(emm0, 23);
return pmul(y, _mm_castsi128_ps(emm0));
}
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d pexp<Packet2d>(const Packet2d& _x)
+{
+ Packet2d x = _x;
+
+ _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
+ _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
+ _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
+
+ _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437);
+ _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
+
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
+
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
+
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
+
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
+ _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
+ static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
+
+ Packet2d tmp = _mm_setzero_pd(), fx;
+ Packet4i emm0;
+
+ // clamp x
+ x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
+ /* express exp(x) as exp(g + n*log(2)) */
+ fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+ fx = _mm_floor_pd(fx);
+#else
+ emm0 = _mm_cvttpd_epi32(fx);
+ tmp = _mm_cvtepi32_pd(emm0);
+ /* if greater, subtract 1 */
+ Packet2d mask = _mm_cmpgt_pd(tmp, fx);
+ mask = _mm_and_pd(mask, p2d_1);
+ fx = psub(tmp, mask);
+#endif
+
+ tmp = pmul(fx, p2d_cephes_exp_C1);
+ Packet2d z = pmul(fx, p2d_cephes_exp_C2);
+ x = psub(x, tmp);
+ x = psub(x, z);
+
+ Packet2d x2 = pmul(x,x);
+
+ Packet2d px = p2d_cephes_exp_p0;
+ px = pmadd(px, x2, p2d_cephes_exp_p1);
+ px = pmadd(px, x2, p2d_cephes_exp_p2);
+ px = pmul (px, x);
+
+ Packet2d qx = p2d_cephes_exp_q0;
+ qx = pmadd(qx, x2, p2d_cephes_exp_q1);
+ qx = pmadd(qx, x2, p2d_cephes_exp_q2);
+ qx = pmadd(qx, x2, p2d_cephes_exp_q3);
+
+ x = pdiv(px,psub(qx,px));
+ x = pmadd(p2d_2,x,p2d_1);
+
+ // build 2^n
+ emm0 = _mm_cvttpd_epi32(fx);
+ emm0 = _mm_add_epi32(emm0, p4i_1023_0);
+ emm0 = _mm_slli_epi32(emm0, 20);
+ emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
+ return pmul(x, _mm_castsi128_pd(emm0));
+}
/* evaluation of 4 sines at once, using SSE2 intrinsics.
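The new double-precision exp kernel is the classic Cephes scheme: write x = n*ln2 + g with |g| small, approximate exp(g) by a rational function, then scale by 2^n built directly in the exponent bits. A scalar model using the same coefficients (std::ldexp stands in for the bit-level 2^n construction):

    #include <cmath>

    double exp_model(double x) {
      // n = round(x / ln 2)
      double n = std::floor(x * 1.4426950408889634 + 0.5);
      // g = x - n*ln2, with ln2 split into a high and a low part for accuracy
      double g = x - n * 0.693145751953125 - n * 1.42860682030941723212e-6;
      double g2 = g * g;
      // exp(g) ~ 1 + 2*p/(q - p), the rational form evaluated by px/qx above
      double p = g * ((1.26177193074810590878e-4 * g2
                       + 3.02994407707441961300e-2) * g2
                       + 9.99999999999999999910e-1);
      double q = ((3.00198505138664455042e-6 * g2
                   + 2.52448340349684104192e-3) * g2
                   + 2.27265548208155028766e-1) * g2
                   + 2.0;
      return std::ldexp(1.0 + 2.0 * p / (q - p), (int)n);
    }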
@@ -362,21 +442,32 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
return _mm_xor_ps(y, sign_bit);
}
+#if EIGEN_FAST_MATH
+
// This is based on Quake3's fast inverse square root.
// For detail see here: http://www.beyond3d.com/content/articles/8/
+// It lacks 1 bit (or 2 bits in some rare cases) of precision, and does not handle negative, +inf, or denormalized numbers correctly.
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psqrt<Packet4f>(const Packet4f& _x)
{
Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
/* select only the inverse sqrt of non-zero inputs */
- Packet4f non_zero_mask = _mm_cmpgt_ps(_x, pset1<Packet4f>(std::numeric_limits<float>::epsilon()));
+ Packet4f non_zero_mask = _mm_cmpge_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)()));
Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));
x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
return pmul(_x,x);
}
+#else
+
+template<> EIGEN_STRONG_INLINE Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }
+
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }
+
} // end namespace internal
} // end namespace Eigen
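Under EIGEN_FAST_MATH the single-precision psqrt refines the hardware reciprocal-square-root estimate (about 12 valid bits from _mm_rsqrt_ps) with one Newton-Raphson step, after masking inputs below the smallest normalized float to zero. A scalar model of the refinement:

    #include <cmath>

    // One Newton step on y ~ 1/sqrt(x): y' = y*(1.5 - 0.5*x*y*y),
    // roughly doubling the number of correct bits; sqrt(x) ~ x*y'.
    float fast_sqrt_model(float x, float rsqrt_estimate) {
      float y = rsqrt_estimate;
      y = y * (1.5f - 0.5f * x * y * y);
      return x * y;
    }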
diff --git a/extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h b/extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h
index 10d9182190f..fc8ae50fed7 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -48,6 +48,9 @@ template<> struct is_arithmetic<__m128d> { enum { value = true }; };
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
const Packet4f p4f_##NAME = pset1<Packet4f>(X)
+#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
+ const Packet2d p2d_##NAME = pset1<Packet2d>(X)
+
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
@@ -63,7 +66,7 @@ template<> struct packet_traits<float> : default_packet_traits
AlignedOnScalar = 1,
size=4,
- HasDiv = 1,
+ HasDiv = 1,
HasSin = EIGEN_FAST_MATH,
HasCos = EIGEN_FAST_MATH,
HasLog = 1,
@@ -79,7 +82,9 @@ template<> struct packet_traits<double> : default_packet_traits
AlignedOnScalar = 1,
size=2,
- HasDiv = 1
+ HasDiv = 1,
+ HasExp = 1,
+ HasSqrt = 1
};
};
template<> struct packet_traits<int> : default_packet_traits
@@ -137,6 +142,10 @@ template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
return psub(_mm_setr_epi32(0,0,0,0), a);
}
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
@@ -169,18 +178,26 @@ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+ return _mm_min_epi32(a,b);
+#else
// after some bench, this version *is* faster than a scalar implementation
Packet4i mask = _mm_cmplt_epi32(a,b);
return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+ return _mm_max_epi32(a,b);
+#else
// after some bench, this version *is* faster than a scalar implementation
Packet4i mask = _mm_cmpgt_epi32(a,b);
return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
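The SSE4.1 branch above uses the native min/max instructions; the SSE2 fallback keeps the compare-and-blend idiom the comment defends. Standalone, for min:

    #include <emmintrin.h>  // SSE2

    // SSE2 lacks _mm_min_epi32, so build it from a signed compare:
    // keep a in lanes where a < b, b elsewhere.
    static inline __m128i min_epi32_sse2(__m128i a, __m128i b) {
      __m128i mask = _mm_cmplt_epi32(a, b);            // all-ones where a < b
      return _mm_or_si128(_mm_and_si128(mask, a),
                          _mm_andnot_si128(mask, b));
    }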
@@ -491,8 +508,8 @@ template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
// for GCC (eg., it does not like using std::min after the pstore !!)
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
- register int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
- register int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
+ int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
+ int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
return aux0<aux2 ? aux0 : aux2;
}
@@ -512,8 +529,8 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
// for GCC (eg., it does not like using std::min after the pstore !!)
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
- register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
- register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
+ int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
+ int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
return aux0>aux2 ? aux0 : aux2;
}
diff --git a/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h b/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h
index 403d25fa9eb..c06a0df1c21 100644
--- a/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h
+++ b/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h
@@ -150,7 +150,7 @@ class CoeffBasedProduct
{
// we don't allow taking products of matrices of different real types, as that wouldn't be vectorizable.
// We still allow mixing T and complex<T>.
- EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+ EIGEN_STATIC_ASSERT((internal::scalar_product_traits<typename Lhs::RealScalar, typename Rhs::RealScalar>::Defined),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
eigen_assert(lhs.cols() == rhs.rows()
&& "invalid matrix product"
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index 5eb03c98ccf..bcdca5b0d23 100644
--- a/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -69,8 +69,8 @@ inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdi
* - the number of scalars that fit into a packet (when vectorization is enabled).
*
* \sa setCpuCacheSizes */
-template<typename LhsScalar, typename RhsScalar, int KcFactor>
-void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
+template<typename LhsScalar, typename RhsScalar, int KcFactor, typename SizeType>
+void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n)
{
EIGEN_UNUSED_VARIABLE(n);
// Explanations:
@@ -91,13 +91,13 @@ void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrd
};
manage_caching_sizes(GetAction, &l1, &l2);
- k = std::min<std::ptrdiff_t>(k, l1/kdiv);
- std::ptrdiff_t _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
+ k = std::min<SizeType>(k, l1/kdiv);
+ SizeType _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
if(_m<m) m = _m & mr_mask;
}
-template<typename LhsScalar, typename RhsScalar>
-inline void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
+template<typename LhsScalar, typename RhsScalar, typename SizeType>
+inline void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n);
}
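Templating on SizeType changes the interface, not the policy: kc stays small enough that an rhs panel lives in L1 while the lhs block streams through L2. A simplified model with assumed cache sizes and a stand-in divisor (the real kdiv and mr_mask come from packet and register-blocking traits):

    #include <algorithm>
    #include <cstddef>

    void blocking_sizes_model(std::ptrdiff_t& k, std::ptrdiff_t& m) {
      const std::ptrdiff_t l1 = 32 * 1024;                // assumed L1 data cache (bytes)
      const std::ptrdiff_t l2 = 256 * 1024;               // assumed L2 cache (bytes)
      const std::ptrdiff_t kdiv = 64;                     // stand-in for the traits-derived divisor
      const std::ptrdiff_t mr_mask = ~std::ptrdiff_t(3);  // round m down to a multiple of 4
      k = std::min<std::ptrdiff_t>(k, l1 / kdiv);         // rhs panel fits in L1
      std::ptrdiff_t m2 = k > 0 ? l2 / (4 * std::ptrdiff_t(sizeof(double)) * k) : 0;
      if (m2 < m) m = m2 & mr_mask;                       // lhs block fits in L2
    }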
@@ -527,9 +527,16 @@ struct gebp_kernel
ResPacketSize = Traits::ResPacketSize
};
- EIGEN_DONT_INLINE EIGEN_FLATTEN_ATTRIB
+ EIGEN_DONT_INLINE
void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
- Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB = 0)
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB=0);
+};
+
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+EIGEN_DONT_INLINE
+void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
+ ::operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
+ Index strideA, Index strideB, Index offsetA, Index offsetB, RhsScalar* unpackedB)
{
Traits traits;
@@ -1089,7 +1096,7 @@ EIGEN_ASM_COMMENT("mybegin4");
}
}
}
-};
+
#undef CJMADD
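The gebp/pack refactorings in this file all share one mechanical shape: the heavy operator() bodies leave the class definition (where member functions are implicitly inline) and become out-of-line definitions carrying EIGEN_DONT_INLINE, giving explicit control over inlining of these large kernels. The skeleton of the pattern, with hypothetical names:

    // Declare inside the struct, define out of line with the noinline marker,
    // mirroring how the gebp_kernel and gemm_pack_* bodies are moved below.
    #define MY_DONT_INLINE __attribute__((noinline))  // stand-in for EIGEN_DONT_INLINE

    template<typename Scalar>
    struct pack_kernel {
      MY_DONT_INLINE void operator()(Scalar* dst, const Scalar* src, int n);
    };

    template<typename Scalar>
    MY_DONT_INLINE void pack_kernel<Scalar>::operator()(Scalar* dst, const Scalar* src, int n) {
      for (int i = 0; i < n; ++i)
        dst[i] = src[i];  // placeholder body; the real kernels pack/compute blocks
    }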
@@ -1110,80 +1117,85 @@ EIGEN_ASM_COMMENT("mybegin4");
template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs
{
- EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows,
- Index stride=0, Index offset=0)
+ EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder, Conjugate, PanelMode>
+ ::operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride, Index offset)
+{
+ typedef typename packet_traits<Scalar>::type Packet;
+ enum { PacketSize = packet_traits<Scalar>::size };
+
+ EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
+ EIGEN_UNUSED_VARIABLE(stride)
+ EIGEN_UNUSED_VARIABLE(offset)
+ eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+ eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
+ conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+ const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
+ Index count = 0;
+ Index peeled_mc = (rows/Pack1)*Pack1;
+ for(Index i=0; i<peeled_mc; i+=Pack1)
{
- typedef typename packet_traits<Scalar>::type Packet;
- enum { PacketSize = packet_traits<Scalar>::size };
-
- EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
- eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
- eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
- conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
- const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
- Index count = 0;
- Index peeled_mc = (rows/Pack1)*Pack1;
- for(Index i=0; i<peeled_mc; i+=Pack1)
- {
- if(PanelMode) count += Pack1 * offset;
+ if(PanelMode) count += Pack1 * offset;
- if(StorageOrder==ColMajor)
+ if(StorageOrder==ColMajor)
+ {
+ for(Index k=0; k<depth; k++)
{
- for(Index k=0; k<depth; k++)
- {
- Packet A, B, C, D;
- if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
- if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
- if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
- if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
- if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
- if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
- if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
- if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
- }
+ Packet A, B, C, D;
+ if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
+ if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
+ if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
+ if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
+ if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
+ if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
+ if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
+ if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
}
- else
+ }
+ else
+ {
+ for(Index k=0; k<depth; k++)
{
- for(Index k=0; k<depth; k++)
+ // TODO add a vectorized transpose here
+ Index w=0;
+ for(; w<Pack1-3; w+=4)
{
- // TODO add a vectorized transpose here
- Index w=0;
- for(; w<Pack1-3; w+=4)
- {
- Scalar a(cj(lhs(i+w+0, k))),
- b(cj(lhs(i+w+1, k))),
- c(cj(lhs(i+w+2, k))),
- d(cj(lhs(i+w+3, k)));
- blockA[count++] = a;
- blockA[count++] = b;
- blockA[count++] = c;
- blockA[count++] = d;
- }
- if(Pack1%4)
- for(;w<Pack1;++w)
- blockA[count++] = cj(lhs(i+w, k));
+ Scalar a(cj(lhs(i+w+0, k))),
+ b(cj(lhs(i+w+1, k))),
+ c(cj(lhs(i+w+2, k))),
+ d(cj(lhs(i+w+3, k)));
+ blockA[count++] = a;
+ blockA[count++] = b;
+ blockA[count++] = c;
+ blockA[count++] = d;
}
+ if(Pack1%4)
+ for(;w<Pack1;++w)
+ blockA[count++] = cj(lhs(i+w, k));
}
- if(PanelMode) count += Pack1 * (stride-offset-depth);
- }
- if(rows-peeled_mc>=Pack2)
- {
- if(PanelMode) count += Pack2*offset;
- for(Index k=0; k<depth; k++)
- for(Index w=0; w<Pack2; w++)
- blockA[count++] = cj(lhs(peeled_mc+w, k));
- if(PanelMode) count += Pack2 * (stride-offset-depth);
- peeled_mc += Pack2;
- }
- for(Index i=peeled_mc; i<rows; i++)
- {
- if(PanelMode) count += offset;
- for(Index k=0; k<depth; k++)
- blockA[count++] = cj(lhs(i, k));
- if(PanelMode) count += (stride-offset-depth);
}
+ if(PanelMode) count += Pack1 * (stride-offset-depth);
}
-};
+ if(rows-peeled_mc>=Pack2)
+ {
+ if(PanelMode) count += Pack2*offset;
+ for(Index k=0; k<depth; k++)
+ for(Index w=0; w<Pack2; w++)
+ blockA[count++] = cj(lhs(peeled_mc+w, k));
+ if(PanelMode) count += Pack2 * (stride-offset-depth);
+ peeled_mc += Pack2;
+ }
+ for(Index i=peeled_mc; i<rows; i++)
+ {
+ if(PanelMode) count += offset;
+ for(Index k=0; k<depth; k++)
+ blockA[count++] = cj(lhs(i, k));
+ if(PanelMode) count += (stride-offset-depth);
+ }
+}
// copy a complete panel of the rhs
// this version is optimized for column major matrices
@@ -1197,92 +1209,102 @@ struct gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
{
typedef typename packet_traits<Scalar>::type Packet;
enum { PacketSize = packet_traits<Scalar>::size };
- EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
- Index stride=0, Index offset=0)
+ EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
+ ::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
+{
+ EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
+ EIGEN_UNUSED_VARIABLE(stride)
+ EIGEN_UNUSED_VARIABLE(offset)
+ eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+ conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+ Index packet_cols = (cols/nr) * nr;
+ Index count = 0;
+ for(Index j2=0; j2<packet_cols; j2+=nr)
{
- EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
- eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
- conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
- Index packet_cols = (cols/nr) * nr;
- Index count = 0;
- for(Index j2=0; j2<packet_cols; j2+=nr)
+ // skip what we have before
+ if(PanelMode) count += nr * offset;
+ const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+ const Scalar* b1 = &rhs[(j2+1)*rhsStride];
+ const Scalar* b2 = &rhs[(j2+2)*rhsStride];
+ const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+ for(Index k=0; k<depth; k++)
{
- // skip what we have before
- if(PanelMode) count += nr * offset;
- const Scalar* b0 = &rhs[(j2+0)*rhsStride];
- const Scalar* b1 = &rhs[(j2+1)*rhsStride];
- const Scalar* b2 = &rhs[(j2+2)*rhsStride];
- const Scalar* b3 = &rhs[(j2+3)*rhsStride];
- for(Index k=0; k<depth; k++)
- {
- blockB[count+0] = cj(b0[k]);
- blockB[count+1] = cj(b1[k]);
- if(nr==4) blockB[count+2] = cj(b2[k]);
- if(nr==4) blockB[count+3] = cj(b3[k]);
- count += nr;
- }
- // skip what we have after
- if(PanelMode) count += nr * (stride-offset-depth);
+ blockB[count+0] = cj(b0[k]);
+ blockB[count+1] = cj(b1[k]);
+ if(nr==4) blockB[count+2] = cj(b2[k]);
+ if(nr==4) blockB[count+3] = cj(b3[k]);
+ count += nr;
}
+ // skip what we have after
+ if(PanelMode) count += nr * (stride-offset-depth);
+ }
- // copy the remaining columns one at a time (nr==1)
- for(Index j2=packet_cols; j2<cols; ++j2)
+ // copy the remaining columns one at a time (nr==1)
+ for(Index j2=packet_cols; j2<cols; ++j2)
+ {
+ if(PanelMode) count += offset;
+ const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+ for(Index k=0; k<depth; k++)
{
- if(PanelMode) count += offset;
- const Scalar* b0 = &rhs[(j2+0)*rhsStride];
- for(Index k=0; k<depth; k++)
- {
- blockB[count] = cj(b0[k]);
- count += 1;
- }
- if(PanelMode) count += (stride-offset-depth);
+ blockB[count] = cj(b0[k]);
+ count += 1;
}
+ if(PanelMode) count += (stride-offset-depth);
}
-};
+}
// this version is optimized for row major matrices
template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
{
enum { PacketSize = packet_traits<Scalar>::size };
- EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
- Index stride=0, Index offset=0)
+ EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
+ ::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
+{
+ EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
+ EIGEN_UNUSED_VARIABLE(stride)
+ EIGEN_UNUSED_VARIABLE(offset)
+ eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+ conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+ Index packet_cols = (cols/nr) * nr;
+ Index count = 0;
+ for(Index j2=0; j2<packet_cols; j2+=nr)
{
- EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
- eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
- conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
- Index packet_cols = (cols/nr) * nr;
- Index count = 0;
- for(Index j2=0; j2<packet_cols; j2+=nr)
+ // skip what we have before
+ if(PanelMode) count += nr * offset;
+ for(Index k=0; k<depth; k++)
{
- // skip what we have before
- if(PanelMode) count += nr * offset;
- for(Index k=0; k<depth; k++)
- {
- const Scalar* b0 = &rhs[k*rhsStride + j2];
- blockB[count+0] = cj(b0[0]);
- blockB[count+1] = cj(b0[1]);
- if(nr==4) blockB[count+2] = cj(b0[2]);
- if(nr==4) blockB[count+3] = cj(b0[3]);
- count += nr;
- }
- // skip what we have after
- if(PanelMode) count += nr * (stride-offset-depth);
+ const Scalar* b0 = &rhs[k*rhsStride + j2];
+ blockB[count+0] = cj(b0[0]);
+ blockB[count+1] = cj(b0[1]);
+ if(nr==4) blockB[count+2] = cj(b0[2]);
+ if(nr==4) blockB[count+3] = cj(b0[3]);
+ count += nr;
}
- // copy the remaining columns one at a time (nr==1)
- for(Index j2=packet_cols; j2<cols; ++j2)
+ // skip what we have after
+ if(PanelMode) count += nr * (stride-offset-depth);
+ }
+ // copy the remaining columns one at a time (nr==1)
+ for(Index j2=packet_cols; j2<cols; ++j2)
+ {
+ if(PanelMode) count += offset;
+ const Scalar* b0 = &rhs[j2];
+ for(Index k=0; k<depth; k++)
{
- if(PanelMode) count += offset;
- const Scalar* b0 = &rhs[j2];
- for(Index k=0; k<depth; k++)
- {
- blockB[count] = cj(b0[k*rhsStride]);
- count += 1;
- }
- if(PanelMode) count += stride-offset-depth;
+ blockB[count] = cj(b0[k*rhsStride]);
+ count += 1;
}
+ if(PanelMode) count += stride-offset-depth;
}
-};
+}
} // end namespace internal
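
To make the two rhs packers above easier to follow: both copy a panel of the source matrix into a contiguous scratch buffer laid out exactly in the order the GEBP kernel will consume it, nr columns interleaved at a time, so the hot loop reads blockB strictly sequentially. A minimal sketch of the column-major case, with hypothetical names and none of the conjugation or PanelMode handling:

#include <cstddef>

// Pack a depth x cols block of a column-major matrix 'rhs' (leading dimension
// rhsStride) into blockB, nr columns at a time. Illustrative free function
// mirroring what gemm_pack_rhs<..., ColMajor, ...> does.
template <typename Scalar, int nr>
void pack_rhs_colmajor(Scalar* blockB, const Scalar* rhs, std::ptrdiff_t rhsStride,
                       std::ptrdiff_t depth, std::ptrdiff_t cols)
{
  std::ptrdiff_t packet_cols = (cols / nr) * nr;
  std::ptrdiff_t count = 0;
  for (std::ptrdiff_t j = 0; j < packet_cols; j += nr)   // full groups of nr columns
    for (std::ptrdiff_t k = 0; k < depth; ++k)
      for (std::ptrdiff_t c = 0; c < nr; ++c)
        blockB[count++] = rhs[(j + c) * rhsStride + k];  // interleave the nr columns
  for (std::ptrdiff_t j = packet_cols; j < cols; ++j)    // leftover columns one by one
    for (std::ptrdiff_t k = 0; k < depth; ++k)
      blockB[count++] = rhs[j * rhsStride + k];
}
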
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h
index 73a465ec5ee..3f5ffcf51c7 100644
--- a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -50,6 +50,7 @@ template<
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
+
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
@@ -169,7 +170,6 @@ static void run(Index rows, Index cols, Index depth,
// vertical panel which is, in practice, a very low number.
pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
-
// For each mc x kc block of the lhs's vertical panel...
// (==GEPP_VAR1)
for(Index i2=0; i2<rows; i2+=mc)
@@ -183,7 +183,6 @@ static void run(Index rows, Index cols, Index depth,
// Everything is packed, we can now call the block * panel kernel:
gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
-
}
}
}
@@ -204,7 +203,7 @@ struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
- gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
+ gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
@@ -395,7 +394,7 @@ class GeneralProduct<Lhs, Rhs, GemmProduct>
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
}
- template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
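
Several hunks in this file (and throughout the patch) change `Scalar alpha` to `const Scalar& alpha`. For float or double the two are equivalent, but Eigen supports user-defined scalar types where pass-by-value means a real copy per call. A hedged illustration with a hypothetical multiprecision type:

#include <vector>

struct BigFloat {                 // stand-in for a multiprecision scalar
  std::vector<unsigned> limbs;    // copying this allocates
};

void scale_by_value(BigFloat alpha) { (void)alpha; }        // copies `alpha` at every call
void scale_by_cref(const BigFloat& alpha) { (void)alpha; }  // binds to the caller's object, no copy
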
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
index 432d3a9dc84..5c37639091c 100644
--- a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
@@ -12,6 +12,9 @@
namespace Eigen {
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs>
+struct selfadjoint_rank1_update;
+
namespace internal {
/**********************************************************************
@@ -39,7 +42,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
- const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha)
+ const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha)
{
general_matrix_matrix_triangular_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
@@ -55,7 +58,7 @@ struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,
{
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,
- const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha)
+ const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha)
{
const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
@@ -133,7 +136,7 @@ struct tribb_kernel
enum {
BlockSize = EIGEN_PLAIN_ENUM_MAX(mr,nr)
};
- void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, ResScalar alpha, RhsScalar* workspace)
+ void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha, RhsScalar* workspace)
{
gebp_kernel<LhsScalar, RhsScalar, Index, mr, nr, ConjLhs, ConjRhs> gebp_kernel;
Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer;
@@ -180,31 +183,92 @@ struct tribb_kernel
// high level API
+template<typename MatrixType, typename ProductType, int UpLo, bool IsOuterProduct>
+struct general_product_to_triangular_selector;
+
+
+template<typename MatrixType, typename ProductType, int UpLo>
+struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,true>
+{
+ static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
+ typedef internal::blas_traits<Lhs> LhsBlasTraits;
+ typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
+ typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
+ typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+
+ typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;
+ typedef internal::blas_traits<Rhs> RhsBlasTraits;
+ typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
+ typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
+ typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
+
+ enum {
+ StorageOrder = (internal::traits<MatrixType>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+ UseLhsDirectly = _ActualLhs::InnerStrideAtCompileTime==1,
+ UseRhsDirectly = _ActualRhs::InnerStrideAtCompileTime==1
+ };
+
+ internal::gemv_static_vector_if<Scalar,Lhs::SizeAtCompileTime,Lhs::MaxSizeAtCompileTime,!UseLhsDirectly> static_lhs;
+ ei_declare_aligned_stack_constructed_variable(Scalar, actualLhsPtr, actualLhs.size(),
+ (UseLhsDirectly ? const_cast<Scalar*>(actualLhs.data()) : static_lhs.data()));
+ if(!UseLhsDirectly) Map<typename _ActualLhs::PlainObject>(actualLhsPtr, actualLhs.size()) = actualLhs;
+
+ internal::gemv_static_vector_if<Scalar,Rhs::SizeAtCompileTime,Rhs::MaxSizeAtCompileTime,!UseRhsDirectly> static_rhs;
+ ei_declare_aligned_stack_constructed_variable(Scalar, actualRhsPtr, actualRhs.size(),
+ (UseRhsDirectly ? const_cast<Scalar*>(actualRhs.data()) : static_rhs.data()));
+ if(!UseRhsDirectly) Map<typename _ActualRhs::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+
+
+ selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
+ LhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
+ RhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex>
+ ::run(actualLhs.size(), mat.data(), mat.outerStride(), actualLhsPtr, actualRhsPtr, actualAlpha);
+ }
+};
+
+template<typename MatrixType, typename ProductType, int UpLo>
+struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>
+{
+ static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha)
+ {
+ typedef typename MatrixType::Index Index;
+
+ typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
+ typedef internal::blas_traits<Lhs> LhsBlasTraits;
+ typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
+ typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
+ typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+
+ typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;
+ typedef internal::blas_traits<Rhs> RhsBlasTraits;
+ typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
+ typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
+ typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ typename ProductType::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
+
+ internal::general_matrix_matrix_triangular_product<Index,
+ typename Lhs::Scalar, _ActualLhs::Flags&RowMajorBit ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
+ typename Rhs::Scalar, _ActualRhs::Flags&RowMajorBit ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
+ MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo>
+ ::run(mat.cols(), actualLhs.cols(),
+ &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(),
+ mat.data(), mat.outerStride(), actualAlpha);
+ }
+};
+
template<typename MatrixType, unsigned int UpLo>
template<typename ProductDerived, typename _Lhs, typename _Rhs>
TriangularView<MatrixType,UpLo>& TriangularView<MatrixType,UpLo>::assignProduct(const ProductBase<ProductDerived, _Lhs,_Rhs>& prod, const Scalar& alpha)
{
- typedef typename internal::remove_all<typename ProductDerived::LhsNested>::type Lhs;
- typedef internal::blas_traits<Lhs> LhsBlasTraits;
- typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
- typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
- typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
-
- typedef typename internal::remove_all<typename ProductDerived::RhsNested>::type Rhs;
- typedef internal::blas_traits<Rhs> RhsBlasTraits;
- typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
- typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
- typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
-
- typename ProductDerived::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
-
- internal::general_matrix_matrix_triangular_product<Index,
- typename Lhs::Scalar, _ActualLhs::Flags&RowMajorBit ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
- typename Rhs::Scalar, _ActualRhs::Flags&RowMajorBit ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
- MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo>
- ::run(m_matrix.cols(), actualLhs.cols(),
- &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(),
- const_cast<Scalar*>(m_matrix.data()), m_matrix.outerStride(), actualAlpha);
+ general_product_to_triangular_selector<MatrixType, ProductDerived, UpLo, (_Lhs::ColsAtCompileTime==1) || (_Rhs::RowsAtCompileTime==1)>::run(m_matrix.const_cast_derived(), prod.derived(), alpha);
return *this;
}
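
The refactor above splits TriangularView::assignProduct through a compile-time selector: outer products (lhs with one column, or rhs with one row) now go to a dedicated selfadjoint_rank1_update kernel instead of the fully blocked triangular GEMM path. A reference (unblocked) sketch of that fast path, updating only the lower triangle with C += alpha * x * conj(y)^T; the conj_of helper is hypothetical and mirrors what numext::conj does for real versus complex scalars:

#include <complex>
#include <cstddef>

template <typename T> inline T conj_of(const T& x) { return x; }  // reals: identity
template <typename T> inline std::complex<T> conj_of(const std::complex<T>& x)
{ return std::conj(x); }

template <typename Scalar>
void rank1_update_lower(std::ptrdiff_t n, Scalar* mat, std::ptrdiff_t stride,
                        const Scalar* x, const Scalar* y, const Scalar& alpha)
{
  for (std::ptrdiff_t j = 0; j < n; ++j) {
    const Scalar s = alpha * conj_of(y[j]);  // scale factor for column j
    for (std::ptrdiff_t i = j; i < n; ++i)   // rows j..n-1 only: lower triangle
      mat[j * stride + i] += s * x[i];
  }
}
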
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h
index ba1f73957db..09387703efa 100644
--- a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -52,12 +52,17 @@ EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsIncr,
- ResScalar* res, Index
- #ifdef EIGEN_INTERNAL_DEBUGGING
- resIncr
- #endif
- , RhsScalar alpha)
+ ResScalar* res, Index resIncr, RhsScalar alpha);
+};
+
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>::run(
+ Index rows, Index cols,
+ const LhsScalar* lhs, Index lhsStride,
+ const RhsScalar* rhs, Index rhsIncr,
+ ResScalar* res, Index resIncr, RhsScalar alpha)
{
+ EIGEN_UNUSED_VARIABLE(resIncr)
eigen_internal_assert(resIncr==1);
#ifdef _EIGEN_ACCUMULATE_PACKETS
#error _EIGEN_ACCUMULATE_PACKETS has already been defined
@@ -74,21 +79,21 @@ EIGEN_DONT_INLINE static void run(
conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
if(ConjugateRhs)
- alpha = conj(alpha);
+ alpha = numext::conj(alpha);
enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
const Index columnsAtOnce = 4;
const Index peels = 2;
const Index LhsPacketAlignedMask = LhsPacketSize-1;
const Index ResPacketAlignedMask = ResPacketSize-1;
- const Index PeelAlignedMask = ResPacketSize*peels-1;
+// const Index PeelAlignedMask = ResPacketSize*peels-1;
const Index size = rows;
// How many coeffs of the result do we have to skip to be aligned.
// Here we assume data are at least aligned on the base scalar type.
Index alignedStart = internal::first_aligned(res,size);
Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0;
- const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
+ const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
Index alignmentPattern = alignmentStep==0 ? AllAligned
@@ -177,6 +182,8 @@ EIGEN_DONT_INLINE static void run(
_EIGEN_ACCUMULATE_PACKETS(d,du,d);
break;
case FirstAligned:
+ {
+ Index j = alignedStart;
if(peels>1)
{
LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;
@@ -186,7 +193,7 @@ EIGEN_DONT_INLINE static void run(
A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
- for (Index j = alignedStart; j<peeledSize; j+=peels*ResPacketSize)
+ for (; j<peeledSize; j+=peels*ResPacketSize)
{
A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]); palign<1>(A01,A11);
A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]); palign<2>(A02,A12);
@@ -210,9 +217,10 @@ EIGEN_DONT_INLINE static void run(
pstore(&res[j+ResPacketSize],T1);
}
}
- for (Index j = peeledSize; j<alignedSize; j+=ResPacketSize)
+ for (; j<alignedSize; j+=ResPacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,du);
break;
+ }
default:
for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
_EIGEN_ACCUMULATE_PACKETS(du,du,du);
@@ -250,7 +258,7 @@ EIGEN_DONT_INLINE static void run(
// process aligned result's coeffs
if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
- pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
+ pstore(&res[i], pcj.pmadd(pload<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
else
for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
@@ -271,7 +279,6 @@ EIGEN_DONT_INLINE static void run(
} while(Vectorizable);
#undef _EIGEN_ACCUMULATE_PACKETS
}
-};
/* Optimized row-major matrix * vector product:
 * This algorithm processes 4 rows at once, which allows us to both reduce
@@ -309,6 +316,15 @@ EIGEN_DONT_INLINE static void run(
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsIncr,
ResScalar* res, Index resIncr,
+ ResScalar alpha);
+};
+
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>::run(
+ Index rows, Index cols,
+ const LhsScalar* lhs, Index lhsStride,
+ const RhsScalar* rhs, Index rhsIncr,
+ ResScalar* res, Index resIncr,
ResScalar alpha)
{
EIGEN_UNUSED_VARIABLE(rhsIncr);
@@ -332,7 +348,7 @@ EIGEN_DONT_INLINE static void run(
const Index peels = 2;
const Index RhsPacketAlignedMask = RhsPacketSize-1;
const Index LhsPacketAlignedMask = LhsPacketSize-1;
- const Index PeelAlignedMask = RhsPacketSize*peels-1;
+// const Index PeelAlignedMask = RhsPacketSize*peels-1;
const Index depth = cols;
// How many coeffs of the result do we have to skip to be aligned.
@@ -340,7 +356,7 @@ EIGEN_DONT_INLINE static void run(
// if that's not the case then vectorization is discarded, see below.
Index alignedStart = internal::first_aligned(rhs, depth);
Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
- const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
+ const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
Index alignmentPattern = alignmentStep==0 ? AllAligned
@@ -430,10 +446,12 @@ EIGEN_DONT_INLINE static void run(
_EIGEN_ACCUMULATE_PACKETS(d,du,d);
break;
case FirstAligned:
+ {
+ Index j = alignedStart;
if (peels>1)
{
/* Here we process 4 rows with two peeled iterations to hide
- * tghe overhead of unaligned loads. Moreover unaligned loads are handled
+ * the overhead of unaligned loads. Moreover unaligned loads are handled
* using special shift/move operations between the two aligned packets
* overlaping the desired unaligned packet. This is *much* more efficient
* than basic unaligned loads.
@@ -443,7 +461,7 @@ EIGEN_DONT_INLINE static void run(
A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
- for (Index j = alignedStart; j<peeledSize; j+=peels*RhsPacketSize)
+ for (; j<peeledSize; j+=peels*RhsPacketSize)
{
RhsPacket b = pload<RhsPacket>(&rhs[j]);
A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]); palign<1>(A01,A11);
@@ -465,9 +483,10 @@ EIGEN_DONT_INLINE static void run(
ptmp3 = pcj.pmadd(A13, b, ptmp3);
}
}
- for (Index j = peeledSize; j<alignedSize; j+=RhsPacketSize)
+ for (; j<alignedSize; j+=RhsPacketSize)
_EIGEN_ACCUMULATE_PACKETS(d,du,du);
break;
+ }
default:
for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
_EIGEN_ACCUMULATE_PACKETS(du,du,du);
@@ -539,7 +558,6 @@ EIGEN_DONT_INLINE static void run(
#undef _EIGEN_ACCUMULATE_PACKETS
}
-};
} // end namespace internal
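
A pattern worth naming, since this file and most others in the patch repeat it: the large `run` bodies move out of the class so that EIGEN_DONT_INLINE sits on a genuine out-of-line definition rather than on an implicitly-inline member, keeping the multi-hundred-line kernels from being inlined into every call site. A sketch of the shape, with an illustrative stand-in for the macro:

#if defined(_MSC_VER)
  #define DONT_INLINE __declspec(noinline)
#else
  #define DONT_INLINE __attribute__((noinline))
#endif

template <typename Scalar>
struct gemv_kernel {
  DONT_INLINE static void run(const Scalar* lhs, const Scalar* rhs,
                              Scalar* res, long rows, long cols);  // declaration only
};

template <typename Scalar>
DONT_INLINE void gemv_kernel<Scalar>::run(const Scalar* lhs, const Scalar* rhs,
                                          Scalar* res, long rows, long cols)
{
  for (long i = 0; i < rows; ++i)     // naive reference loop; the real kernel
    for (long j = 0; j < cols; ++j)   // is vectorized and alignment-peeled
      res[i] += lhs[j * rows + i] * rhs[j];
}
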
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
index e9de6af3ed1..1cb9fe6b5a7 100644
--- a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
@@ -53,7 +53,7 @@ struct general_matrix_vector_product_gemv :
#define EIGEN_MKL_GEMV_SPECIALIZE(Scalar) \
template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \
-static EIGEN_DONT_INLINE void run( \
+static void run( \
Index rows, Index cols, \
const Scalar* lhs, Index lhsStride, \
const Scalar* rhs, Index rhsIncr, \
@@ -70,7 +70,7 @@ static EIGEN_DONT_INLINE void run( \
}; \
template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \
-static EIGEN_DONT_INLINE void run( \
+static void run( \
Index rows, Index cols, \
const Scalar* lhs, Index lhsStride, \
const Scalar* rhs, Index rhsIncr, \
@@ -92,7 +92,7 @@ struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,Conjugat
{ \
typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> GEMVVector;\
\
-static EIGEN_DONT_INLINE void run( \
+static void run( \
Index rows, Index cols, \
const EIGTYPE* lhs, Index lhsStride, \
const EIGTYPE* rhs, Index rhsIncr, \
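
The opposite change happens in the MKL bindings here and in the files below: `run` drops EIGEN_DONT_INLINE. These specializations are thin forwarders into a vendor BLAS call, so there is no large body to keep out of line, and forcing an extra call would only add overhead. The shape of such a wrapper, with a naive stand-in for the vendor routine:

// Illustrative stand-in for the vendor BLAS gemv (column-major, unit strides):
inline void vendor_gemv(int rows, int cols, const double* a, int lda,
                        const double* x, double* y)
{
  for (int j = 0; j < cols; ++j)
    for (int i = 0; i < rows; ++i)
      y[i] += a[j * lda + i] * x[j];
}

struct gemv_wrapper {  // thin forwarder: nothing gained by keeping it out of line
  static void run(int rows, int cols, const double* a, int lda,
                  const double* x, double* y)
  { vendor_gemv(rows, cols, a, lda, x, y); }
};
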
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index 48209636eed..99cf9e0ae27 100644
--- a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -30,9 +30,9 @@ struct symm_pack_lhs
for(Index k=i; k<i+BlockRows; k++)
{
for(Index w=0; w<h; w++)
- blockA[count++] = conj(lhs(k, i+w)); // transposed
+ blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
- blockA[count++] = real(lhs(k,k)); // real (diagonal)
+ blockA[count++] = numext::real(lhs(k,k)); // real (diagonal)
for(Index w=h+1; w<BlockRows; w++)
blockA[count++] = lhs(i+w, k); // normal
@@ -41,7 +41,7 @@ struct symm_pack_lhs
// transposed copy
for(Index k=i+BlockRows; k<cols; k++)
for(Index w=0; w<BlockRows; w++)
- blockA[count++] = conj(lhs(k, i+w)); // transposed
+ blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
}
void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
{
@@ -65,10 +65,10 @@ struct symm_pack_lhs
for(Index k=0; k<i; k++)
blockA[count++] = lhs(i, k); // normal
- blockA[count++] = real(lhs(i, i)); // real (diagonal)
+ blockA[count++] = numext::real(lhs(i, i)); // real (diagonal)
for(Index k=i+1; k<cols; k++)
- blockA[count++] = conj(lhs(k, i)); // transposed
+ blockA[count++] = numext::conj(lhs(k, i)); // transposed
}
}
};
@@ -107,12 +107,12 @@ struct symm_pack_rhs
// transpose
for(Index k=k2; k<j2; k++)
{
- blockB[count+0] = conj(rhs(j2+0,k));
- blockB[count+1] = conj(rhs(j2+1,k));
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
if (nr==4)
{
- blockB[count+2] = conj(rhs(j2+2,k));
- blockB[count+3] = conj(rhs(j2+3,k));
+ blockB[count+2] = numext::conj(rhs(j2+2,k));
+ blockB[count+3] = numext::conj(rhs(j2+3,k));
}
count += nr;
}
@@ -124,11 +124,11 @@ struct symm_pack_rhs
for (Index w=0 ; w<h; ++w)
blockB[count+w] = rhs(k,j2+w);
- blockB[count+h] = real(rhs(k,k));
+ blockB[count+h] = numext::real(rhs(k,k));
// transpose
for (Index w=h+1 ; w<nr; ++w)
- blockB[count+w] = conj(rhs(j2+w,k));
+ blockB[count+w] = numext::conj(rhs(j2+w,k));
count += nr;
++h;
}
@@ -151,12 +151,12 @@ struct symm_pack_rhs
{
for(Index k=k2; k<end_k; k++)
{
- blockB[count+0] = conj(rhs(j2+0,k));
- blockB[count+1] = conj(rhs(j2+1,k));
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
if (nr==4)
{
- blockB[count+2] = conj(rhs(j2+2,k));
- blockB[count+3] = conj(rhs(j2+3,k));
+ blockB[count+2] = numext::conj(rhs(j2+2,k));
+ blockB[count+3] = numext::conj(rhs(j2+3,k));
}
count += nr;
}
@@ -169,13 +169,13 @@ struct symm_pack_rhs
Index half = (std::min)(end_k,j2);
for(Index k=k2; k<half; k++)
{
- blockB[count] = conj(rhs(j2,k));
+ blockB[count] = numext::conj(rhs(j2,k));
count += 1;
}
if(half==j2 && half<k2+rows)
{
- blockB[count] = real(rhs(j2,j2));
+ blockB[count] = numext::real(rhs(j2,j2));
count += 1;
}
else
@@ -211,7 +211,7 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,Co
const Scalar* lhs, Index lhsStride,
const Scalar* rhs, Index rhsStride,
Scalar* res, Index resStride,
- Scalar alpha)
+ const Scalar& alpha)
{
product_selfadjoint_matrix<Scalar, Index,
EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
@@ -234,7 +234,18 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* res, Index resStride,
- Scalar alpha)
+ const Scalar& alpha);
+};
+
+template <typename Scalar, typename Index,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs>
+EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>::run(
+ Index rows, Index cols,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ const Scalar& alpha)
{
Index size = rows;
@@ -301,7 +312,6 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs
}
}
}
-};
// matrix * selfadjoint product
template <typename Scalar, typename Index,
@@ -315,7 +325,18 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLh
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* res, Index resStride,
- Scalar alpha)
+ const Scalar& alpha);
+};
+
+template <typename Scalar, typename Index,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs>
+EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>::run(
+ Index rows, Index cols,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ const Scalar& alpha)
{
Index size = cols;
@@ -353,7 +374,6 @@ struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLh
}
}
}
-};
} // end namespace internal
@@ -383,7 +403,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>
RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint
};
- template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
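
This file is mostly the mechanical conj/real to numext::conj/numext::real rename. The point of the qualification: an unqualified `conj(x)` resolves through ADL and can silently pick up a user overload (or std::conj, which returns std::complex even for double arguments). A small namespace with explicit real/complex overloads, in the spirit of Eigen's numext, makes the resolution deterministic:

#include <complex>

namespace num {
  template <typename T> inline T conj(const T& x) { return x; }  // reals: no-op
  template <typename T> inline std::complex<T> conj(const std::complex<T>& x)
  { return std::conj(x); }
  template <typename T> inline T real(const T& x) { return x; }
  template <typename T> inline T real(const std::complex<T>& x) { return x.real(); }
}

// num::conj(2.0) stays a double; num::conj(std::complex<double>(1,2)) conjugates.
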
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
index 4e5c4125c01..dfa687fefe6 100644
--- a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
@@ -23,7 +23,7 @@
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+//
********************************************************************************
* Content : Eigen bindings to Intel(R) MKL
* Self adjoint matrix * matrix product functionality based on ?SYMM/?HEMM.
@@ -47,7 +47,7 @@ template <typename Index, \
struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
{\
\
- static EIGEN_DONT_INLINE void run( \
+ static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
@@ -98,7 +98,7 @@ template <typename Index, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
{\
- static EIGEN_DONT_INLINE void run( \
+ static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
@@ -174,7 +174,7 @@ template <typename Index, \
struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
{\
\
- static EIGEN_DONT_INLINE void run( \
+ static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
@@ -224,7 +224,7 @@ template <typename Index, \
int RhsStorageOrder, bool ConjugateRhs> \
struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
{\
- static EIGEN_DONT_INLINE void run( \
+ static void run( \
Index rows, Index cols, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h
index c3145c69a5f..f698f67f9b0 100644
--- a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -32,10 +32,18 @@ static EIGEN_DONT_INLINE void run(
const Scalar* lhs, Index lhsStride,
const Scalar* _rhs, Index rhsIncr,
Scalar* res,
+ Scalar alpha);
+};
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
+ Index size,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsIncr,
+ Scalar* res,
Scalar alpha)
{
typedef typename packet_traits<Scalar>::type Packet;
- typedef typename NumTraits<Scalar>::Real RealScalar;
const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
enum {
@@ -51,7 +59,7 @@ static EIGEN_DONT_INLINE void run(
conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0;
conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;
- Scalar cjAlpha = ConjugateRhs ? conj(alpha) : alpha;
+ Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;
// FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed.
// if the rhs is not sequentially stored in memory we copy it to a temporary buffer,
@@ -71,8 +79,8 @@ static EIGEN_DONT_INLINE void run(
for (Index j=FirstTriangular ? bound : 0;
j<(FirstTriangular ? size : bound);j+=2)
{
- register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
- register const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;
+ const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
+ const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;
Scalar t0 = cjAlpha * rhs[j];
Packet ptmp0 = pset1<Packet>(t0);
@@ -90,8 +98,8 @@ static EIGEN_DONT_INLINE void run(
size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);
// TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
- res[j] += cjd.pmul(internal::real(A0[j]), t0);
- res[j+1] += cjd.pmul(internal::real(A1[j+1]), t1);
+ res[j] += cjd.pmul(numext::real(A0[j]), t0);
+ res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
if(FirstTriangular)
{
res[j] += cj0.pmul(A1[j], t1);
@@ -106,8 +114,8 @@ static EIGEN_DONT_INLINE void run(
for (size_t i=starti; i<alignedStart; ++i)
{
res[i] += t0 * A0[i] + t1 * A1[i];
- t2 += conj(A0[i]) * rhs[i];
- t3 += conj(A1[i]) * rhs[i];
+ t2 += numext::conj(A0[i]) * rhs[i];
+ t3 += numext::conj(A1[i]) * rhs[i];
}
// Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up)
// gcc 4.2 does this optimization automatically.
@@ -139,12 +147,12 @@ static EIGEN_DONT_INLINE void run(
}
for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
{
- register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
+ const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
Scalar t1 = cjAlpha * rhs[j];
Scalar t2(0);
// TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
- res[j] += cjd.pmul(internal::real(A0[j]), t1);
+ res[j] += cjd.pmul(numext::real(A0[j]), t1);
for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
{
res[i] += cj0.pmul(A0[i], t1);
@@ -153,7 +161,6 @@ static EIGEN_DONT_INLINE void run(
res[j] += alpha * t2;
}
}
-};
} // end namespace internal
@@ -180,7 +187,7 @@ struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
- template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
typedef typename Dest::Scalar ResScalar;
typedef typename Base::RhsScalar RhsScalar;
@@ -260,7 +267,7 @@ struct SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>
SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
- template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
// let's simply transpose the product
Transpose<Dest> destT(dest);
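
Besides the numext renames, this file drops the `register` storage class from the A0/A1 pointers. `register` is deprecated since C++11 (and ill-formed in C++17), and modern compilers ignore it anyway; the EIGEN_RESTRICT qualifier is the part that actually affects codegen, promising the compiler the pointers don't alias. A sketch of the two-column inner loop with an illustrative RESTRICT macro:

#if defined(__GNUC__) || defined(__clang__)
  #define RESTRICT __restrict__
#else
  #define RESTRICT __restrict
#endif

// Declaring the column pointers non-aliasing lets the compiler keep the
// accumulators in registers across the loop.
void axpy2(const double* RESTRICT a0, const double* RESTRICT a1,
           double t0, double t1, double* RESTRICT res, long n)
{
  for (long i = 0; i < n; ++i)
    res[i] += t0 * a0[i] + t1 * a1[i];
}
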
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
index f88d483b653..86684b66d94 100644
--- a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
@@ -50,7 +50,7 @@ struct selfadjoint_matrix_vector_product_symv :
#define EIGEN_MKL_SYMV_SPECIALIZE(Scalar) \
template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \
struct selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Specialized> { \
-static EIGEN_DONT_INLINE void run( \
+static void run( \
Index size, const Scalar* lhs, Index lhsStride, \
const Scalar* _rhs, Index rhsIncr, Scalar* res, Scalar alpha) { \
enum {\
@@ -77,7 +77,7 @@ struct selfadjoint_matrix_vector_product_symv<EIGTYPE,Index,StorageOrder,UpLo,Co
{ \
typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> SYMVVector;\
\
-static EIGEN_DONT_INLINE void run( \
+static void run( \
Index size, const EIGTYPE* lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* res, EIGTYPE alpha) \
{ \
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h
index 6a55f3d7715..6ca4ae6c0fe 100644
--- a/extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h
@@ -18,21 +18,19 @@
namespace Eigen {
-template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs>
-struct selfadjoint_rank1_update;
template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
{
- static void run(Index size, Scalar* mat, Index stride, const Scalar* vec, Scalar alpha)
+ static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
{
internal::conj_if<ConjRhs> cj;
typedef Map<const Matrix<Scalar,Dynamic,1> > OtherMap;
- typedef typename internal::conditional<ConjLhs,typename OtherMap::ConjugateReturnType,const OtherMap&>::type ConjRhsType;
+ typedef typename internal::conditional<ConjLhs,typename OtherMap::ConjugateReturnType,const OtherMap&>::type ConjLhsType;
for (Index i=0; i<size; ++i)
{
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+(UpLo==Lower ? i : 0), (UpLo==Lower ? size-i : (i+1)))
- += (alpha * cj(vec[i])) * ConjRhsType(OtherMap(vec+(UpLo==Lower ? i : 0),UpLo==Lower ? size-i : (i+1)));
+ += (alpha * cj(vecY[i])) * ConjLhsType(OtherMap(vecX+(UpLo==Lower ? i : 0),UpLo==Lower ? size-i : (i+1)));
}
}
};
@@ -40,9 +38,9 @@ struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
struct selfadjoint_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs>
{
- static void run(Index size, Scalar* mat, Index stride, const Scalar* vec, Scalar alpha)
+ static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
{
- selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vec,alpha);
+ selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vecY,vecX,alpha);
}
};
@@ -52,7 +50,7 @@ struct selfadjoint_product_selector;
template<typename MatrixType, typename OtherType, int UpLo>
struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
{
- static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha)
+ static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -78,14 +76,14 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
OtherBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
(!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex>
- ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualAlpha);
+ ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualOtherPtr, actualAlpha);
}
};
template<typename MatrixType, typename OtherType, int UpLo>
struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
{
- static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha)
+ static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -113,7 +111,7 @@ struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU>
SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
-::rankUpdate(const MatrixBase<DerivedU>& u, Scalar alpha)
+::rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha)
{
selfadjoint_product_selector<MatrixType,DerivedU,UpLo>::run(_expression().const_cast_derived(), u.derived(), alpha);
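
The kernel generalization above (a single `vec` argument becomes `vecX`/`vecY`) is what lets GeneralMatrixMatrixTriangular reuse it for arbitrary outer products: the selfadjoint rankUpdate path simply passes the same pointer twice, as the `::run(other.size(), ..., actualOtherPtr, actualOtherPtr, actualAlpha)` hunk shows. A usage sketch, assuming the rank1_update_lower function from the sketch after the GeneralMatrixMatrixTriangular diff is in scope:

#include <complex>
#include <cstddef>
#include <vector>

int main()
{
  using cd = std::complex<double>;
  const std::ptrdiff_t n = 3;
  std::vector<cd> C(n * n, cd(0));
  std::vector<cd> u = {cd(1, 1), cd(2, 0), cd(0, -1)};
  std::vector<cd> v = {cd(1, 0), cd(0, 1), cd(3, 0)};
  rank1_update_lower(n, C.data(), n, u.data(), u.data(), cd(0.5, 0));  // C += 0.5*u*u^H (rankUpdate)
  rank1_update_lower(n, C.data(), n, u.data(), v.data(), cd(1, 0));    // C += u*v^H (outer product)
}
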
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h
index 57a98cc2de9..8594a97cea3 100644
--- a/extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h
@@ -24,14 +24,14 @@ struct selfadjoint_rank2_update_selector;
template<typename Scalar, typename Index, typename UType, typename VType>
struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
{
- static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
+ static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
{
const Index size = u.size();
for (Index i=0; i<size; ++i)
{
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
- (conj(alpha) * conj(u.coeff(i))) * v.tail(size-i)
- + (alpha * conj(v.coeff(i))) * u.tail(size-i);
+ (numext::conj(alpha) * numext::conj(u.coeff(i))) * v.tail(size-i)
+ + (alpha * numext::conj(v.coeff(i))) * u.tail(size-i);
}
}
};
@@ -39,13 +39,13 @@ struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
template<typename Scalar, typename Index, typename UType, typename VType>
struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
{
- static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
+ static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
{
const Index size = u.size();
for (Index i=0; i<size; ++i)
Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
- (conj(alpha) * conj(u.coeff(i))) * v.head(i+1)
- + (alpha * conj(v.coeff(i))) * u.head(i+1);
+ (numext::conj(alpha) * numext::conj(u.coeff(i))) * v.head(i+1)
+ + (alpha * numext::conj(v.coeff(i))) * u.head(i+1);
}
};
@@ -58,7 +58,7 @@ template<bool Cond, typename T> struct conj_expr_if
template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU, typename DerivedV>
SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
-::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, Scalar alpha)
+::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha)
{
typedef internal::blas_traits<DerivedU> UBlasTraits;
typedef typename UBlasTraits::DirectLinearAccessType ActualUType;
@@ -75,9 +75,9 @@ SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived())
- * internal::conj(VBlasTraits::extractScalarFactor(v.derived()));
+ * numext::conj(VBlasTraits::extractScalarFactor(v.derived()));
if (IsRowMajor)
- actualAlpha = internal::conj(actualAlpha);
+ actualAlpha = numext::conj(actualAlpha);
internal::selfadjoint_rank2_update_selector<Scalar, Index,
typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::type>::type,
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h
index 92cba66f615..8110507b50b 100644
--- a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h
@@ -61,7 +61,7 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
const Scalar* lhs, Index lhsStride,
const Scalar* rhs, Index rhsStride,
Scalar* res, Index resStride,
- Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+ const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
product_triangular_matrix_matrix<Scalar, Index,
(Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
@@ -96,7 +96,20 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* res, Index resStride,
- Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+ const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
+};
+
+template <typename Scalar, typename Index, int Mode,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
+ LhsStorageOrder,ConjugateLhs,
+ RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
+ Index _rows, Index _cols, Index _depth,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
// strip zeros
Index diagSize = (std::min)(_rows,_depth);
@@ -203,15 +216,14 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
}
}
}
-};
// implements col-major += alpha * op(general) * op(triangular)
template <typename Scalar, typename Index, int Mode,
int LhsStorageOrder, bool ConjugateLhs,
int RhsStorageOrder, bool ConjugateRhs, int Version>
struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
- LhsStorageOrder,ConjugateLhs,
- RhsStorageOrder,ConjugateRhs,ColMajor,Version>
+ LhsStorageOrder,ConjugateLhs,
+ RhsStorageOrder,ConjugateRhs,ColMajor,Version>
{
typedef gebp_traits<Scalar,Scalar> Traits;
enum {
@@ -225,7 +237,20 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
const Scalar* _lhs, Index lhsStride,
const Scalar* _rhs, Index rhsStride,
Scalar* res, Index resStride,
- Scalar alpha, level3_blocking<Scalar,Scalar>& blocking)
+ const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
+};
+
+template <typename Scalar, typename Index, int Mode,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
+ LhsStorageOrder,ConjugateLhs,
+ RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
+ Index _rows, Index _cols, Index _depth,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
{
// strip zeros
Index diagSize = (std::min)(_cols,_depth);
@@ -343,7 +368,6 @@ struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
}
}
}
-};
/***************************************************************************
* Wrapper to product_triangular_matrix_matrix
@@ -364,7 +388,7 @@ struct TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
- template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
{
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
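
The declaration/definition split in this file follows the same EIGEN_DONT_INLINE pattern as the others. What the kernel computes, stripped of blocking and packing, is res += alpha * op(triangular) * op(general), with the structurally zero half of the triangular factor skipped; that skipping is what the diagSize "strip zeros" preamble is about. A reference loop for the lower-triangular, column-major case, assuming a square n x n triangular factor:

#include <cstddef>

template <typename Scalar>
void trmm_lower_ref(std::ptrdiff_t n, std::ptrdiff_t cols,
                    const Scalar* lhs, std::ptrdiff_t lhsStride,
                    const Scalar* rhs, std::ptrdiff_t rhsStride,
                    Scalar* res, std::ptrdiff_t resStride, const Scalar& alpha)
{
  for (std::ptrdiff_t j = 0; j < cols; ++j)
    for (std::ptrdiff_t k = 0; k < n; ++k)
      for (std::ptrdiff_t i = k; i < n; ++i)  // i >= k: zeros above the diagonal skipped
        res[j * resStride + i] += alpha * lhs[k * lhsStride + i] * rhs[j * rhsStride + k];
}
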
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
index 8173da5bb6d..ba41a1c99f6 100644
--- a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
@@ -57,11 +57,11 @@ template <typename Index, int Mode, \
struct product_triangular_matrix_matrix<Scalar,Index, Mode, LhsIsTriangular, \
LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,Specialized> { \
static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\
- const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha) { \
+ const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar,Scalar>& blocking) { \
product_triangular_matrix_matrix_trmm<Scalar,Index,Mode, \
LhsIsTriangular,LhsStorageOrder,ConjugateLhs, \
RhsStorageOrder, ConjugateRhs, ColMajor>::run( \
- _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
} \
};
@@ -91,12 +91,12 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \
}; \
\
- static EIGEN_DONT_INLINE void run( \
+ static void run( \
Index _rows, Index _cols, Index _depth, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
EIGTYPE* res, Index resStride, \
- EIGTYPE alpha) \
+ EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
{ \
Index diagSize = (std::min)(_rows,_depth); \
Index rows = IsLower ? _rows : diagSize; \
@@ -115,16 +115,16 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \
product_triangular_matrix_matrix<EIGTYPE,Index,Mode,true, \
LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
- _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
/*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \
} else { \
/* Makes sense to call GEMM */ \
Map<const MatrixLhs, 0, OuterStride<> > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \
MatrixLhs aa_tmp=lhsMap.template triangularView<Mode>(); \
MKL_INT aStride = aa_tmp.outerStride(); \
- gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> blocking(_rows,_cols,_depth); \
+ gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth); \
general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
- rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, blocking, 0); \
+ rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, gemm_blocking, 0); \
\
/*std::cout << "TRMM_L: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \
} \
@@ -205,12 +205,12 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \
}; \
\
- static EIGEN_DONT_INLINE void run( \
+ static void run( \
Index _rows, Index _cols, Index _depth, \
const EIGTYPE* _lhs, Index lhsStride, \
const EIGTYPE* _rhs, Index rhsStride, \
EIGTYPE* res, Index resStride, \
- EIGTYPE alpha) \
+ EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
{ \
Index diagSize = (std::min)(_cols,_depth); \
Index rows = _rows; \
@@ -229,16 +229,16 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \
product_triangular_matrix_matrix<EIGTYPE,Index,Mode,false, \
LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
- _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha); \
+ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
/*std::cout << "TRMM_R: A is not square! Go to Eigen TRMM implementation!\n";*/ \
} else { \
/* Makes sense to call GEMM */ \
Map<const MatrixRhs, 0, OuterStride<> > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \
MatrixRhs aa_tmp=rhsMap.template triangularView<Mode>(); \
MKL_INT aStride = aa_tmp.outerStride(); \
- gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> blocking(_rows,_cols,_depth); \
+ gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth); \
general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
- rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, blocking, 0); \
+ rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, gemm_blocking, 0); \
\
/*std::cout << "TRMM_R: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \
} \
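
Two things happen in this MKL file: `run` gains the new level3_blocking parameter so the fallback into Eigen's own TRMM can forward it, and the local blocking object created for the GEMM branch is renamed to gemm_blocking, since after the signature change a local named `blocking` would shadow the parameter inside the macro-generated body. A minimal repro of that shadowing hazard:

struct Blocking { int kc; };

int kernel(Blocking& blocking)
{
  // With the old name this local would silently hide the parameter, so every
  // later use would bind to the freshly constructed object instead of the
  // caller's; hence the rename in the patch.
  Blocking gemm_blocking{512};
  return blocking.kc + gemm_blocking.kc;
}
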
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h
index b1c10c201c5..6117d5a8262 100644
--- a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -27,7 +27,13 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
};
static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
- const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha)
+ const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
+};
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>
+EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>
+ ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+ const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
{
static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
Index size = (std::min)(_rows,_cols);
@@ -78,7 +84,6 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
_res, resIncr, alpha);
}
}
-};
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
@@ -89,8 +94,14 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
};
- static void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
- const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha)
+ static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+ const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
+};
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
+EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
+ ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+ const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
{
static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
Index diagSize = (std::min)(_rows,_cols);
@@ -141,7 +152,6 @@ struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,C
&res.coeffRef(diagSize), resIncr, alpha);
}
}
-};
/***************************************************************************
* Wrapper to product_triangular_vector
@@ -171,7 +181,7 @@ struct TriangularProduct<Mode,true,Lhs,false,Rhs,true>
TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
- template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
@@ -187,7 +197,7 @@ struct TriangularProduct<Mode,false,Lhs,true,Rhs,false>
TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
- template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
@@ -205,7 +215,7 @@ namespace internal {
template<> struct trmv_selector<ColMajor>
{
template<int Mode, typename Lhs, typename Rhs, typename Dest>
- static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar alpha)
+ static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, const typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar& alpha)
{
typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
typedef typename ProductType::Index Index;
@@ -235,7 +245,7 @@ template<> struct trmv_selector<ColMajor>
gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
- bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
+ bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
@@ -246,7 +256,7 @@ template<> struct trmv_selector<ColMajor>
if(!evalToDest)
{
#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
- int size = dest.size();
+ Index size = dest.size();
EIGEN_DENSE_STORAGE_CTOR_PLUGIN
#endif
if(!alphaIsCompatible)
@@ -281,7 +291,7 @@ template<> struct trmv_selector<ColMajor>
template<> struct trmv_selector<RowMajor>
{
template<int Mode, typename Lhs, typename Rhs, typename Dest>
- static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar alpha)
+ static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, const typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar& alpha)
{
typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
typedef typename ProductType::LhsScalar LhsScalar;
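
The recurring change in this file (and again in TriangularSolverMatrix.h below) splits each kernel into an in-class declaration plus an out-of-line definition carrying EIGEN_DONT_INLINE, so the noinline attribute applies to a single definition rather than an in-class body. A minimal sketch of the pattern, with hypothetical names and a placeholder body:

    #include <cstddef>

    // Declaration only inside the class template; the heavy body lives outside.
    template<typename Scalar, int StorageOrder>
    struct example_kernel {
      static void run(const Scalar* x, std::size_t n, Scalar& acc);
    };

    // Out-of-line definition: one place to hang a noinline attribute.
    template<typename Scalar, int StorageOrder>
    void example_kernel<Scalar, StorageOrder>::run(const Scalar* x, std::size_t n, Scalar& acc)
    {
      acc = Scalar(0);
      for (std::size_t i = 0; i < n; ++i)
        acc += x[i]; // stand-in for the real triangular kernel
    }
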
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
index 3589b8c5ef6..09f110da712 100644
--- a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
@@ -50,7 +50,7 @@ struct triangular_matrix_vector_product_trmv :
#define EIGEN_MKL_TRMV_SPECIALIZE(Scalar) \
template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor,Specialized> { \
- static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
+ static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor>::run( \
_rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
@@ -58,7 +58,7 @@ struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs
}; \
template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor,Specialized> { \
- static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
+ static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor>::run( \
_rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
@@ -81,12 +81,12 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
LowUp = IsLower ? Lower : Upper \
}; \
- static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
- const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
+ static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
{ \
if (ConjLhs || IsZeroDiag) { \
triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor,BuiltIn>::run( \
- _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha, blocking); \
+ _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
return; \
}\
Index size = (std::min)(_rows,_cols); \
@@ -166,12 +166,12 @@ struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
LowUp = IsLower ? Lower : Upper \
}; \
- static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
- const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
+ static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
+ const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
{ \
if (IsZeroDiag) { \
triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor,BuiltIn>::run( \
- _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha, blocking); \
+ _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
return; \
}\
Index size = (std::min)(_rows,_cols); \
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
index a49ea318345..f103eae72c0 100644
--- a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -18,7 +18,7 @@ namespace internal {
template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>
struct triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
{
- static EIGEN_DONT_INLINE void run(
+ static void run(
Index size, Index cols,
const Scalar* tri, Index triStride,
Scalar* _other, Index otherStride,
@@ -42,6 +42,13 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
Scalar* _other, Index otherStride,
+ level3_blocking<Scalar,Scalar>& blocking);
+};
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
+ Index size, Index otherSize,
+ const Scalar* _tri, Index triStride,
+ Scalar* _other, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking)
{
Index cols = otherSize;
@@ -173,7 +180,6 @@ struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageO
}
}
}
-};
/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right
*/
@@ -184,6 +190,13 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage
Index size, Index otherSize,
const Scalar* _tri, Index triStride,
Scalar* _other, Index otherStride,
+ level3_blocking<Scalar,Scalar>& blocking);
+};
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
+ Index size, Index otherSize,
+ const Scalar* _tri, Index triStride,
+ Scalar* _other, Index otherStride,
level3_blocking<Scalar,Scalar>& blocking)
{
Index rows = otherSize;
@@ -308,7 +321,6 @@ struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorage
}
}
}
-};
} // end namespace internal
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
index a4f508b2e83..6a0bb833931 100644
--- a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
@@ -48,7 +48,7 @@ struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorage
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
}; \
- static EIGEN_DONT_INLINE void run( \
+ static void run( \
Index size, Index otherSize, \
const EIGTYPE* _tri, Index triStride, \
EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
@@ -103,7 +103,7 @@ struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorag
IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \
conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
}; \
- static EIGEN_DONT_INLINE void run( \
+ static void run( \
Index size, Index otherSize, \
const EIGTYPE* _tri, Index triStride, \
EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
diff --git a/extern/Eigen3/Eigen/src/Core/util/BlasUtil.h b/extern/Eigen3/Eigen/src/Core/util/BlasUtil.h
index 91496651c82..a28f16fa04b 100644
--- a/extern/Eigen3/Eigen/src/Core/util/BlasUtil.h
+++ b/extern/Eigen3/Eigen/src/Core/util/BlasUtil.h
@@ -42,7 +42,7 @@ template<bool Conjugate> struct conj_if;
template<> struct conj_if<true> {
template<typename T>
- inline T operator()(const T& x) { return conj(x); }
+ inline T operator()(const T& x) { return numext::conj(x); }
template<typename T>
inline T pconj(const T& x) { return internal::pconj(x); }
};
@@ -67,7 +67,7 @@ template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::
{ return c + pmul(x,y); }
EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
- { return Scalar(real(x)*real(y) + imag(x)*imag(y), imag(x)*real(y) - real(x)*imag(y)); }
+ { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::imag(x)*numext::real(y) - numext::real(x)*numext::imag(y)); }
};
template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,false>
@@ -77,7 +77,7 @@ template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::
{ return c + pmul(x,y); }
EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
- { return Scalar(real(x)*real(y) + imag(x)*imag(y), real(x)*imag(y) - imag(x)*real(y)); }
+ { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
};
template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,true>
@@ -87,7 +87,7 @@ template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::
{ return c + pmul(x,y); }
EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
- { return Scalar(real(x)*real(y) - imag(x)*imag(y), - real(x)*imag(y) - imag(x)*real(y)); }
+ { return Scalar(numext::real(x)*numext::real(y) - numext::imag(x)*numext::imag(y), - numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
};
template<typename RealScalar,bool Conj> struct conj_helper<std::complex<RealScalar>, RealScalar, Conj,false>
@@ -113,7 +113,7 @@ template<typename From,typename To> struct get_factor {
};
template<typename Scalar> struct get_factor<Scalar,typename NumTraits<Scalar>::Real> {
- static EIGEN_STRONG_INLINE typename NumTraits<Scalar>::Real run(const Scalar& x) { return real(x); }
+ static EIGEN_STRONG_INLINE typename NumTraits<Scalar>::Real run(const Scalar& x) { return numext::real(x); }
};
// Lightweight helper class to access matrix coefficients.
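
The three pmul bodies above are the expanded forms of x*conj(y), conj(x)*y and conj(x)*conj(y); the edit only qualifies real/imag with numext::. A standalone check of the identities against std::complex (not Eigen code):

    #include <cassert>
    #include <complex>

    int main()
    {
      const std::complex<double> x(1.0, 2.0), y(3.0, -4.0);

      // x*conj(y): (re re + im im, im(x) re(y) - re(x) im(y))
      const std::complex<double> a(x.real()*y.real() + x.imag()*y.imag(),
                                   x.imag()*y.real() - x.real()*y.imag());
      assert(a == x * std::conj(y));

      // conj(x)*y: (re re + im im, re(x) im(y) - im(x) re(y))
      const std::complex<double> b(x.real()*y.real() + x.imag()*y.imag(),
                                   x.real()*y.imag() - x.imag()*y.real());
      assert(b == std::conj(x) * y);

      // conj(x)*conj(y): (re re - im im, -re(x) im(y) - im(x) re(y))
      const std::complex<double> c(x.real()*y.real() - x.imag()*y.imag(),
                                   -x.real()*y.imag() - x.imag()*y.real());
      assert(c == std::conj(x) * std::conj(y));
      return 0;
    }
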
diff --git a/extern/Eigen3/Eigen/src/Core/util/Constants.h b/extern/Eigen3/Eigen/src/Core/util/Constants.h
index 3fd45e84f8e..14b9624e1d9 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Constants.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Constants.h
@@ -13,13 +13,18 @@
namespace Eigen {
-/** This value means that a quantity is not known at compile-time, and that instead the value is
+/** This value means that a positive quantity (e.g., a size) is not known at compile-time, and that instead the value is
* stored in some runtime variable.
*
* Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.
*/
const int Dynamic = -1;
+/** This value means that a signed quantity (e.g., a signed index) is not known at compile-time, and that instead its value
+ * has to be specified at runtime.
+ */
+const int DynamicIndex = 0xffffff;
+
/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
* The value Infinity there means the L-infinity norm.
*/
@@ -227,7 +232,9 @@ enum {
* scalar loops to handle the unaligned boundaries */
SliceVectorizedTraversal,
/** \internal Special case to properly handle incompatible scalar types or other defective cases */
- InvalidTraversal
+ InvalidTraversal,
+ /** \internal Evaluate all entries at once */
+ AllAtOnceTraversal
};
/** \internal \ingroup enums
@@ -257,9 +264,9 @@ enum {
ColMajor = 0,
/** Storage order is row major (see \ref TopicStorageOrders). */
RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
- /** \internal Align the matrix itself if it is vectorizable fixed-size */
+ /** Align the matrix itself if it is vectorizable fixed-size */
AutoAlign = 0,
- /** \internal Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation
+ /** Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation
DontAlign = 0x2
};
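
Dynamic (-1) works as a sentinel for sizes, which are never negative, but it cannot mark a runtime-determined signed index: -1 is a legal offset (for example, the first subdiagonal of a matrix). That is presumably why a second sentinel, DynamicIndex, is introduced above. An illustrative sketch, not Eigen's actual code:

    const int Dynamic      = -1;        // sizes: -1 can never be a real size
    const int DynamicIndex = 0xffffff;  // signed indices: -1, 0, 1, ... are all legal

    // A compile-time diagonal offset, or "decided at runtime":
    template<int DiagIndex>
    struct diag_choice {
      static const bool compile_time = (DiagIndex != DynamicIndex);
    };
    // diag_choice<-1>::compile_time           -> true  (first subdiagonal)
    // diag_choice<DynamicIndex>::compile_time -> false (index given at runtime)
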
diff --git a/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h b/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h
index bcdfe3914e3..d6a814586d3 100644
--- a/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h
+++ b/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h
@@ -78,8 +78,7 @@ template<typename ExpressionType> class NestByValue;
template<typename ExpressionType> class ForceAlignedAccess;
template<typename ExpressionType> class SwapWrapper;
-template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
- bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class Block;
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false> class Block;
template<typename MatrixType, int Size=Dynamic> class VectorBlock;
template<typename MatrixType> class Transpose;
@@ -154,7 +153,6 @@ template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRh
template<typename Scalar> struct scalar_sum_op;
template<typename Scalar> struct scalar_difference_op;
template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op;
-template<typename Scalar> struct scalar_quotient_op;
template<typename Scalar> struct scalar_opposite_op;
template<typename Scalar> struct scalar_conjugate_op;
template<typename Scalar> struct scalar_real_op;
@@ -185,6 +183,7 @@ template<typename Scalar> struct scalar_identity_op;
template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
template<typename LhsScalar,typename RhsScalar> struct scalar_multiple2_op;
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_quotient_op;
} // end namespace internal
@@ -271,6 +270,8 @@ template<typename Derived> struct MatrixExponentialReturnValue;
template<typename Derived> class MatrixFunctionReturnValue;
template<typename Derived> class MatrixSquareRootReturnValue;
template<typename Derived> class MatrixLogarithmReturnValue;
+template<typename Derived> class MatrixPowerReturnValue;
+template<typename Derived, typename Lhs, typename Rhs> class MatrixPowerProduct;
namespace internal {
template <typename Scalar>
diff --git a/extern/Eigen3/Eigen/src/Core/util/Macros.h b/extern/Eigen3/Eigen/src/Core/util/Macros.h
index d973a68372f..0088621acf7 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Macros.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Macros.h
@@ -12,7 +12,7 @@
#define EIGEN_MACROS_H
#define EIGEN_WORLD_VERSION 3
-#define EIGEN_MAJOR_VERSION 1
+#define EIGEN_MAJOR_VERSION 2
#define EIGEN_MINOR_VERSION 1
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
@@ -115,12 +115,6 @@
#define EIGEN_MAKESTRING2(a) #a
#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
-#if EIGEN_GNUC_AT_LEAST(4,1) && !defined(__clang__) && !defined(__INTEL_COMPILER)
-#define EIGEN_FLATTEN_ATTRIB __attribute__((flatten))
-#else
-#define EIGEN_FLATTEN_ATTRIB
-#endif
-
// EIGEN_STRONG_INLINE is a stronger version of the inline, using __forceinline on MSVC,
// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline
// but GCC is still doing fine with just inline.
@@ -151,6 +145,12 @@
#define EIGEN_DONT_INLINE
#endif
+#if (defined __GNUC__)
+#define EIGEN_PERMISSIVE_EXPR __extension__
+#else
+#define EIGEN_PERMISSIVE_EXPR
+#endif
+
// this macro allows one to get rid of linking errors about multiply defined functions.
// - static is not very good because it prevents definitions from different object files from being merged.
// So static causes the resulting linked executable to be bloated with multiple copies of the same function.
@@ -238,12 +238,19 @@
#endif
// Suppresses 'unused variable' warnings.
-#define EIGEN_UNUSED_VARIABLE(var) (void)var;
+namespace Eigen {
+ namespace internal {
+ template<typename T> void ignore_unused_variable(const T&) {}
+ }
+}
+#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
-#if !defined(EIGEN_ASM_COMMENT) && (defined __GNUC__)
-#define EIGEN_ASM_COMMENT(X) asm("#" X)
-#else
-#define EIGEN_ASM_COMMENT(X)
+#if !defined(EIGEN_ASM_COMMENT)
+ #if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
+ #define EIGEN_ASM_COMMENT(X) asm("#" X)
+ #else
+ #define EIGEN_ASM_COMMENT(X)
+ #endif
#endif
/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
@@ -301,6 +308,12 @@
#if defined(_MSC_VER) && (!defined(__INTEL_COMPILER))
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =;
+#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+ using Base::operator =; \
+ EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
+ template <typename OtherDerived> \
+ EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
#else
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =; \
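
The unused-variable macro above now calls an empty function template instead of a (void) cast; the call still uses the variable, which silences "set but not used" style warnings that some compilers emit even with the cast, and the const reference accepts any type. A self-contained sketch of the same idiom (the demo names are placeholders):

    namespace demo {
      namespace internal {
        template<typename T> void ignore_unused_variable(const T&) {}
      }
    }
    #define DEMO_UNUSED_VARIABLE(var) demo::internal::ignore_unused_variable(var);

    int main()
    {
      int only_used_in_asserts = 42;   // would otherwise warn under -Wunused-but-set-variable
      DEMO_UNUSED_VARIABLE(only_used_in_asserts)
      return 0;
    }
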
diff --git a/extern/Eigen3/Eigen/src/Core/util/Memory.h b/extern/Eigen3/Eigen/src/Core/util/Memory.h
index 6e06ace44a0..cacbd02fd12 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Memory.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Memory.h
@@ -19,6 +19,10 @@
#ifndef EIGEN_MEMORY_H
#define EIGEN_MEMORY_H
+#ifndef EIGEN_MALLOC_ALREADY_ALIGNED
+
+// Try to determine automatically if malloc is already aligned.
+
// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
// http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
// This is true at least since glibc 2.8.
@@ -27,7 +31,7 @@
// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
- && defined(__LP64__)
+ && defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ )
#define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
#else
#define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
@@ -52,10 +56,19 @@
#define EIGEN_MALLOC_ALREADY_ALIGNED 0
#endif
-#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) \
- && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
- #define EIGEN_HAS_POSIX_MEMALIGN 1
-#else
+#endif
+
+// See bug 554 (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=554)
+// It seems to be unsafe to check _POSIX_ADVISORY_INFO without including unistd.h first.
+// Currently, let's include it only on unix systems:
+#if defined(__unix__) || defined(__unix)
+ #include <unistd.h>
+ #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+ #define EIGEN_HAS_POSIX_MEMALIGN 1
+ #endif
+#endif
+
+#ifndef EIGEN_HAS_POSIX_MEMALIGN
#define EIGEN_HAS_POSIX_MEMALIGN 0
#endif
@@ -88,11 +101,11 @@ inline void throw_std_bad_alloc()
/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
* Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
*/
-inline void* handmade_aligned_malloc(size_t size)
+inline void* handmade_aligned_malloc(std::size_t size)
{
void *original = std::malloc(size+16);
if (original == 0) return 0;
- void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
+ void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
*(reinterpret_cast<void**>(aligned) - 1) = original;
return aligned;
}
@@ -108,13 +121,18 @@ inline void handmade_aligned_free(void *ptr)
* Since we know that our handmade version is based on std::realloc
* we can use std::realloc to implement efficient reallocation.
*/
-inline void* handmade_aligned_realloc(void* ptr, size_t size, size_t = 0)
+inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)
{
if (ptr == 0) return handmade_aligned_malloc(size);
void *original = *(reinterpret_cast<void**>(ptr) - 1);
+ std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
original = std::realloc(original,size+16);
if (original == 0) return 0;
- void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
+ void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
+ void *previous_aligned = static_cast<char *>(original)+previous_offset;
+ if(aligned!=previous_aligned)
+ std::memmove(aligned, previous_aligned, size);
+
*(reinterpret_cast<void**>(aligned) - 1) = original;
return aligned;
}
@@ -123,7 +141,7 @@ inline void* handmade_aligned_realloc(void* ptr, size_t size, size_t = 0)
*** Implementation of generic aligned realloc (when no realloc can be used)***
*****************************************************************************/
-void* aligned_malloc(size_t size);
+void* aligned_malloc(std::size_t size);
void aligned_free(void *ptr);
/** \internal
@@ -204,7 +222,7 @@ inline void* aligned_malloc(size_t size)
if(posix_memalign(&result, 16, size)) result = 0;
#elif EIGEN_HAS_MM_MALLOC
result = _mm_malloc(size, 16);
- #elif (defined _MSC_VER)
+ #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
result = _aligned_malloc(size, 16);
#else
result = handmade_aligned_malloc(size);
@@ -227,7 +245,7 @@ inline void aligned_free(void *ptr)
std::free(ptr);
#elif EIGEN_HAS_MM_MALLOC
_mm_free(ptr);
- #elif defined(_MSC_VER)
+ #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
_aligned_free(ptr);
#else
handmade_aligned_free(ptr);
@@ -446,7 +464,6 @@ template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *
template<typename Scalar, typename Index>
static inline Index first_aligned(const Scalar* array, Index size)
{
- typedef typename packet_traits<Scalar>::type Packet;
enum { PacketSize = packet_traits<Scalar>::size,
PacketAlignedMask = PacketSize-1
};
@@ -470,6 +487,13 @@ static inline Index first_aligned(const Scalar* array, Index size)
}
}
+/** \internal Returns the smallest integer multiple of \a base that is greater than or equal to \a size
+ */
+template<typename Index>
+inline static Index first_multiple(Index size, Index base)
+{
+ return ((size+base-1)/base)*base;
+}
// std::copy is much slower than memcpy, so let's introduce a smart_copy which
// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
@@ -554,7 +578,7 @@ template<typename T> class aligned_stack_memory_handler
*/
#ifdef EIGEN_ALLOCA
- #ifdef __arm__
+ #if defined(__arm__) || defined(_WIN32)
#define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
#else
#define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
@@ -610,7 +634,9 @@ template<typename T> class aligned_stack_memory_handler
/* memory allocated we can safely let the default implementation handle */ \
/* this particular case. */ \
static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
+ static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \
void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
+ void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \
/* nothrow-new (returns zero instead of std::bad_alloc) */ \
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
void operator delete(void *ptr, const std::nothrow_t&) throw() { \
@@ -705,15 +731,6 @@ public:
::new( p ) T( value );
}
- // Support for c++11
-#if (__cplusplus >= 201103L)
- template<typename... Args>
- void construct(pointer p, Args&&... args)
- {
- ::new(p) T(std::forward<Args>(args)...);
- }
-#endif
-
void destroy( pointer p )
{
p->~T();
@@ -738,14 +755,19 @@ public:
# if defined(__PIC__) && defined(__i386__)
// Case for x86 with PIC
# define EIGEN_CPUID(abcd,func,id) \
- __asm__ __volatile__ ("xchgl %%ebx, %%esi;cpuid; xchgl %%ebx,%%esi": "=a" (abcd[0]), "=S" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
+ __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
+# elif defined(__PIC__) && defined(__x86_64__)
+ // Case for x64 with PIC. In theory this is only a problem with recent gcc and with the medium or large code models, not with the default small code model.
+ // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.
+# define EIGEN_CPUID(abcd,func,id) \
+ __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
# else
// Case for x86_64 or x86 w/o PIC
# define EIGEN_CPUID(abcd,func,id) \
- __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) );
+ __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
# endif
# elif defined(_MSC_VER)
-# if (_MSC_VER > 1500)
+# if (_MSC_VER > 1500) && ( defined(_M_IX86) || defined(_M_X64) )
# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
# endif
# endif
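
The handmade allocator over-allocates by 16 bytes, rounds up to the next 16-byte boundary, and stores the pointer returned by malloc in the slot just below the aligned address; the realloc fix above is needed because std::realloc may hand back a block whose alignment offset differs from the old one, so the payload must be memmove'd before the bookkeeping pointer is rewritten. A condensed standalone sketch of the malloc/free half (hypothetical names, mirroring the scheme shown above):

    #include <cstdlib>

    void* aligned_malloc16(std::size_t size)
    {
      void* original = std::malloc(size + 16);             // room for padding + back-pointer
      if (original == 0) return 0;
      void* aligned = reinterpret_cast<void*>(
          (reinterpret_cast<std::size_t>(original) & ~std::size_t(15)) + 16);
      *(reinterpret_cast<void**>(aligned) - 1) = original; // stash malloc's pointer
      return aligned;
    }

    void aligned_free16(void* ptr)
    {
      if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
    }
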
diff --git a/extern/Eigen3/Eigen/src/Core/util/Meta.h b/extern/Eigen3/Eigen/src/Core/util/Meta.h
index a5f31164d15..71d58710871 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Meta.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Meta.h
@@ -186,23 +186,35 @@ template<int Y, int InfX, int SupX>
class meta_sqrt<Y, InfX, SupX, true> { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
/** \internal determines whether the product of two numeric types is allowed and what the return type is */
-template<typename T, typename U> struct scalar_product_traits;
+template<typename T, typename U> struct scalar_product_traits
+{
+ enum { Defined = 0 };
+};
template<typename T> struct scalar_product_traits<T,T>
{
- //enum { Cost = NumTraits<T>::MulCost };
+ enum {
+ // Cost = NumTraits<T>::MulCost,
+ Defined = 1
+ };
typedef T ReturnType;
};
template<typename T> struct scalar_product_traits<T,std::complex<T> >
{
- //enum { Cost = 2*NumTraits<T>::MulCost };
+ enum {
+ // Cost = 2*NumTraits<T>::MulCost,
+ Defined = 1
+ };
typedef std::complex<T> ReturnType;
};
template<typename T> struct scalar_product_traits<std::complex<T>, T>
{
- //enum { Cost = 2*NumTraits<T>::MulCost };
+ enum {
+ // Cost = 2*NumTraits<T>::MulCost,
+ Defined = 1
+ };
typedef std::complex<T> ReturnType;
};
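
Giving the primary template a body with Defined = 0, instead of leaving it declared-only, lets client code test whether a scalar pair is multipliable without hitting an incomplete-type error, e.g. as an SFINAE gate. A minimal C++03-style sketch outside Eigen (all names hypothetical):

    #include <complex>

    template<typename T, typename U> struct product_traits { enum { Defined = 0 }; };
    template<typename T> struct product_traits<T,T>
    { enum { Defined = 1 }; typedef T ReturnType; };
    template<typename T> struct product_traits<T,std::complex<T> >
    { enum { Defined = 1 }; typedef std::complex<T> ReturnType; };

    template<bool Cond, typename T = void> struct enable_if_c {};
    template<typename T> struct enable_if_c<true,T> { typedef T type; };

    // Participates in overload resolution only for allowed scalar pairs.
    template<typename T, typename U>
    typename enable_if_c<product_traits<T,U>::Defined != 0,
                         typename product_traits<T,U>::ReturnType>::type
    mixed_mul(const T& a, const U& b) { return a * b; }

    // mixed_mul(2.0, std::complex<double>(1,1)) compiles;
    // mixed_mul(2.0f, 3.0) is removed from the overload set.
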
diff --git a/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h b/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h
index b46a75b3783..8872c5b648e 100644
--- a/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h
+++ b/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h
@@ -89,7 +89,8 @@
YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,
YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
- THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH
+ THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
+ OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
};
};
diff --git a/extern/Eigen3/Eigen/src/Core/util/XprHelper.h b/extern/Eigen3/Eigen/src/Core/util/XprHelper.h
index 2a65c7cbfa4..3c4773054b4 100644
--- a/extern/Eigen3/Eigen/src/Core/util/XprHelper.h
+++ b/extern/Eigen3/Eigen/src/Core/util/XprHelper.h
@@ -65,12 +65,34 @@ template<typename T> class variable_if_dynamic<T, Dynamic>
void setValue(T value) { m_value = value; }
};
+/** \internal like variable_if_dynamic but for DynamicIndex
+ */
+template<typename T, int Value> class variable_if_dynamicindex
+{
+ public:
+ EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
+ explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); }
+ static T value() { return T(Value); }
+ void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
+{
+ T m_value;
+ variable_if_dynamicindex() { assert(false); }
+ public:
+ explicit variable_if_dynamicindex(T value) : m_value(value) {}
+ T value() const { return m_value; }
+ void setValue(T value) { m_value = value; }
+};
+
template<typename T> struct functor_traits
{
enum
{
Cost = 10,
- PacketAccess = false
+ PacketAccess = false,
+ IsRepeatable = false
};
};
@@ -301,9 +323,9 @@ template<typename T, int n=1, typename PlainObject = typename eval<T>::type> str
// it's important that this value can still be squared without integer overflowing.
DynamicAsInteger = 10000,
ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
- ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? DynamicAsInteger : ScalarReadCost,
+ ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? int(DynamicAsInteger) : int(ScalarReadCost),
CoeffReadCost = traits<T>::CoeffReadCost,
- CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? DynamicAsInteger : CoeffReadCost,
+ CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? int(DynamicAsInteger) : int(CoeffReadCost),
NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n,
CostEvalAsInteger = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger,
CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger
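
The int() casts added to the cost computation above keep the conditional operator from mixing values of two distinct unnamed enum types, which some compilers warn about or resolve through an implementation-chosen common type. A standalone illustration with made-up names:

    struct TraitsA { enum { Cost = 3 }; };
    struct TraitsB { enum { Big = 10000 }; };

    enum {
      // Mixing the two enum types directly in one conditional expression
      // draws -Wenum-compare style diagnostics on several compilers:
      //   Picked = (TraitsA::Cost == 3) ? TraitsB::Big : TraitsA::Cost,
      // Forcing both operands to int first is well-defined and quiet:
      Picked = (TraitsA::Cost == 3) ? int(TraitsB::Big) : int(TraitsA::Cost)
    };
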
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AlignedBox.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
index 5c928e8fc2d..2e4309dd945 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
@@ -34,7 +34,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
/** Default constructor initializing a null box. */
- inline explicit AlignedBox()
+ inline AlignedBox()
{ if (AmbientDimAtCompileTime!=Dynamic) setNull(); }
/** Constructs a null box with \a _dim the dimension of the ambient space. */
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AngleAxis.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
index 20f1fceeb19..af598a40313 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
index 19cc1bfd883..b95bf00ecff 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
@@ -44,7 +44,7 @@ public:
typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
/** Default constructor without initialization */
- inline explicit Hyperplane() {}
+ inline Hyperplane() {}
/** Constructs a dynamic-size hyperplane with \a _dim the dimension
* of the ambient space */
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
index 6e4a168a8cd..9b57b7e0bb4 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
@@ -36,7 +36,7 @@ public:
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
/** Default constructor without initialization */
- inline explicit ParametrizedLine() {}
+ inline ParametrizedLine() {}
/** Constructs a dynamic-size line with \a _dim the dimension
* of the ambient space */
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h
index ec87da054d6..4b6390cf1de 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
index 3e02b7a4fd1..19b8582a1b1 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h
index 78ad73b60ad..b1c8f38da93 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h
index a07c1c7c762..b8fa6cd3f64 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h
index dceb8020383..fab60b251df 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h
index 0fb9a9f9a5a..2b9859f6f4c 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h b/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h
index 7aff428dc45..0e6fdb4889d 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h b/extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h
index 3a8a9ca8146..3544af2538d 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h
@@ -12,18 +12,18 @@
namespace Eigen {
-template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return internal::real(x); }
-template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return internal::imag(x); }
-template<typename T> inline T ei_conj(const T& x) { return internal::conj(x); }
-template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { return internal::abs(x); }
-template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return internal::abs2(x); }
-template<typename T> inline T ei_sqrt(const T& x) { return internal::sqrt(x); }
-template<typename T> inline T ei_exp (const T& x) { return internal::exp(x); }
-template<typename T> inline T ei_log (const T& x) { return internal::log(x); }
-template<typename T> inline T ei_sin (const T& x) { return internal::sin(x); }
-template<typename T> inline T ei_cos (const T& x) { return internal::cos(x); }
-template<typename T> inline T ei_atan2(const T& x,const T& y) { return internal::atan2(x,y); }
-template<typename T> inline T ei_pow (const T& x,const T& y) { return internal::pow(x,y); }
+template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return numext::real(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return numext::imag(x); }
+template<typename T> inline T ei_conj(const T& x) { return numext::conj(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { using std::abs; return abs(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return numext::abs2(x); }
+template<typename T> inline T ei_sqrt(const T& x) { using std::sqrt; return sqrt(x); }
+template<typename T> inline T ei_exp (const T& x) { using std::exp; return exp(x); }
+template<typename T> inline T ei_log (const T& x) { using std::log; return log(x); }
+template<typename T> inline T ei_sin (const T& x) { using std::sin; return sin(x); }
+template<typename T> inline T ei_cos (const T& x) { using std::cos; return cos(x); }
+template<typename T> inline T ei_atan2(const T& x,const T& y) { using std::atan2; return atan2(x,y); }
+template<typename T> inline T ei_pow (const T& x,const T& y) { return numext::pow(x,y); }
template<typename T> inline T ei_random () { return internal::random<T>(); }
template<typename T> inline T ei_random (const T& x, const T& y) { return internal::random(x, y); }
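
The rewritten ei_* shims use the "using std::f; return f(x);" idiom: builtin scalars pick up the std overload, while argument-dependent lookup can still select a user-supplied overload for custom scalar types. A standalone illustration (mylib and Dual are made up):

    #include <cmath>

    namespace mylib {
      struct Dual { double v, d; };   // value + derivative
      Dual sqrt(const Dual& x)        // found via ADL
      {
        using std::sqrt;
        Dual r = { sqrt(x.v), x.d / (2.0 * sqrt(x.v)) };
        return r;
      }
    }

    template<typename T> T generic_sqrt(const T& x)
    {
      using std::sqrt;  // fallback for float, double, long double, ...
      return sqrt(x);   // unqualified call: ADL may pick mylib::sqrt
    }

    int main()
    {
      double a = generic_sqrt(2.0);      // uses std::sqrt
      mylib::Dual b = { 4.0, 1.0 };
      mylib::Dual c = generic_sqrt(b);   // uses mylib::sqrt
      return (a > 0 && c.v == 2.0) ? 0 : 1;
    }
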
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/SVD.h b/extern/Eigen3/Eigen/src/Eigen2Support/SVD.h
index 3d2eeb44586..3d03d2288d8 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/SVD.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/SVD.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
@@ -315,7 +315,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
e[p-2] = 0.0;
for (j = p-2; j >= k; --j)
{
- Scalar t(internal::hypot(m_sigma[j],f));
+ Scalar t(numext::hypot(m_sigma[j],f));
Scalar cs(m_sigma[j]/t);
Scalar sn(f/t);
m_sigma[j] = t;
@@ -344,7 +344,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
e[k-1] = 0.0;
for (j = k; j < p; ++j)
{
- Scalar t(internal::hypot(m_sigma[j],f));
+ Scalar t(numext::hypot(m_sigma[j],f));
Scalar cs( m_sigma[j]/t);
Scalar sn(f/t);
m_sigma[j] = t;
@@ -392,7 +392,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
for (j = k; j < p-1; ++j)
{
- Scalar t = internal::hypot(f,g);
+ Scalar t = numext::hypot(f,g);
Scalar cs = f/t;
Scalar sn = g/t;
if (j != k)
@@ -410,7 +410,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
m_matV(i,j) = t;
}
}
- t = internal::hypot(f,g);
+ t = numext::hypot(f,g);
cs = f/t;
sn = g/t;
m_sigma[j] = t;
@@ -512,8 +512,7 @@ template<typename MatrixType>
template<typename OtherDerived, typename ResultType>
bool SVD<MatrixType>::solve(const MatrixBase<OtherDerived> &b, ResultType* result) const
{
- const int rows = m_matU.rows();
- ei_assert(b.rows() == rows);
+ ei_assert(b.rows() == m_matU.rows());
Scalar maxVal = m_sigma.cwise().abs().maxCoeff();
for (int j=0; j<b.cols(); ++j)
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index c4b8a308cee..af434bc9bd6 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -3,7 +3,7 @@
//
// Copyright (C) 2009 Claire Maurice
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -220,6 +220,19 @@ template<typename _MatrixType> class ComplexEigenSolver
return m_schur.info();
}
+ /** \brief Sets the maximum number of iterations allowed. */
+ ComplexEigenSolver& setMaxIterations(Index maxIters)
+ {
+ m_schur.setMaxIterations(maxIters);
+ return *this;
+ }
+
+ /** \brief Returns the maximum number of iterations. */
+ Index getMaxIterations()
+ {
+ return m_schur.getMaxIterations();
+ }
+
protected:
EigenvectorType m_eivec;
EigenvalueType m_eivalues;
@@ -229,16 +242,17 @@ template<typename _MatrixType> class ComplexEigenSolver
EigenvectorType m_matX;
private:
- void doComputeEigenvectors(RealScalar matrixnorm);
+ void doComputeEigenvectors(const RealScalar& matrixnorm);
void sortEigenvalues(bool computeEigenvectors);
};
template<typename MatrixType>
-ComplexEigenSolver<MatrixType>& ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+ComplexEigenSolver<MatrixType>&
+ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
// this code is inspired from Jampack
- assert(matrix.cols() == matrix.rows());
+ eigen_assert(matrix.cols() == matrix.rows());
// Do a complex Schur decomposition, A = U T U^*
// The eigenvalues are on the diagonal of T.
@@ -259,7 +273,7 @@ ComplexEigenSolver<MatrixType>& ComplexEigenSolver<MatrixType>::compute(const Ma
template<typename MatrixType>
-void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
+void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(const RealScalar& matrixnorm)
{
const Index n = m_eivalues.size();
@@ -280,7 +294,7 @@ void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm
{
// If the i-th and k-th eigenvalue are equal, then z equals 0.
// Use a small value instead, to prevent division by zero.
- internal::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
+ numext::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
}
m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
}
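
The new accessors simply forward to the underlying ComplexSchur object, so the iteration budget can be tuned per solve. A small usage sketch of the API added above:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(50, 50);
      Eigen::ComplexEigenSolver<Eigen::MatrixXcd> es;

      es.setMaxIterations(10).compute(A);      // deliberately tight budget
      if (es.info() == Eigen::NoConvergence)
        es.setMaxIterations(500).compute(A);   // retry with more headroom

      std::cout << es.eigenvalues().transpose() << "\n";
      return es.info() == Eigen::Success ? 0 : 1;
    }
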
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h
index 16a9a03d219..89e6cade334 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -3,7 +3,7 @@
//
// Copyright (C) 2009 Claire Maurice
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -96,7 +96,8 @@ template<typename _MatrixType> class ComplexSchur
m_matU(size,size),
m_hess(size),
m_isInitialized(false),
- m_matUisUptodate(false)
+ m_matUisUptodate(false),
+ m_maxIters(-1)
{}
/** \brief Constructor; computes Schur decomposition of given matrix.
@@ -109,11 +110,12 @@ template<typename _MatrixType> class ComplexSchur
* \sa matrixT() and matrixU() for examples.
*/
ComplexSchur(const MatrixType& matrix, bool computeU = true)
- : m_matT(matrix.rows(),matrix.cols()),
- m_matU(matrix.rows(),matrix.cols()),
- m_hess(matrix.rows()),
- m_isInitialized(false),
- m_matUisUptodate(false)
+ : m_matT(matrix.rows(),matrix.cols()),
+ m_matU(matrix.rows(),matrix.cols()),
+ m_hess(matrix.rows()),
+ m_isInitialized(false),
+ m_matUisUptodate(false),
+ m_maxIters(-1)
{
compute(matrix, computeU);
}
@@ -166,6 +168,7 @@ template<typename _MatrixType> class ComplexSchur
*
* \param[in] matrix Square matrix whose Schur decomposition is to be computed.
* \param[in] computeU If true, both T and U are computed; if false, only T is computed.
+
* \returns Reference to \c *this
*
* The Schur decomposition is computed by first reducing the
@@ -180,8 +183,30 @@ template<typename _MatrixType> class ComplexSchur
*
* Example: \include ComplexSchur_compute.cpp
* Output: \verbinclude ComplexSchur_compute.out
+ *
+ * \sa compute(const MatrixType&, bool, Index)
*/
ComplexSchur& compute(const MatrixType& matrix, bool computeU = true);
+
+ /** \brief Compute Schur decomposition from a given Hessenberg matrix
+ * \param[in] matrixH Matrix in Hessenberg form H
+ * \param[in] matrixQ Orthogonal matrix Q that transforms a matrix A to H: A = Q H Q^T
+ * \param computeU Computes the matrix U of the Schur vectors
+ * \return Reference to \c *this
+ *
+ * This routine assumes that the matrix is already reduced to Hessenberg form matrixH,
+ * using the class HessenbergDecomposition or by other means.
+ * It computes the upper triangular matrix T of the Schur decomposition of H.
+ * When computeU is true, this routine computes the matrix U such that
+ * A = U T U^* = (QZ) T (QZ)^*, where A = Q H Q^* is the initial matrix.
+ *
+ * \note Q is referenced if computeU is true; so, if the initial orthogonal matrix
+ * is not available, the user should give an identity matrix (Q.setIdentity()).
+ *
+ * \sa compute(const MatrixType&, bool)
+ */
+ template<typename HessMatrixType, typename OrthMatrixType>
+ ComplexSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU=true);
/** \brief Reports whether previous computation was successful.
*
@@ -189,15 +214,33 @@ template<typename _MatrixType> class ComplexSchur
*/
ComputationInfo info() const
{
- eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+ eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
return m_info;
}
- /** \brief Maximum number of iterations.
+ /** \brief Sets the maximum number of iterations allowed.
*
- * Maximum number of iterations allowed for an eigenvalue to converge.
+ * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
+ * of the matrix.
*/
- static const int m_maxIterations = 30;
+ ComplexSchur& setMaxIterations(Index maxIters)
+ {
+ m_maxIters = maxIters;
+ return *this;
+ }
+
+ /** \brief Returns the maximum number of iterations. */
+ Index getMaxIterations()
+ {
+ return m_maxIters;
+ }
+
+ /** \brief Maximum number of iterations per row.
+ *
+ * If not otherwise specified, the maximum number of iterations is this number times the size of the
+ * matrix. It is currently set to 30.
+ */
+ static const int m_maxIterationsPerRow = 30;
protected:
ComplexMatrixType m_matT, m_matU;
@@ -205,6 +248,7 @@ template<typename _MatrixType> class ComplexSchur
ComputationInfo m_info;
bool m_isInitialized;
bool m_matUisUptodate;
+ Index m_maxIters;
private:
bool subdiagonalEntryIsNeglegible(Index i);
@@ -219,8 +263,8 @@ template<typename _MatrixType> class ComplexSchur
template<typename MatrixType>
inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
{
- RealScalar d = internal::norm1(m_matT.coeff(i,i)) + internal::norm1(m_matT.coeff(i+1,i+1));
- RealScalar sd = internal::norm1(m_matT.coeff(i+1,i));
+ RealScalar d = numext::norm1(m_matT.coeff(i,i)) + numext::norm1(m_matT.coeff(i+1,i+1));
+ RealScalar sd = numext::norm1(m_matT.coeff(i+1,i));
if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
{
m_matT.coeffRef(i+1,i) = ComplexScalar(0);
@@ -234,10 +278,11 @@ inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
template<typename MatrixType>
typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
{
+ using std::abs;
if (iter == 10 || iter == 20)
{
// exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
- return internal::abs(internal::real(m_matT.coeff(iu,iu-1))) + internal::abs(internal::real(m_matT.coeff(iu-1,iu-2)));
+ return abs(numext::real(m_matT.coeff(iu,iu-1))) + abs(numext::real(m_matT.coeff(iu-1,iu-2)));
}
// compute the shift as one of the eigenvalues of t, the 2x2
@@ -254,13 +299,13 @@ typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::compu
ComplexScalar eival1 = (trace + disc) / RealScalar(2);
ComplexScalar eival2 = (trace - disc) / RealScalar(2);
- if(internal::norm1(eival1) > internal::norm1(eival2))
+ if(numext::norm1(eival1) > numext::norm1(eival2))
eival2 = det / eival1;
else
eival1 = det / eival2;
// choose the eigenvalue closest to the bottom entry of the diagonal
- if(internal::norm1(eival1-t.coeff(1,1)) < internal::norm1(eival2-t.coeff(1,1)))
+ if(numext::norm1(eival1-t.coeff(1,1)) < numext::norm1(eival2-t.coeff(1,1)))
return normt * eival1;
else
return normt * eival2;
@@ -284,10 +329,20 @@ ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const MatrixType& ma
}
internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix, computeU);
- reduceToTriangularForm(computeU);
+ computeFromHessenberg(m_matT, m_matU, computeU);
return *this;
}
+template<typename MatrixType>
+template<typename HessMatrixType, typename OrthMatrixType>
+ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
+{
+ m_matT = matrixH;
+ if(computeU)
+ m_matU = matrixQ;
+ reduceToTriangularForm(computeU);
+ return *this;
+}
namespace internal {
/* Reduce given matrix to Hessenberg form */
@@ -309,7 +364,6 @@ struct complex_schur_reduce_to_hessenberg<MatrixType, false>
static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
{
typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
- typedef typename ComplexSchur<MatrixType>::ComplexMatrixType ComplexMatrixType;
// Note: m_hess is over RealScalar; m_matT and m_matU are over ComplexScalar
_this.m_hess.compute(matrix);
@@ -329,6 +383,10 @@ struct complex_schur_reduce_to_hessenberg<MatrixType, false>
template<typename MatrixType>
void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
{
+ Index maxIters = m_maxIters;
+ if (maxIters == -1)
+ maxIters = m_maxIterationsPerRow * m_matT.rows();
+
// The matrix m_matT is divided in three parts.
// Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
// Rows il,...,iu is the part we are working on (the active submatrix).
@@ -336,6 +394,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
Index iu = m_matT.cols() - 1;
Index il;
Index iter = 0; // number of iterations spent on the current (iu,iu) element
+ Index totalIter = 0; // number of iterations spent on the whole matrix
while(true)
{
@@ -350,9 +409,10 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
// if iu is zero then we are done; the whole matrix is triangularized
if(iu==0) break;
- // if we spent too many iterations on the current element, we give up
+ // if we spent too many iterations, we give up
iter++;
- if(iter > m_maxIterations * m_matT.cols()) break;
+ totalIter++;
+ if(totalIter > maxIters) break;
// find il, the top row of the active submatrix
il = iu-1;
@@ -382,7 +442,7 @@ void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
}
}
- if(iter <= m_maxIterations * m_matT.cols())
+ if(totalIter <= maxIters)
m_info = Success;
else
m_info = NoConvergence;
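
computeFromHessenberg lets callers who already hold a Hessenberg factorization skip the reduction step, as the doc comment above describes. A minimal sketch of that flow:

    #include <Eigen/Eigenvalues>

    int main()
    {
      Eigen::MatrixXcd A = Eigen::MatrixXcd::Random(20, 20);

      // Reduce to Hessenberg form once, then reuse it for the Schur step.
      Eigen::HessenbergDecomposition<Eigen::MatrixXcd> hess(A);
      Eigen::ComplexSchur<Eigen::MatrixXcd> schur;
      schur.computeFromHessenberg(hess.matrixH(), hess.matrixQ(), true);

      return schur.info() == Eigen::Success ? 0 : 1;
    }
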
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
index aa18e696352..91496ae5bdb 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
@@ -40,7 +40,7 @@ namespace Eigen {
/** \internal Specialization for the data types supported by MKL */
#define EIGEN_MKL_SCHUR_COMPLEX(EIGTYPE, MKLTYPE, MKLPREFIX, MKLPREFIX_U, EIGCOLROW, MKLCOLROW) \
-template<> inline\
+template<> inline \
ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
{ \
@@ -49,7 +49,7 @@ ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matri
typedef MatrixType::RealScalar RealScalar; \
typedef std::complex<RealScalar> ComplexScalar; \
\
- assert(matrix.cols() == matrix.rows()); \
+ eigen_assert(matrix.cols() == matrix.rows()); \
\
m_matUisUptodate = false; \
if(matrix.cols() == 1) \
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h
index c16ff2b74e2..6e7150685a2 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h
@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -281,6 +281,19 @@ template<typename _MatrixType> class EigenSolver
return m_realSchur.info();
}
+ /** \brief Sets the maximum number of iterations allowed. */
+ EigenSolver& setMaxIterations(Index maxIters)
+ {
+ m_realSchur.setMaxIterations(maxIters);
+ return *this;
+ }
+
+ /** \brief Returns the maximum number of iterations. */
+ Index getMaxIterations()
+ {
+ return m_realSchur.getMaxIterations();
+ }
+
private:
void doComputeEigenvectors();
@@ -304,12 +317,12 @@ MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
MatrixType matD = MatrixType::Zero(n,n);
for (Index i=0; i<n; ++i)
{
- if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i))))
- matD.coeffRef(i,i) = internal::real(m_eivalues.coeff(i));
+ if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i))))
+ matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i));
else
{
- matD.template block<2,2>(i,i) << internal::real(m_eivalues.coeff(i)), internal::imag(m_eivalues.coeff(i)),
- -internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i));
+ matD.template block<2,2>(i,i) << numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)),
+ -numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i));
++i;
}
}
@@ -325,7 +338,7 @@ typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eige
EigenvectorsType matV(n,n);
for (Index j=0; j<n; ++j)
{
- if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(j)), internal::real(m_eivalues.coeff(j))) || j+1==n)
+ if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j))) || j+1==n)
{
// we have a real eigen value
matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
@@ -348,12 +361,16 @@ typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eige
}
template<typename MatrixType>
-EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+EigenSolver<MatrixType>&
+EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
- assert(matrix.cols() == matrix.rows());
+ using std::sqrt;
+ using std::abs;
+ eigen_assert(matrix.cols() == matrix.rows());
// Reduce to real Schur form.
m_realSchur.compute(matrix, computeEigenvectors);
+
if (m_realSchur.info() == Success)
{
m_matT = m_realSchur.matrixT();
@@ -373,7 +390,7 @@ EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matr
else
{
Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
- Scalar z = internal::sqrt(internal::abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
+ Scalar z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
i += 2;
@@ -393,10 +410,11 @@ EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matr
// Complex scalar division.
template<typename Scalar>
-std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
+std::complex<Scalar> cdiv(const Scalar& xr, const Scalar& xi, const Scalar& yr, const Scalar& yi)
{
+ using std::abs;
Scalar r,d;
- if (internal::abs(yr) > internal::abs(yi))
+ if (abs(yr) > abs(yi))
{
r = yi/yr;
d = yr + r*yi;
@@ -414,6 +432,7 @@ std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
template<typename MatrixType>
void EigenSolver<MatrixType>::doComputeEigenvectors()
{
+ using std::abs;
const Index size = m_eivec.cols();
const Scalar eps = NumTraits<Scalar>::epsilon();
@@ -469,14 +488,14 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
Scalar t = (x * lastr - lastw * r) / denom;
m_matT.coeffRef(i,n) = t;
- if (internal::abs(x) > internal::abs(lastw))
+ if (abs(x) > abs(lastw))
m_matT.coeffRef(i+1,n) = (-r - w * t) / x;
else
m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;
}
// Overflow control
- Scalar t = internal::abs(m_matT.coeff(i,n));
+ Scalar t = abs(m_matT.coeff(i,n));
if ((eps * t) * t > Scalar(1))
m_matT.col(n).tail(size-i) /= t;
}
@@ -488,7 +507,7 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
Index l = n-1;
// Last vector component imaginary so matrix is triangular
- if (internal::abs(m_matT.coeff(n,n-1)) > internal::abs(m_matT.coeff(n-1,n)))
+ if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n)))
{
m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);
m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);
@@ -496,8 +515,8 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
else
{
std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q);
- m_matT.coeffRef(n-1,n-1) = internal::real(cc);
- m_matT.coeffRef(n-1,n) = internal::imag(cc);
+ m_matT.coeffRef(n-1,n-1) = numext::real(cc);
+ m_matT.coeffRef(n-1,n) = numext::imag(cc);
}
m_matT.coeffRef(n,n-1) = 0.0;
m_matT.coeffRef(n,n) = 1.0;
@@ -519,8 +538,8 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
if (m_eivalues.coeff(i).imag() == RealScalar(0))
{
std::complex<Scalar> cc = cdiv(-ra,-sa,w,q);
- m_matT.coeffRef(i,n-1) = internal::real(cc);
- m_matT.coeffRef(i,n) = internal::imag(cc);
+ m_matT.coeffRef(i,n-1) = numext::real(cc);
+ m_matT.coeffRef(i,n) = numext::imag(cc);
}
else
{
@@ -530,12 +549,12 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
if ((vr == 0.0) && (vi == 0.0))
- vr = eps * norm * (internal::abs(w) + internal::abs(q) + internal::abs(x) + internal::abs(y) + internal::abs(lastw));
+ vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));
- std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
- m_matT.coeffRef(i,n-1) = internal::real(cc);
- m_matT.coeffRef(i,n) = internal::imag(cc);
- if (internal::abs(x) > (internal::abs(lastw) + internal::abs(q)))
+ std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
+ m_matT.coeffRef(i,n-1) = numext::real(cc);
+ m_matT.coeffRef(i,n) = numext::imag(cc);
+ if (abs(x) > (abs(lastw) + abs(q)))
{
m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;
@@ -543,14 +562,14 @@ void EigenSolver<MatrixType>::doComputeEigenvectors()
else
{
cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q);
- m_matT.coeffRef(i+1,n-1) = internal::real(cc);
- m_matT.coeffRef(i+1,n) = internal::imag(cc);
+ m_matT.coeffRef(i+1,n-1) = numext::real(cc);
+ m_matT.coeffRef(i+1,n) = numext::imag(cc);
}
}
// Overflow control
using std::max;
- Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
+ Scalar t = (max)(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
if ((eps * t) * t > Scalar(1))
m_matT.block(i, n-1, size-i, 2) /= t;
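
The EigenSolver hunks above simply forward the new iteration controls to the internal RealSchur object. A minimal sketch using only the accessors added here:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(40, 40);
      Eigen::EigenSolver<Eigen::MatrixXd> es;
      es.setMaxIterations(10 * A.rows()); // forwarded to the internal RealSchur
      es.compute(A, /*computeEigenvectors=*/true);
      if (es.info() == Eigen::Success)
        std::cout << es.eigenvalues().transpose() << "\n";
      else
        std::cout << "no convergence within " << es.getMaxIterations()
                  << " iterations\n";
    }
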
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
new file mode 100644
index 00000000000..dc240e13e13
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -0,0 +1,341 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H
+#define EIGEN_GENERALIZEDEIGENSOLVER_H
+
+#include "./RealQZ.h"
+
+namespace Eigen {
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class GeneralizedEigenSolver
+ *
+ * \brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices
+ *
+ * \tparam _MatrixType the type of the matrices of which we are computing the
+ * eigen-decomposition; this is expected to be an instantiation of the Matrix
+ * class template. Currently, only real matrices are supported.
+ *
+ * The generalized eigenvalues and eigenvectors of a matrix pair \f$ A \f$ and \f$ B \f$ are scalars
+ * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda Bv \f$. If
+ * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
+ * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
+ * B V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
+ * have \f$ A = B V D V^{-1} \f$. This is called the generalized eigen-decomposition.
+ *
+ * The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the
+ * matrices are real. Moreover, the generalized eigenvalue might be infinite if the matrix B is
+ * singular. To work around this difficulty, the eigenvalues are provided as a pair of complex \f$ \alpha \f$
+ * and real \f$ \beta \f$ such that: \f$ \lambda_i = \alpha_i / \beta_i \f$. If \f$ \beta_i \f$ is (nearly) zero,
+ * then one can consider the well-defined left eigenvalue \f$ \mu_i = \beta_i / \alpha_i \f$ such that:
+ * \f$ \mu_i A v_i = B v_i \f$, or even \f$ \mu_i u_i^T A = u_i^T B \f$ where \f$ u_i \f$ is
+ * called the left eigenvector.
+ *
+ * Call the function compute() to compute the generalized eigenvalues and eigenvectors of
+ * a given matrix pair. Alternatively, you can use the
+ * GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the
+ * eigenvalues and eigenvectors at construction time. Once the eigenvalues and
+ * eigenvectors are computed, they can be retrieved with the eigenvalues() and
+ * eigenvectors() functions.
+ *
+ * Here is a usage example of this class:
+ * Example: \include GeneralizedEigenSolver.cpp
+ * Output: \verbinclude GeneralizedEigenSolver.out
+ *
+ * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
+ */
+template<typename _MatrixType> class GeneralizedEigenSolver
+{
+ public:
+
+ /** \brief Synonym for the template parameter \p _MatrixType. */
+ typedef _MatrixType MatrixType;
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ /** \brief Scalar type for matrices of type #MatrixType. */
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \brief Complex scalar type for #MatrixType.
+ *
+ * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+ * \c float or \c double) and just \c Scalar if #Scalar is
+ * complex.
+ */
+ typedef std::complex<RealScalar> ComplexScalar;
+
+ /** \brief Type for the vector of real scalar values as returned by betas().
+ *
+ * This is a column vector with entries of type #Scalar.
+ * The length of the vector is the size of #MatrixType.
+ */
+ typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> VectorType;
+
+ /** \brief Type for the vector of complex scalar values as returned by alphas().
+ *
+ * This is a column vector with entries of type #ComplexScalar.
+ * The length of the vector is the size of #MatrixType.
+ */
+ typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ComplexVectorType;
+
+ /** \brief Expression type for the eigenvalues as returned by eigenvalues().
+ */
+ typedef CwiseBinaryOp<internal::scalar_quotient_op<ComplexScalar,Scalar>,ComplexVectorType,VectorType> EigenvalueType;
+
+ /** \brief Type for matrix of eigenvectors as returned by eigenvectors().
+ *
+ * This is a square matrix with entries of type #ComplexScalar.
+ * The size is the same as the size of #MatrixType.
+ */
+ typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
+
+ /** \brief Default constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via compute(const MatrixType&, const MatrixType&, bool).
+ *
+ * \sa compute() for an example.
+ */
+ GeneralizedEigenSolver() : m_eivec(), m_alphas(), m_betas(), m_isInitialized(false), m_realQZ(), m_matS(), m_tmp() {}
+
+ /** \brief Default constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa GeneralizedEigenSolver()
+ */
+ GeneralizedEigenSolver(Index size)
+ : m_eivec(size, size),
+ m_alphas(size),
+ m_betas(size),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_realQZ(size),
+ m_matS(size, size),
+ m_tmp(size)
+ {}
+
+ /** \brief Constructor; computes the generalized eigendecomposition of given matrix pair.
+ *
+ * \param[in] A Square matrix whose eigendecomposition is to be computed.
+ * \param[in] B Square matrix whose eigendecomposition is to be computed.
+ * \param[in] computeEigenvectors If true, both the eigenvectors and the
+ * eigenvalues are computed; if false, only the eigenvalues are computed.
+ *
+ * This constructor calls compute() to compute the generalized eigenvalues
+ * and eigenvectors.
+ *
+ * \sa compute()
+ */
+ GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true)
+ : m_eivec(A.rows(), A.cols()),
+ m_alphas(A.cols()),
+ m_betas(A.cols()),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_realQZ(A.cols()),
+ m_matS(A.rows(), A.cols()),
+ m_tmp(A.cols())
+ {
+ compute(A, B, computeEigenvectors);
+ }
+
+ /* \brief Returns the computed generalized eigenvectors.
+ *
+ * \returns %Matrix whose columns are the (possibly complex) eigenvectors.
+ *
+ * \pre Either the constructor
+ * GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function
+ * compute(const MatrixType&, const MatrixType&, bool) has been called before, and
+ * \p computeEigenvectors was set to true (the default).
+ *
+ * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+ * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
+ * eigenvectors are normalized to have (Euclidean) norm equal to one. The
+ * matrix returned by this function is the matrix \f$ V \f$ in the
+ * generalized eigendecomposition \f$ A = B V D V^{-1} \f$, if it exists.
+ *
+ * \sa eigenvalues()
+ */
+// EigenvectorsType eigenvectors() const;
+
+ /** \brief Returns an expression of the computed generalized eigenvalues.
+ *
+ * \returns An expression of the column vector containing the eigenvalues.
+ *
+ * It is a shortcut for \code this->alphas().cwiseQuotient(this->betas()); \endcode
+ * Note that betas might contain zeros. It is therefore not recommended to use this function,
+ * but rather directly deal with the alphas and betas vectors.
+ *
+ * \pre Either the constructor
+ * GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function
+ * compute(const MatrixType&,const MatrixType&,bool) has been called before.
+ *
+ * The eigenvalues are repeated according to their algebraic multiplicity,
+ * so there are as many eigenvalues as rows in the matrix. The eigenvalues
+ * are not sorted in any particular order.
+ *
+ * \sa alphas(), betas(), eigenvectors()
+ */
+ EigenvalueType eigenvalues() const
+ {
+ eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+ return EigenvalueType(m_alphas,m_betas);
+ }
+
+ /** \returns A const reference to the vector containing the alpha values
+ *
+ * This vector permits the reconstruction of the j-th eigenvalue as alphas(j)/betas(j).
+ *
+ * \sa betas(), eigenvalues() */
+ ComplexVectorType alphas() const
+ {
+ eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+ return m_alphas;
+ }
+
+ /** \returns A const reference to the vector containing the beta values
+ *
+ * This vector permits the reconstruction of the j-th eigenvalue as alphas(j)/betas(j).
+ *
+ * \sa alphas(), eigenvalues() */
+ VectorType betas() const
+ {
+ eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+ return m_betas;
+ }
+
+ /** \brief Computes generalized eigendecomposition of given matrix.
+ *
+ * \param[in] A Square matrix whose eigendecomposition is to be computed.
+ * \param[in] B Square matrix whose eigendecomposition is to be computed.
+ * \param[in] computeEigenvectors If true, both the eigenvectors and the
+ * eigenvalues are computed; if false, only the eigenvalues are
+ * computed.
+ * \returns Reference to \c *this
+ *
+ * This function computes the generalized eigenvalues of the real matrix pair \p A and \p B.
+ * The eigenvalues() function can be used to retrieve them. If
+ * \p computeEigenvectors is true, then the eigenvectors are also computed
+ * and can be retrieved by calling eigenvectors().
+ *
+ * The matrix is first reduced to real generalized Schur form using the RealQZ
+ * class. The generalized Schur decomposition is then used to compute the eigenvalues
+ * and eigenvectors.
+ *
+ * The cost of the computation is dominated by the cost of the
+ * generalized Schur decomposition.
+ *
+ * This method reuses the allocated data in the GeneralizedEigenSolver object.
+ */
+ GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true);
+
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+ return m_realQZ.info();
+ }
+
+ /** Sets the maximal number of iterations allowed.
+ */
+ GeneralizedEigenSolver& setMaxIterations(Index maxIters)
+ {
+ m_realQZ.setMaxIterations(maxIters);
+ return *this;
+ }
+
+ protected:
+ MatrixType m_eivec;
+ ComplexVectorType m_alphas;
+ VectorType m_betas;
+ bool m_isInitialized;
+ bool m_eigenvectorsOk;
+ RealQZ<MatrixType> m_realQZ;
+ MatrixType m_matS;
+
+ typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+ ColumnVectorType m_tmp;
+};
+
+//template<typename MatrixType>
+//typename GeneralizedEigenSolver<MatrixType>::EigenvectorsType GeneralizedEigenSolver<MatrixType>::eigenvectors() const
+//{
+// eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+// eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+// Index n = m_eivec.cols();
+// EigenvectorsType matV(n,n);
+// // TODO
+// return matV;
+//}
+
+template<typename MatrixType>
+GeneralizedEigenSolver<MatrixType>&
+GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
+{
+ using std::sqrt;
+ using std::abs;
+ eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
+
+ // Reduce to generalized real Schur form:
+ // A = Q S Z and B = Q T Z
+ m_realQZ.compute(A, B, computeEigenvectors);
+
+ if (m_realQZ.info() == Success)
+ {
+ m_matS = m_realQZ.matrixS();
+ if (computeEigenvectors)
+ m_eivec = m_realQZ.matrixZ().transpose();
+
+ // Compute eigenvalues from matS
+ m_alphas.resize(A.cols());
+ m_betas.resize(A.cols());
+ Index i = 0;
+ while (i < A.cols())
+ {
+ if (i == A.cols() - 1 || m_matS.coeff(i+1, i) == Scalar(0))
+ {
+ m_alphas.coeffRef(i) = m_matS.coeff(i, i);
+ m_betas.coeffRef(i) = m_realQZ.matrixT().coeff(i,i);
+ ++i;
+ }
+ else
+ {
+ Scalar p = Scalar(0.5) * (m_matS.coeff(i, i) - m_matS.coeff(i+1, i+1));
+ Scalar z = sqrt(abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1)));
+ m_alphas.coeffRef(i) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, z);
+ m_alphas.coeffRef(i+1) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, -z);
+
+ m_betas.coeffRef(i) = m_realQZ.matrixT().coeff(i,i);
+ m_betas.coeffRef(i+1) = m_realQZ.matrixT().coeff(i,i);
+ i += 2;
+ }
+ }
+ }
+
+ m_isInitialized = true;
+ m_eigenvectorsOk = false;//computeEigenvectors;
+
+ return *this;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERALIZEDEIGENSOLVER_H
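
GeneralizedEigenSolver is a new file in this update. Since eigenvectors() is still commented out in this version, only the eigenvalues are available, and they are deliberately exposed as (alpha, beta) pairs because beta can be zero when B is singular. A minimal sketch of the intended use:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 4);
      Eigen::GeneralizedEigenSolver<Eigen::MatrixXd> ges(A, B);
      if (ges.info() == Eigen::Success)
      {
        // eigenvalues() is just alphas().cwiseQuotient(betas()); read the raw
        // pairs instead when B may be singular, since betas can contain zeros.
        std::cout << "alphas: " << ges.alphas().transpose() << "\n";
        std::cout << "betas:  " << ges.betas().transpose() << "\n";
      }
    }
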
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
index b8378b08a09..3db0c0106c9 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -82,7 +82,7 @@ template<typename _MatrixType> class HessenbergDecomposition
typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
/** \brief Return type of matrixQ() */
- typedef typename HouseholderSequence<MatrixType,CoeffVectorType>::ConjugateReturnType HouseholderSequenceType;
+ typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
typedef internal::HessenbergDecompositionMatrixHReturnType<MatrixType> MatrixHReturnType;
@@ -291,7 +291,7 @@ template<typename _MatrixType> class HessenbergDecomposition
template<typename MatrixType>
void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
{
- assert(matA.rows()==matA.cols());
+ eigen_assert(matA.rows()==matA.cols());
Index n = matA.rows();
temp.resize(n);
for (Index i = 0; i<n-1; ++i)
@@ -313,7 +313,7 @@ void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVector
// A = A H'
matA.rightCols(remainingSize)
- .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), internal::conj(h), &temp.coeffRef(0));
+ .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));
}
}
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
index 6af481c75f6..4fec8af0a3e 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
@@ -121,10 +121,11 @@ template<typename Derived>
inline typename MatrixBase<Derived>::RealScalar
MatrixBase<Derived>::operatorNorm() const
{
+ using std::sqrt;
typename Derived::PlainObject m_eval(derived());
// FIXME if it is really guaranteed that the eigenvalues are already sorted,
// then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.
- return internal::sqrt((m_eval*m_eval.adjoint())
+ return sqrt((m_eval*m_eval.adjoint())
.eval()
.template selfadjointView<Lower>()
.eigenvalues()
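
The operatorNorm() hunk is one instance of a pattern applied throughout this update: a qualified call such as internal::sqrt(x) becomes a local `using std::sqrt;` followed by an unqualified call, and coefficient helpers move to the numext namespace. The point of the idiom is that unqualified calls let argument-dependent lookup find overloads for user-defined scalar types, which the old internal:: functions could not do. A standalone sketch of the idiom; mylib and MyScalar are hypothetical names, not part of Eigen:

    #include <cmath>

    namespace mylib {
      struct MyScalar { double v; };
      // Found via argument-dependent lookup when abs() is called unqualified.
      double abs(const MyScalar& x) { return x.v < 0 ? -x.v : x.v; }
    }

    template<typename Scalar>
    bool closerToZero(const Scalar& x, const Scalar& y)
    {
      using std::abs;         // fallback for built-in types like double
      return abs(x) < abs(y); // unqualified: ADL finds mylib::abs for MyScalar
    }

    int main()
    {
      mylib::MyScalar a{-1.0}, b{2.0};
      return (closerToZero(a, b) && closerToZero(-1.0, 2.0)) ? 0 : 1;
    }
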
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h b/extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h
new file mode 100644
index 00000000000..5706eeebe91
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h
@@ -0,0 +1,624 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Alexey Korepanov <kaikaikai@yandex.ru>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REAL_QZ_H
+#define EIGEN_REAL_QZ_H
+
+namespace Eigen {
+
+ /** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class RealQZ
+ *
+ * \brief Performs a real QZ decomposition of a pair of square matrices
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the
+ * real QZ decomposition; this is expected to be an instantiation of the
+ * Matrix class template.
+ *
+ * Given real square matrices A and B, this class computes the real QZ
+ * decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are
+ * real orthogonal matrices, T is an upper-triangular matrix, and S is an upper
+ * quasi-triangular matrix. An orthogonal matrix is a matrix whose
+ * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
+ * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
+ * blocks and 2-by-2 blocks where further reduction is impossible due to
+ * complex eigenvalues.
+ *
+ * The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from
+ * 1x1 and 2x2 blocks on the diagonals of S and T.
+ *
+ * Call the function compute() to compute the real QZ decomposition of a
+ * given pair of matrices. Alternatively, you can use the
+ * RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ)
+ * constructor which computes the real QZ decomposition at construction
+ * time. Once the decomposition is computed, you can use the matrixS(),
+ * matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices
+ * S, T, Q and Z in the decomposition. If computeQZ==false, some time
+ * is saved by not computing matrices Q and Z.
+ *
+ * Example: \include RealQZ_compute.cpp
+ * Output: \verbinclude RealQZ_compute.out
+ *
+ * \note The implementation is based on the algorithm in "Matrix Computations"
+ * by Gene H. Golub and Charles F. Van Loan, and a paper "An algorithm for
+ * generalized eigenvalue problems" by C.B.Moler and G.W.Stewart.
+ *
+ * \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver
+ */
+
+ template<typename _MatrixType> class RealQZ
+ {
+ public:
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+ typedef typename MatrixType::Index Index;
+
+ typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+ typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+
+ /** \brief Default constructor.
+ *
+ * \param [in] size Positive integer, size of the matrix whose QZ decomposition will be computed.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via compute(). The \p size parameter is only
+ * used as a hint. It is not an error to give a wrong \p size, but it may
+ * impair performance.
+ *
+ * \sa compute() for an example.
+ */
+ RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) :
+ m_S(size, size),
+ m_T(size, size),
+ m_Q(size, size),
+ m_Z(size, size),
+ m_workspace(size*2),
+ m_maxIters(400),
+ m_isInitialized(false)
+ { }
+
+ /** \brief Constructor; computes real QZ decomposition of given matrices
+ *
+ * \param[in] A Matrix A.
+ * \param[in] B Matrix B.
+ * \param[in] computeQZ If false, Q and Z are not computed.
+ *
+ * This constructor calls compute() to compute the QZ decomposition.
+ */
+ RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) :
+ m_S(A.rows(),A.cols()),
+ m_T(A.rows(),A.cols()),
+ m_Q(A.rows(),A.cols()),
+ m_Z(A.rows(),A.cols()),
+ m_workspace(A.rows()*2),
+ m_maxIters(400),
+ m_isInitialized(false) {
+ compute(A, B, computeQZ);
+ }
+
+ /** \brief Returns matrix Q in the QZ decomposition.
+ *
+ * \returns A const reference to the matrix Q.
+ */
+ const MatrixType& matrixQ() const {
+ eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+ eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+ return m_Q;
+ }
+
+ /** \brief Returns matrix Z in the QZ decomposition.
+ *
+ * \returns A const reference to the matrix Z.
+ */
+ const MatrixType& matrixZ() const {
+ eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+ eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+ return m_Z;
+ }
+
+ /** \brief Returns matrix S in the QZ decomposition.
+ *
+ * \returns A const reference to the matrix S.
+ */
+ const MatrixType& matrixS() const {
+ eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+ return m_S;
+ }
+
+ /** \brief Returns matrix T in the QZ decomposition.
+ *
+ * \returns A const reference to the matrix T.
+ */
+ const MatrixType& matrixT() const {
+ eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+ return m_T;
+ }
+
+ /** \brief Computes QZ decomposition of given matrix.
+ *
+ * \param[in] A Matrix A.
+ * \param[in] B Matrix B.
+ * \param[in] computeQZ If false, Q and Z are not computed.
+ * \returns Reference to \c *this
+ */
+ RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true);
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+ return m_info;
+ }
+
+ /** \brief Returns number of performed QR-like iterations.
+ */
+ Index iterations() const
+ {
+ eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+ return m_global_iter;
+ }
+
+ /** Sets the maximal number of iterations allowed to converge to one eigenvalue
+ * or decouple the problem.
+ */
+ RealQZ& setMaxIterations(Index maxIters)
+ {
+ m_maxIters = maxIters;
+ return *this;
+ }
+
+ private:
+
+ MatrixType m_S, m_T, m_Q, m_Z;
+ Matrix<Scalar,Dynamic,1> m_workspace;
+ ComputationInfo m_info;
+ Index m_maxIters;
+ bool m_isInitialized;
+ bool m_computeQZ;
+ Scalar m_normOfT, m_normOfS;
+ Index m_global_iter;
+
+ typedef Matrix<Scalar,3,1> Vector3s;
+ typedef Matrix<Scalar,2,1> Vector2s;
+ typedef Matrix<Scalar,2,2> Matrix2s;
+ typedef JacobiRotation<Scalar> JRs;
+
+ void hessenbergTriangular();
+ void computeNorms();
+ Index findSmallSubdiagEntry(Index iu);
+ Index findSmallDiagEntry(Index f, Index l);
+ void splitOffTwoRows(Index i);
+ void pushDownZero(Index z, Index f, Index l);
+ void step(Index f, Index l, Index iter);
+
+ }; // RealQZ
+
+ /** \internal Reduces S and T to upper Hessenberg - triangular form */
+ template<typename MatrixType>
+ void RealQZ<MatrixType>::hessenbergTriangular()
+ {
+
+ const Index dim = m_S.cols();
+
+ // perform QR decomposition of T, overwrite T with R, save Q
+ HouseholderQR<MatrixType> qrT(m_T);
+ m_T = qrT.matrixQR();
+ m_T.template triangularView<StrictlyLower>().setZero();
+ m_Q = qrT.householderQ();
+ // overwrite S with Q* S
+ m_S.applyOnTheLeft(m_Q.adjoint());
+ // init Z as Identity
+ if (m_computeQZ)
+ m_Z = MatrixType::Identity(dim,dim);
+ // reduce S to upper Hessenberg with Givens rotations
+ for (Index j=0; j<=dim-3; j++) {
+ for (Index i=dim-1; i>=j+2; i--) {
+ JRs G;
+ // kill S(i,j)
+ if(m_S.coeff(i,j) != 0)
+ {
+ G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j));
+ m_S.coeffRef(i,j) = Scalar(0.0);
+ m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
+ m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
+ }
+ // update Q
+ if (m_computeQZ)
+ m_Q.applyOnTheRight(i-1,i,G);
+ // kill T(i,i-1)
+ if(m_T.coeff(i,i-1)!=Scalar(0))
+ {
+ G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i));
+ m_T.coeffRef(i,i-1) = Scalar(0.0);
+ m_S.applyOnTheRight(i,i-1,G);
+ m_T.topRows(i).applyOnTheRight(i,i-1,G);
+ }
+ // update Z
+ if (m_computeQZ)
+ m_Z.applyOnTheLeft(i,i-1,G.adjoint());
+ }
+ }
+ }
+
+ /** \internal Computes vector L1 norms of S and T when in Hessenberg-Triangular form already */
+ template<typename MatrixType>
+ inline void RealQZ<MatrixType>::computeNorms()
+ {
+ const Index size = m_S.cols();
+ m_normOfS = Scalar(0.0);
+ m_normOfT = Scalar(0.0);
+ for (Index j = 0; j < size; ++j)
+ {
+ m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
+ m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum();
+ }
+ }
+
+
+ /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */
+ template<typename MatrixType>
+ inline typename MatrixType::Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)
+ {
+ using std::abs;
+ Index res = iu;
+ while (res > 0)
+ {
+ Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res));
+ if (s == Scalar(0.0))
+ s = m_normOfS;
+ if (abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+ break;
+ res--;
+ }
+ return res;
+ }
+
+ /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */
+ template<typename MatrixType>
+ inline typename MatrixType::Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)
+ {
+ using std::abs;
+ Index res = l;
+ while (res >= f) {
+ if (abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT)
+ break;
+ res--;
+ }
+ return res;
+ }
+
+ /** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */
+ template<typename MatrixType>
+ inline void RealQZ<MatrixType>::splitOffTwoRows(Index i)
+ {
+ using std::abs;
+ using std::sqrt;
+ const Index dim=m_S.cols();
+ if (abs(m_S.coeff(i+1,i))==Scalar(0))
+ return;
+ Index z = findSmallDiagEntry(i,i+1);
+ if (z==i-1)
+ {
+ // block of (S T^{-1})
+ Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>().
+ template solve<OnTheRight>(m_S.template block<2,2>(i,i));
+ Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1));
+ Scalar q = p*p + STi(1,0)*STi(0,1);
+ if (q>=0) {
+ Scalar z = sqrt(q);
+ // one QR-like iteration for ABi - lambda I
+ // is enough - when we know exact eigenvalue in advance,
+ // convergence is immediate
+ JRs G;
+ if (p>=0)
+ G.makeGivens(p + z, STi(1,0));
+ else
+ G.makeGivens(p - z, STi(1,0));
+ m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
+ m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
+ // update Q
+ if (m_computeQZ)
+ m_Q.applyOnTheRight(i,i+1,G);
+
+ G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i));
+ m_S.topRows(i+2).applyOnTheRight(i+1,i,G);
+ m_T.topRows(i+2).applyOnTheRight(i+1,i,G);
+ // update Z
+ if (m_computeQZ)
+ m_Z.applyOnTheLeft(i+1,i,G.adjoint());
+
+ m_S.coeffRef(i+1,i) = Scalar(0.0);
+ m_T.coeffRef(i+1,i) = Scalar(0.0);
+ }
+ }
+ else
+ {
+ pushDownZero(z,i,i+1);
+ }
+ }
+
+ /** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */
+ template<typename MatrixType>
+ inline void RealQZ<MatrixType>::pushDownZero(Index z, Index f, Index l)
+ {
+ JRs G;
+ const Index dim = m_S.cols();
+ for (Index zz=z; zz<l; zz++)
+ {
+ // push 0 down
+ Index firstColS = zz>f ? (zz-1) : zz;
+ G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1));
+ m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint());
+ m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint());
+ m_T.coeffRef(zz+1,zz+1) = Scalar(0.0);
+ // update Q
+ if (m_computeQZ)
+ m_Q.applyOnTheRight(zz,zz+1,G);
+ // kill S(zz+1, zz-1)
+ if (zz>f)
+ {
+ G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1));
+ m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G);
+ m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G);
+ m_S.coeffRef(zz+1,zz-1) = Scalar(0.0);
+ // update Z
+ if (m_computeQZ)
+ m_Z.applyOnTheLeft(zz,zz-1,G.adjoint());
+ }
+ }
+ // finally kill S(l,l-1)
+ G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1));
+ m_S.applyOnTheRight(l,l-1,G);
+ m_T.applyOnTheRight(l,l-1,G);
+ m_S.coeffRef(l,l-1)=Scalar(0.0);
+ // update Z
+ if (m_computeQZ)
+ m_Z.applyOnTheLeft(l,l-1,G.adjoint());
+ }
+
+ /** \internal QR-like iterative step for block f..l */
+ template<typename MatrixType>
+ inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter)
+ {
+ using std::abs;
+ const Index dim = m_S.cols();
+
+ // x, y, z
+ Scalar x, y, z;
+ if (iter==10)
+ {
+ // Wilkinson ad hoc shift
+ const Scalar
+ a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1),
+ a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1),
+ b12=m_T.coeff(f+0,f+1),
+ b11i=Scalar(1.0)/m_T.coeff(f+0,f+0),
+ b22i=Scalar(1.0)/m_T.coeff(f+1,f+1),
+ a87=m_S.coeff(l-1,l-2),
+ a98=m_S.coeff(l-0,l-1),
+ b77i=Scalar(1.0)/m_T.coeff(l-2,l-2),
+ b88i=Scalar(1.0)/m_T.coeff(l-1,l-1);
+ Scalar ss = abs(a87*b77i) + abs(a98*b88i),
+ lpl = Scalar(1.5)*ss,
+ ll = ss*ss;
+ x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i
+ - a11*a21*b12*b11i*b11i*b22i;
+ y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i
+ - a21*a21*b12*b11i*b11i*b22i;
+ z = a21*a32*b11i*b22i;
+ }
+ else if (iter==16)
+ {
+ // another exceptional shift
+ x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) /
+ (m_T.coeff(l-1,l-1)*m_T.coeff(l,l));
+ y = m_S.coeff(f+1,f)/m_T.coeff(f,f);
+ z = 0;
+ }
+ else if (iter>23 && !(iter%8))
+ {
+ // extremely exceptional shift
+ x = internal::random<Scalar>(-1.0,1.0);
+ y = internal::random<Scalar>(-1.0,1.0);
+ z = internal::random<Scalar>(-1.0,1.0);
+ }
+ else
+ {
+ // Compute the shifts: (x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1
+ // where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1 where
+ // U and V are 2x2 bottom right sub matrices of A and B. Thus:
+ // = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1)
+ // = AB^-1AB^-1 + det(M) - tr(M)(AB^-1)
+ // Since we are only interested in having x, y, z with a correct ratio, we have:
+ const Scalar
+ a11 = m_S.coeff(f,f), a12 = m_S.coeff(f,f+1),
+ a21 = m_S.coeff(f+1,f), a22 = m_S.coeff(f+1,f+1),
+ a32 = m_S.coeff(f+2,f+1),
+
+ a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l),
+ a98 = m_S.coeff(l,l-1), a99 = m_S.coeff(l,l),
+
+ b11 = m_T.coeff(f,f), b12 = m_T.coeff(f,f+1),
+ b22 = m_T.coeff(f+1,f+1),
+
+ b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l),
+ b99 = m_T.coeff(l,l);
+
+ x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21)
+ + a12/b22 - (a11/b11)*(b12/b22);
+ y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99);
+ z = a32/b22;
+ }
+
+ JRs G;
+
+ for (Index k=f; k<=l-2; k++)
+ {
+ // variables for Householder reflections
+ Vector2s essential2;
+ Scalar tau, beta;
+
+ Vector3s hr(x,y,z);
+
+ // Q_k to annihilate S(k+1,k-1) and S(k+2,k-1)
+ hr.makeHouseholderInPlace(tau, beta);
+ essential2 = hr.template bottomRows<2>();
+ Index fc=(std::max)(k-1,Index(0)); // first col to update
+ m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
+ m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
+ if (m_computeQZ)
+ m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data());
+ if (k>f)
+ m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0);
+
+ // Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k)
+ hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1);
+ hr.makeHouseholderInPlace(tau, beta);
+ essential2 = hr.template bottomRows<2>();
+ {
+ Index lr = (std::min)(k+4,dim); // last row to update
+ Map<Matrix<Scalar,Dynamic,1> > tmp(m_workspace.data(),lr);
+ // S
+ tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2;
+ tmp += m_S.col(k+2).head(lr);
+ m_S.col(k+2).head(lr) -= tau*tmp;
+ m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
+ // T
+ tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2;
+ tmp += m_T.col(k+2).head(lr);
+ m_T.col(k+2).head(lr) -= tau*tmp;
+ m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
+ }
+ if (m_computeQZ)
+ {
+ // Z
+ Map<Matrix<Scalar,1,Dynamic> > tmp(m_workspace.data(),dim);
+ tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k));
+ tmp += m_Z.row(k+2);
+ m_Z.row(k+2) -= tau*tmp;
+ m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp);
+ }
+ m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0);
+
+ // Z_{k2} to annihilate T(k+1,k)
+ G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k));
+ m_S.applyOnTheRight(k+1,k,G);
+ m_T.applyOnTheRight(k+1,k,G);
+ // update Z
+ if (m_computeQZ)
+ m_Z.applyOnTheLeft(k+1,k,G.adjoint());
+ m_T.coeffRef(k+1,k) = Scalar(0.0);
+
+ // update x,y,z
+ x = m_S.coeff(k+1,k);
+ y = m_S.coeff(k+2,k);
+ if (k < l-2)
+ z = m_S.coeff(k+3,k);
+ } // loop over k
+
+ // Q_{n-1} to annihilate y = S(l,l-2)
+ G.makeGivens(x,y);
+ m_S.applyOnTheLeft(l-1,l,G.adjoint());
+ m_T.applyOnTheLeft(l-1,l,G.adjoint());
+ if (m_computeQZ)
+ m_Q.applyOnTheRight(l-1,l,G);
+ m_S.coeffRef(l,l-2) = Scalar(0.0);
+
+ // Z_{n-1} to annihilate T(l,l-1)
+ G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1));
+ m_S.applyOnTheRight(l,l-1,G);
+ m_T.applyOnTheRight(l,l-1,G);
+ if (m_computeQZ)
+ m_Z.applyOnTheLeft(l,l-1,G.adjoint());
+ m_T.coeffRef(l,l-1) = Scalar(0.0);
+ }
+
+
+ template<typename MatrixType>
+ RealQZ<MatrixType>& RealQZ<MatrixType>::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ)
+ {
+
+ const Index dim = A_in.cols();
+
+ eigen_assert (A_in.rows()==dim && A_in.cols()==dim
+ && B_in.rows()==dim && B_in.cols()==dim
+ && "Need square matrices of the same dimension");
+
+ m_isInitialized = true;
+ m_computeQZ = computeQZ;
+ m_S = A_in; m_T = B_in;
+ m_workspace.resize(dim*2);
+ m_global_iter = 0;
+
+ // entrance point: hessenberg triangular decomposition
+ hessenbergTriangular();
+ // compute L1 vector norms of T, S into m_normOfS, m_normOfT
+ computeNorms();
+
+ Index l = dim-1,
+ f,
+ local_iter = 0;
+
+ while (l>0 && local_iter<m_maxIters)
+ {
+ f = findSmallSubdiagEntry(l);
+ // now rows and columns f..l (including) decouple from the rest of the problem
+ if (f>0) m_S.coeffRef(f,f-1) = Scalar(0.0);
+ if (f == l) // One root found
+ {
+ l--;
+ local_iter = 0;
+ }
+ else if (f == l-1) // Two roots found
+ {
+ splitOffTwoRows(f);
+ l -= 2;
+ local_iter = 0;
+ }
+ else // No convergence yet
+ {
+ // if there's zero on diagonal of T, we can isolate an eigenvalue with Givens rotations
+ Index z = findSmallDiagEntry(f,l);
+ if (z>=f)
+ {
+ // zero found
+ pushDownZero(z,f,l);
+ }
+ else
+ {
+ // We are sure now that S.block(f,f, l-f+1,l-f+1) is unreduced upper-Hessenberg
+ // and T.block(f,f, l-f+1,l-f+1) is invertible upper-triangular, which allows us to
+ // apply a QR-like iteration to rows and columns f..l.
+ step(f,l, local_iter);
+ local_iter++;
+ m_global_iter++;
+ }
+ }
+ }
+ // check if we converged before reaching iterations limit
+ m_info = (local_iter<m_maxIters) ? Success : NoConvergence;
+ return *this;
+ } // end compute
+
+} // end namespace Eigen
+
+#endif //EIGEN_REAL_QZ_H
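
A minimal sketch exercising the new RealQZ class on a random pencil (A, B); after compute(), A = Q S Z and B = Q T Z should hold up to rounding, which can be checked through the accessors defined above:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(5, 5);
      Eigen::RealQZ<Eigen::MatrixXd> qz(A, B); // computeQZ = true by default
      if (qz.info() == Eigen::Success)
      {
        // Residuals of A = Q S Z and B = Q T Z; both should be tiny.
        double errA = (qz.matrixQ() * qz.matrixS() * qz.matrixZ() - A).norm();
        double errB = (qz.matrixQ() * qz.matrixT() * qz.matrixZ() - B).norm();
        std::cout << errA << " " << errB << " after "
                  << qz.iterations() << " iterations\n";
      }
    }
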
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h
index 781692eccd3..64d13634141 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h
@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -86,7 +86,8 @@ template<typename _MatrixType> class RealSchur
m_workspaceVector(size),
m_hess(size),
m_isInitialized(false),
- m_matUisUptodate(false)
+ m_matUisUptodate(false),
+ m_maxIters(-1)
{ }
/** \brief Constructor; computes real Schur decomposition of given matrix.
@@ -105,7 +106,8 @@ template<typename _MatrixType> class RealSchur
m_workspaceVector(matrix.rows()),
m_hess(matrix.rows()),
m_isInitialized(false),
- m_matUisUptodate(false)
+ m_matUisUptodate(false),
+ m_maxIters(-1)
{
compute(matrix, computeU);
}
@@ -160,9 +162,30 @@ template<typename _MatrixType> class RealSchur
*
* Example: \include RealSchur_compute.cpp
* Output: \verbinclude RealSchur_compute.out
+ *
+ * \sa compute(const MatrixType&, bool, Index)
*/
RealSchur& compute(const MatrixType& matrix, bool computeU = true);
+ /** \brief Computes Schur decomposition of a Hessenberg matrix H = Z T Z^T
+ * \param[in] matrixH Matrix in Hessenberg form H
+ * \param[in] matrixQ orthogonal matrix Q that transforms a matrix A to H : A = Q H Q^T
+ * \param computeU Computes the matrix U of the Schur vectors
+ * \return Reference to \c *this
+ *
+ * This routine assumes that the matrix is already reduced to Hessenberg form \p matrixH
+ * using either the class HessenbergDecomposition or by other means.
+ * It computes the upper quasi-triangular matrix T of the Schur decomposition of H.
+ * When computeU is true, this routine computes the matrix U such that
+ * A = U T U^T = (QZ) T (QZ)^T = Q H Q^T, where A is the initial matrix
+ *
+ * NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix
+ * is not available, the user should give an identity matrix (Q.setIdentity())
+ *
+ * \sa compute(const MatrixType&, bool)
+ */
+ template<typename HessMatrixType, typename OrthMatrixType>
+ RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful, \c NoConvergence otherwise.
@@ -173,11 +196,29 @@ template<typename _MatrixType> class RealSchur
return m_info;
}
- /** \brief Maximum number of iterations.
+ /** \brief Sets the maximum number of iterations allowed.
+ *
+ * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
+ * of the matrix.
+ */
+ RealSchur& setMaxIterations(Index maxIters)
+ {
+ m_maxIters = maxIters;
+ return *this;
+ }
+
+ /** \brief Returns the maximum number of iterations. */
+ Index getMaxIterations()
+ {
+ return m_maxIters;
+ }
+
+ /** \brief Maximum number of iterations per row.
*
- * Maximum number of iterations allowed for an eigenvalue to converge.
+ * If not otherwise specified, the maximum number of iterations is this number times the size of the
+ * matrix. It is currently set to 40.
*/
- static const int m_maxIterations = 40;
+ static const int m_maxIterationsPerRow = 40;
private:
@@ -188,12 +229,13 @@ template<typename _MatrixType> class RealSchur
ComputationInfo m_info;
bool m_isInitialized;
bool m_matUisUptodate;
+ Index m_maxIters;
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
- Index findSmallSubdiagEntry(Index iu, Scalar norm);
- void splitOffTwoRows(Index iu, bool computeU, Scalar exshift);
+ Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
+ void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
@@ -203,15 +245,30 @@ template<typename _MatrixType> class RealSchur
template<typename MatrixType>
RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
{
- assert(matrix.cols() == matrix.rows());
+ eigen_assert(matrix.cols() == matrix.rows());
+ Index maxIters = m_maxIters;
+ if (maxIters == -1)
+ maxIters = m_maxIterationsPerRow * matrix.rows();
// Step 1. Reduce to Hessenberg form
m_hess.compute(matrix);
- m_matT = m_hess.matrixH();
- if (computeU)
- m_matU = m_hess.matrixQ();
// Step 2. Reduce to real Schur form
+ computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);
+
+ return *this;
+}
+template<typename MatrixType>
+template<typename HessMatrixType, typename OrthMatrixType>
+RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
+{
+ m_matT = matrixH;
+ if(computeU)
+ m_matU = matrixQ;
+
+ Index maxIters = m_maxIters;
+ if (maxIters == -1)
+ maxIters = m_maxIterationsPerRow * matrixH.rows();
m_workspaceVector.resize(m_matT.cols());
Scalar* workspace = &m_workspaceVector.coeffRef(0);
@@ -220,8 +277,9 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix,
// Rows il,...,iu is the part we are working on (the active window).
// Rows iu+1,...,end are already brought in triangular form.
Index iu = m_matT.cols() - 1;
- Index iter = 0; // iteration count
- Scalar exshift(0); // sum of exceptional shifts
+ Index iter = 0; // iteration count for current eigenvalue
+ Index totalIter = 0; // iteration count for whole matrix
+ Scalar exshift(0); // sum of exceptional shifts
Scalar norm = computeNormOfT();
if(norm!=0)
@@ -251,14 +309,15 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix,
Vector3s firstHouseholderVector(0,0,0), shiftInfo;
computeShift(iu, iter, exshift, shiftInfo);
iter = iter + 1;
- if (iter > m_maxIterations * m_matT.cols()) break;
+ totalIter = totalIter + 1;
+ if (totalIter > maxIters) break;
Index im;
initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
}
}
}
- if(iter <= m_maxIterations * m_matT.cols())
+ if(totalIter <= maxIters)
m_info = Success;
else
m_info = NoConvergence;
@@ -278,21 +337,22 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
// + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
Scalar norm(0);
for (Index j = 0; j < size; ++j)
- norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
+ norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
return norm;
}
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
-inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm)
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
{
+ using std::abs;
Index res = iu;
while (res > 0)
{
- Scalar s = internal::abs(m_matT.coeff(res-1,res-1)) + internal::abs(m_matT.coeff(res,res));
+ Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
if (s == 0.0)
s = norm;
- if (internal::abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+ if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
break;
res--;
}
@@ -301,8 +361,10 @@ inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(I
/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
template<typename MatrixType>
-inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scalar exshift)
+inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift)
{
+ using std::sqrt;
+ using std::abs;
const Index size = m_matT.cols();
// The eigenvalues of the 2x2 matrix [a b; c d] are
@@ -314,7 +376,7 @@ inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scal
if (q >= Scalar(0)) // Two real eigenvalues
{
- Scalar z = internal::sqrt(internal::abs(q));
+ Scalar z = sqrt(abs(q));
JacobiRotation<Scalar> rot;
if (p >= Scalar(0))
rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));
@@ -336,6 +398,8 @@ inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scal
template<typename MatrixType>
inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
{
+ using std::sqrt;
+ using std::abs;
shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);
@@ -346,7 +410,7 @@ inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& ex
exshift += shiftInfo.coeff(0);
for (Index i = 0; i <= iu; ++i)
m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
- Scalar s = internal::abs(m_matT.coeff(iu,iu-1)) + internal::abs(m_matT.coeff(iu-1,iu-2));
+ Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2));
shiftInfo.coeffRef(0) = Scalar(0.75) * s;
shiftInfo.coeffRef(1) = Scalar(0.75) * s;
shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;
@@ -359,7 +423,7 @@ inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& ex
s = s * s + shiftInfo.coeff(2);
if (s > Scalar(0))
{
- s = internal::sqrt(s);
+ s = sqrt(s);
if (shiftInfo.coeff(1) < shiftInfo.coeff(0))
s = -s;
s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
@@ -376,6 +440,7 @@ inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& ex
template<typename MatrixType>
inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
{
+ using std::abs;
Vector3s& v = firstHouseholderVector; // alias to save typing
for (im = iu-2; im >= il; --im)
@@ -389,9 +454,9 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
if (im == il) {
break;
}
- const Scalar lhs = m_matT.coeff(im,im-1) * (internal::abs(v.coeff(1)) + internal::abs(v.coeff(2)));
- const Scalar rhs = v.coeff(0) * (internal::abs(m_matT.coeff(im-1,im-1)) + internal::abs(Tmm) + internal::abs(m_matT.coeff(im+1,im+1)));
- if (internal::abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
+ const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
+ const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
+ if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
{
break;
}
@@ -402,8 +467,8 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
template<typename MatrixType>
inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
{
- assert(im >= il);
- assert(im <= iu-2);
+ eigen_assert(im >= il);
+ eigen_assert(im <= iu-2);
const Index size = m_matT.cols();
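
The new computeFromHessenberg() entry point lets a caller reuse an existing Hessenberg reduction instead of redoing it inside compute(). A minimal sketch, following the NOTE above that matrixQ is referenced whenever computeU is true:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(20, 20);
      Eigen::HessenbergDecomposition<Eigen::MatrixXd> hess(A);

      // Evaluate H and Q into plain matrices before handing them over.
      Eigen::MatrixXd H = hess.matrixH();
      Eigen::MatrixXd Q = hess.matrixQ();

      Eigen::RealSchur<Eigen::MatrixXd> schur(A.rows());
      schur.computeFromHessenberg(H, Q, /*computeU=*/true);
      std::cout << (schur.info() == Eigen::Success ? "ok" : "no convergence")
                << "\n";
    }
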
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h
index 960ec3c764a..ad973646028 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur_MKL.h
@@ -48,7 +48,7 @@ RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<E
typedef MatrixType::Scalar Scalar; \
typedef MatrixType::RealScalar RealScalar; \
\
- assert(matrix.cols() == matrix.rows()); \
+ eigen_assert(matrix.cols() == matrix.rows()); \
\
lapack_int n = matrix.cols(), sdim, info; \
lapack_int lda = matrix.outerStride(); \
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index acc5576feb1..3993046a88e 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -384,6 +384,7 @@ template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const MatrixType& matrix, int options)
{
+ using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
@@ -394,7 +395,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
if(n==1)
{
- m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0));
+ m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0));
if(computeEigenvectors)
m_eivec.setOnes(n,n);
m_info = Success;
@@ -408,9 +409,10 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
MatrixType& mat = m_eivec;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
- RealScalar scale = matrix.cwiseAbs().maxCoeff();
+ mat = matrix.template triangularView<Lower>();
+ RealScalar scale = mat.cwiseAbs().maxCoeff();
if(scale==RealScalar(0)) scale = RealScalar(1);
- mat = matrix / scale;
+ mat.template triangularView<Lower>() /= scale;
m_subdiag.resize(n-1);
internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
@@ -421,7 +423,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
while (end>0)
{
for (Index i = start; i<end; ++i)
- if (internal::isMuchSmallerThan(internal::abs(m_subdiag[i]),(internal::abs(diag[i])+internal::abs(diag[i+1]))))
+ if (internal::isMuchSmallerThan(abs(m_subdiag[i]),(abs(diag[i])+abs(diag[i+1]))))
m_subdiag[i] = 0;
// find the largest unreduced block
@@ -667,7 +669,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
- const Scalar t0 = Scalar(0.5) * sqrt( abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
+ const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
roots(1) = t1 + t0;
@@ -675,6 +677,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
+ using std::sqrt;
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
@@ -696,9 +699,9 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
if(computeEigenvectors)
{
scaledMat.diagonal().array () -= eivals(1);
- Scalar a2 = abs2(scaledMat(0,0));
- Scalar c2 = abs2(scaledMat(1,1));
- Scalar b2 = abs2(scaledMat(1,0));
+ Scalar a2 = numext::abs2(scaledMat(0,0));
+ Scalar c2 = numext::abs2(scaledMat(1,1));
+ Scalar b2 = numext::abs2(scaledMat(1,0));
if(a2>c2)
{
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
@@ -736,14 +739,24 @@ namespace internal {
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
+ using std::abs;
RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
RealScalar e = subdiag[end-1];
// Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
// underflow thus leading to inf/NaN values when using the following commented code:
-// RealScalar e2 = abs2(subdiag[end-1]);
+// RealScalar e2 = numext::abs2(subdiag[end-1]);
// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
// This explains the following, somewhat more complicated, version:
- RealScalar mu = diag[end] - (e / (td + (td>0 ? 1 : -1))) * (e / hypot(td,e));
+ RealScalar mu = diag[end];
+ if(td==0)
+ mu -= abs(e);
+ else
+ {
+ RealScalar e2 = numext::abs2(subdiag[end-1]);
+ RealScalar h = numext::hypot(td,e);
+ if(e2==0) mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
+ else mu -= e2 / (td + (td>0 ? h : -h));
+ }
RealScalar x = diag[start] - mu;
RealScalar z = subdiag[start];
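
The guarded computation of mu above replaces the single hypot-based expression. A standalone sketch of the same branching, with plain doubles and a hypothetical helper name, makes the three cases explicit:

#include <cmath>

// Sketch of the guarded Wilkinson shift above; 'wilkinson_shift' is a
// hypothetical name, and diag_end, td, e mirror the variables in
// tridiagonal_qr_step.
double wilkinson_shift(double diag_end, double td, double e)
{
  if (td == 0)
    return diag_end - std::abs(e);      // degenerate case: no division by td
  double e2 = e * e;                    // can underflow to 0 for tiny e
  double h  = std::hypot(td, e);        // sqrt(td^2 + e^2) without overflow
  if (e2 == 0)                          // underflowed: use the factored form
    return diag_end - (e / (td + (td > 0 ? 1 : -1))) * (e / h);
  return diag_end - e2 / (td + (td > 0 ? h : -h));
}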
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
index 9380956b5f9..17c0dadd23e 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
@@ -40,7 +40,7 @@ namespace Eigen {
/** \internal Specialization for the data types supported by MKL */
#define EIGEN_MKL_EIG_SELFADJ(EIGTYPE, MKLTYPE, MKLRTYPE, MKLNAME, EIGCOLROW, MKLCOLROW ) \
-template<> inline\
+template<> inline \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, int options) \
{ \
@@ -56,7 +56,7 @@ SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(c
\
if(n==1) \
{ \
- m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0)); \
+ m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0)); \
if(computeEigenvectors) m_eivec.setOnes(n,n); \
m_info = Success; \
m_isInitialized = true; \
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h b/extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
index c34b7b3b801..192278d685d 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -96,7 +96,7 @@ template<typename _MatrixType> class Tridiagonalization
>::type SubDiagonalReturnType;
/** \brief Return type of matrixQ() */
- typedef typename HouseholderSequence<MatrixType,CoeffVectorType>::ConjugateReturnType HouseholderSequenceType;
+ typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
/** \brief Default constructor.
*
@@ -345,6 +345,7 @@ namespace internal {
template<typename MatrixType, typename CoeffVectorType>
void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
{
+ using numext::conj;
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@@ -425,8 +426,6 @@ struct tridiagonalization_inplace_selector;
template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
- typedef typename MatrixType::Index Index;
- //Index n = mat.rows();
eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
}
@@ -467,8 +466,9 @@ struct tridiagonalization_inplace_selector<MatrixType,3,false>
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
{
+ using std::sqrt;
diag[0] = mat(0,0);
- RealScalar v1norm2 = abs2(mat(2,0));
+ RealScalar v1norm2 = numext::abs2(mat(2,0));
if(v1norm2 == RealScalar(0))
{
diag[1] = mat(1,1);
@@ -480,7 +480,7 @@ struct tridiagonalization_inplace_selector<MatrixType,3,false>
}
else
{
- RealScalar beta = sqrt(abs2(mat(1,0)) + v1norm2);
+ RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2);
RealScalar invBeta = RealScalar(1)/beta;
Scalar m01 = mat(1,0) * invBeta;
Scalar m02 = mat(2,0) * invBeta;
@@ -510,7 +510,7 @@ struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
template<typename DiagonalType, typename SubDiagonalType>
static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
{
- diag(0,0) = real(mat(0,0));
+ diag(0,0) = numext::real(mat(0,0));
if(extractQ)
mat(0,0) = Scalar(1);
}
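
The HouseholderSequenceType change above only alters the expression type returned by matrixQ(); typical call sites are unaffected. A minimal sketch using the standard Eigen API:

#include <Eigen/Eigenvalues>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  A = (A + A.transpose()).eval();        // make A selfadjoint
  Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);
  Eigen::MatrixXd Q = tri.matrixQ();     // the HouseholderSequenceType above
  Eigen::MatrixXd T = tri.matrixT();     // the tridiagonal factor
  // A == Q * T * Q^T up to rounding.
  std::cout << (Q * T * Q.transpose() - A).norm() << std::endl;
  return 0;
}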
diff --git a/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h b/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h
index 5830fcd35fc..8e186d57a34 100644
--- a/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h
+++ b/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h
@@ -56,7 +56,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** Default constructor initializing a null box. */
- inline explicit AlignedBox()
+ inline AlignedBox()
{ if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
/** Constructs a null box with \a _dim the dimension of the ambient space. */
@@ -71,7 +71,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
template<typename Derived>
inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
{
- const typename internal::nested<Derived,2>::type p(a_p.derived());
+ typename internal::nested<Derived,2>::type p(a_p.derived());
m_min = p;
m_max = p;
}
@@ -79,7 +79,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
~AlignedBox() {}
/** \returns the dimension in which the box holds */
- inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : Index(AmbientDimAtCompileTime); }
+ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
/** \deprecated use isEmpty */
inline bool isNull() const { return isEmpty(); }
@@ -248,14 +248,14 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
*/
template<typename Derived>
inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
- { return internal::sqrt(NonInteger(squaredExteriorDistance(p))); }
+ { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(p))); }
/** \returns the distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa squaredExteriorDistance()
*/
inline NonInteger exteriorDistance(const AlignedBox& b) const
- { return internal::sqrt(NonInteger(squaredExteriorDistance(b))); }
+ { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); }
/** \returns \c *this with scalar type casted to \a NewScalarType
*
@@ -282,7 +282,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const AlignedBox& other, RealScalar prec = ScalarTraits::dummy_precision()) const
+ bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const
{ return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
protected:
@@ -296,7 +296,7 @@ template<typename Scalar,int AmbientDim>
template<typename Derived>
inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const
{
- const typename internal::nested<Derived,2*AmbientDim>::type p(a_p.derived());
+ typename internal::nested<Derived,2*AmbientDim>::type p(a_p.derived());
Scalar dist2(0);
Scalar aux;
for (Index k=0; k<dim(); ++k)
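
The dim() change above matters for dynamic-size boxes, whose reported ambient dimension was previously off by one. A small check against the standard Eigen API (sizes are arbitrary):

#include <Eigen/Geometry>
#include <cassert>
#include <cmath>

int main()
{
  using namespace Eigen;
  AlignedBox<double, Dynamic> box(3);     // null box in ambient dimension 3
  assert(box.dim() == 3);                 // returned 2 before the fix above
  AlignedBox3d unit(Vector3d::Zero(), Vector3d::Ones());
  double d = unit.exteriorDistance(Vector3d(2.0, 0.0, 0.0));
  assert(std::abs(d - 1.0) < 1e-12);      // point lies 1 unit outside along x
  return 0;
}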
diff --git a/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h b/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
index 67197ac78c3..553d38c7449 100644
--- a/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
+++ b/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
@@ -76,7 +76,7 @@ public:
* \warning If the \a axis vector is not normalized, then the angle-axis object
* represents an invalid rotation. */
template<typename Derived>
- inline AngleAxis(Scalar angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
+ inline AngleAxis(const Scalar& angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
/** Constructs and initialize the angle-axis rotation from a quaternion \a q. */
template<typename QuatDerived> inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; }
/** Constructs and initialize the angle-axis rotation from a 3x3 rotation matrix. */
@@ -137,7 +137,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const AngleAxis& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ bool isApprox(const AngleAxis& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); }
};
@@ -161,6 +161,7 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
using std::acos;
using std::min;
using std::max;
+ using std::sqrt;
Scalar n2 = q.vec().squaredNorm();
if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
{
@@ -170,7 +171,7 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
else
{
m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1)));
- m_axis = q.vec() / internal::sqrt(n2);
+ m_axis = q.vec() / sqrt(n2);
}
return *this;
}
@@ -202,9 +203,11 @@ template<typename Scalar>
typename AngleAxis<Scalar>::Matrix3
AngleAxis<Scalar>::toRotationMatrix(void) const
{
+ using std::sin;
+ using std::cos;
Matrix3 res;
- Vector3 sin_axis = internal::sin(m_angle) * m_axis;
- Scalar c = internal::cos(m_angle);
+ Vector3 sin_axis = sin(m_angle) * m_axis;
+ Scalar c = cos(m_angle);
Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
Scalar tmp;
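
The switch to std::sin/std::cos does not change AngleAxis behaviour; a round-trip sketch for reference (standard Eigen API):

#include <Eigen/Geometry>
#include <cassert>
#include <cmath>

int main()
{
  using namespace Eigen;
  AngleAxisd aa(0.25 * 3.14159265358979323846, Vector3d::UnitZ());
  Matrix3d R = aa.toRotationMatrix();
  AngleAxisd back(R);                     // recover angle/axis from the matrix
  assert(std::abs(back.angle() - aa.angle()) < 1e-12);
  return 0;
}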
diff --git a/extern/Eigen3/Eigen/src/Geometry/EulerAngles.h b/extern/Eigen3/Eigen/src/Geometry/EulerAngles.h
index e424d240604..82802fb43cf 100644
--- a/extern/Eigen3/Eigen/src/Geometry/EulerAngles.h
+++ b/extern/Eigen3/Eigen/src/Geometry/EulerAngles.h
@@ -27,55 +27,75 @@ namespace Eigen {
* * AngleAxisf(ea[1], Vector3f::UnitX())
* * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
* This corresponds to the right-multiply conventions (with right hand side frames).
+ *
+ * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi].
+ *
+ * \sa class AngleAxis
*/
template<typename Derived>
inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
{
+ using std::atan2;
+ using std::sin;
+ using std::cos;
/* Implemented from Graphics Gems IV */
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
Matrix<Scalar,3,1> res;
typedef Matrix<typename Derived::Scalar,2,1> Vector2;
- const Scalar epsilon = NumTraits<Scalar>::dummy_precision();
const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
const Index i = a0;
const Index j = (a0 + 1 + odd)%3;
const Index k = (a0 + 2 - odd)%3;
-
+
if (a0==a2)
{
- Scalar s = Vector2(coeff(j,i) , coeff(k,i)).norm();
- res[1] = internal::atan2(s, coeff(i,i));
- if (s > epsilon)
+ res[0] = atan2(coeff(j,i), coeff(k,i));
+ if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0)))
{
- res[0] = internal::atan2(coeff(j,i), coeff(k,i));
- res[2] = internal::atan2(coeff(i,j),-coeff(i,k));
+ res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI);
+ Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
+ res[1] = -atan2(s2, coeff(i,i));
}
else
{
- res[0] = Scalar(0);
- res[2] = (coeff(i,i)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j));
+ Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
+ res[1] = atan2(s2, coeff(i,i));
}
- }
+
+ // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles,
+ // we can compute their respective rotation, and apply its inverse to M. Since the result must
+ // be a rotation around x, we have:
+ //
+ // c2 s1.s2 c1.s2 1 0 0
+ // 0 c1 -s1 * M = 0 c3 s3
+ // -s2 s1.c2 c1.c2 0 -s3 c3
+ //
+ // Thus: m11.c1 - m21.s1 = c3 & m12.c1 - m22.s1 = s3
+
+ Scalar s1 = sin(res[0]);
+ Scalar c1 = cos(res[0]);
+ res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j));
+ }
else
{
- Scalar c = Vector2(coeff(i,i) , coeff(i,j)).norm();
- res[1] = internal::atan2(-coeff(i,k), c);
- if (c > epsilon)
- {
- res[0] = internal::atan2(coeff(j,k), coeff(k,k));
- res[2] = internal::atan2(coeff(i,j), coeff(i,i));
+ res[0] = atan2(coeff(j,k), coeff(k,k));
+ Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm();
+ if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) {
+ res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI);
+ res[1] = atan2(-coeff(i,k), -c2);
}
else
- {
- res[0] = Scalar(0);
- res[2] = (coeff(i,k)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j));
- }
+ res[1] = atan2(-coeff(i,k), c2);
+ Scalar s1 = sin(res[0]);
+ Scalar c1 = cos(res[0]);
+ res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j));
}
if (!odd)
res = -res;
+
return res;
}
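
The rewritten eulerAngles() above keeps the decomposition documented in its header comment while guaranteeing the ranges [0:pi]x[-pi:pi]x[-pi:pi]. A round-trip sketch (standard Eigen API):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  Matrix3f m;
  m = AngleAxisf(0.25f * 3.14159265f, Vector3f::UnitX())
    * AngleAxisf(0.50f * 3.14159265f, Vector3f::UnitY())
    * AngleAxisf(0.33f * 3.14159265f, Vector3f::UnitZ());
  Vector3f ea = m.eulerAngles(2, 0, 2);    // first angle in [0:pi]
  Matrix3f n;
  n = AngleAxisf(ea[0], Vector3f::UnitZ())
    * AngleAxisf(ea[1], Vector3f::UnitX())
    * AngleAxisf(ea[2], Vector3f::UnitZ());
  std::cout << (n - m).norm() << std::endl;  // ~0: the angles reproduce m
  return 0;
}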
diff --git a/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h b/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h
index df03feb55c6..00e71d190c3 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h
@@ -59,7 +59,7 @@ template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl
} // end namespace internal
template<typename MatrixType,int _Direction> class Homogeneous
- : public MatrixBase<Homogeneous<MatrixType,_Direction> >
+ : internal::no_assignment_operator, public MatrixBase<Homogeneous<MatrixType,_Direction> >
{
public:
diff --git a/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h b/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h
index 1b7c7c78c80..aeff43fefa6 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h
@@ -50,7 +50,7 @@ public:
typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;
/** Default constructor without initialization */
- inline explicit Hyperplane() {}
+ inline Hyperplane() {}
template<int OtherOptions>
Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
@@ -75,7 +75,7 @@ public:
* such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
* \warning the vector normal is assumed to be normalized.
*/
- inline Hyperplane(const VectorType& n, Scalar d)
+ inline Hyperplane(const VectorType& n, const Scalar& d)
: m_coeffs(n.size()+1)
{
normal() = n;
@@ -135,7 +135,7 @@ public:
/** \returns the absolute distance between the plane \c *this and a point \a p.
* \sa signedDistance()
*/
- inline Scalar absDistance(const VectorType& p) const { return internal::abs(signedDistance(p)); }
+ inline Scalar absDistance(const VectorType& p) const { using std::abs; return abs(signedDistance(p)); }
/** \returns the projection of a point \a p onto the plane \c *this.
*/
@@ -178,13 +178,14 @@ public:
*/
VectorType intersection(const Hyperplane& other) const
{
+ using std::abs;
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
// since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests
// whether the two lines are approximately parallel.
if(internal::isMuchSmallerThan(det, Scalar(1)))
{ // special case where the two lines are approximately parallel. Pick any point on the first line.
- if(internal::abs(coeffs().coeff(1))>internal::abs(coeffs().coeff(0)))
+ if(abs(coeffs().coeff(1))>abs(coeffs().coeff(0)))
return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
else
return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
@@ -256,7 +257,7 @@ public:
*
* \sa MatrixBase::isApprox() */
template<int OtherOptions>
- bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_coeffs.isApprox(other.m_coeffs, prec); }
protected:
diff --git a/extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h b/extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h
index 11ad5829cda..556bc81604e 100644
--- a/extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h
+++ b/extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h
@@ -33,9 +33,9 @@ MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
typename internal::nested<Derived,2>::type lhs(derived());
typename internal::nested<OtherDerived,2>::type rhs(other.derived());
return typename cross_product_return_type<OtherDerived>::type(
- internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
- internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
- internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
+ numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+ numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+ numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
);
}
@@ -49,9 +49,9 @@ struct cross3_impl {
run(const VectorLhs& lhs, const VectorRhs& rhs)
{
return typename internal::plain_matrix_type<VectorLhs>::type(
- internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
- internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
- internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
+ numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+ numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+ numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
0
);
}
@@ -78,8 +78,8 @@ MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const
typedef typename internal::nested<Derived,2>::type DerivedNested;
typedef typename internal::nested<OtherDerived,2>::type OtherDerivedNested;
- const DerivedNested lhs(derived());
- const OtherDerivedNested rhs(other.derived());
+ DerivedNested lhs(derived());
+ OtherDerivedNested rhs(other.derived());
return internal::cross3_impl<Architecture::Target,
typename internal::remove_all<DerivedNested>::type,
@@ -141,8 +141,8 @@ struct unitOrthogonal_selector
if (maxi==0)
sndi = 1;
RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm();
- perp.coeffRef(maxi) = -conj(src.coeff(sndi)) * invnm;
- perp.coeffRef(sndi) = conj(src.coeff(maxi)) * invnm;
+ perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm;
+ perp.coeffRef(sndi) = numext::conj(src.coeff(maxi)) * invnm;
return perp;
}
@@ -168,8 +168,8 @@ struct unitOrthogonal_selector<Derived,3>
|| (!isMuchSmallerThan(src.y(), src.z())))
{
RealScalar invnm = RealScalar(1)/src.template head<2>().norm();
- perp.coeffRef(0) = -conj(src.y())*invnm;
- perp.coeffRef(1) = conj(src.x())*invnm;
+ perp.coeffRef(0) = -numext::conj(src.y())*invnm;
+ perp.coeffRef(1) = numext::conj(src.x())*invnm;
perp.coeffRef(2) = 0;
}
/* if both x and y are close to zero, then the vector is close
@@ -180,8 +180,8 @@ struct unitOrthogonal_selector<Derived,3>
{
RealScalar invnm = RealScalar(1)/src.template tail<2>().norm();
perp.coeffRef(0) = 0;
- perp.coeffRef(1) = -conj(src.z())*invnm;
- perp.coeffRef(2) = conj(src.y())*invnm;
+ perp.coeffRef(1) = -numext::conj(src.z())*invnm;
+ perp.coeffRef(2) = numext::conj(src.y())*invnm;
}
return perp;
@@ -193,7 +193,7 @@ struct unitOrthogonal_selector<Derived,2>
{
typedef typename plain_matrix_type<Derived>::type VectorType;
static inline VectorType run(const Derived& src)
- { return VectorType(-conj(src.y()), conj(src.x())).normalized(); }
+ { return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); }
};
} // end namespace internal
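
Switching to numext::conj is a no-op for real scalars, so the geometric guarantees of cross() and unitOrthogonal() are unchanged. A quick sketch:

#include <Eigen/Geometry>
#include <cassert>
#include <cmath>

int main()
{
  using namespace Eigen;
  Vector3d a(1.0, 2.0, 3.0), b(-1.0, 0.0, 2.0);
  Vector3d c = a.cross(b);                // perpendicular to both inputs
  assert(std::abs(c.dot(a)) < 1e-12 && std::abs(c.dot(b)) < 1e-12);
  Vector3d u = a.unitOrthogonal();        // unit vector perpendicular to a
  assert(std::abs(u.dot(a)) < 1e-12 && std::abs(u.norm() - 1.0) < 1e-12);
  return 0;
}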
diff --git a/extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h b/extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h
index 719a904413d..77fa228e6a5 100644
--- a/extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h
+++ b/extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h
@@ -41,7 +41,7 @@ public:
typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;
/** Default constructor without initialization */
- inline explicit ParametrizedLine() {}
+ inline ParametrizedLine() {}
template<int OtherOptions>
ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
@@ -87,13 +87,13 @@ public:
/** \returns the distance of a point \a p to its projection onto the line \c *this.
* \sa squaredDistance()
*/
- RealScalar distance(const VectorType& p) const { return internal::sqrt(squaredDistance(p)); }
+ RealScalar distance(const VectorType& p) const { using std::sqrt; return sqrt(squaredDistance(p)); }
/** \returns the projection of a point \a p onto the line \c *this. */
VectorType projection(const VectorType& p) const
{ return origin() + direction().dot(p-origin()) * direction(); }
- VectorType pointAt( Scalar t ) const;
+ VectorType pointAt(const Scalar& t) const;
template <int OtherOptions>
Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
@@ -154,7 +154,7 @@ inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const H
*/
template <typename _Scalar, int _AmbientDim, int _Options>
inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
-ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt( _Scalar t ) const
+ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const
{
return origin() + (direction()*t);
}
diff --git a/extern/Eigen3/Eigen/src/Geometry/Quaternion.h b/extern/Eigen3/Eigen/src/Geometry/Quaternion.h
index 8792e2da2ae..9fee0c91980 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Quaternion.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Quaternion.h
@@ -150,18 +150,14 @@ public:
/** \returns the conjugated quaternion */
Quaternion<Scalar> conjugate() const;
- /** \returns an interpolation for a constant motion between \a other and \c *this
- * \a t in [0;1]
- * see http://en.wikipedia.org/wiki/Slerp
- */
- template<class OtherDerived> Quaternion<Scalar> slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const;
+ template<class OtherDerived> Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const;
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
template<class OtherDerived>
- bool isApprox(const QuaternionBase<OtherDerived>& other, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ bool isApprox(const QuaternionBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
{ return coeffs().isApprox(other.coeffs(), prec); }
  /** \returns the result of applying the rotation to the vector \a v */
@@ -193,11 +189,12 @@ public:
*
* \brief The quaternion class used to represent 3D orientations and rotations
*
- * \param _Scalar the scalar type, i.e., the type of the coefficients
+ * \tparam _Scalar the scalar type, i.e., the type of the coefficients
+ * \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign.
*
* This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
* orientations and rotations of objects in three dimensions. Compared to other representations
- * like Euler angles or 3x3 matrices, quatertions offer the following advantages:
+ * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
* \li \b compact storage (4 scalars)
* \li \b efficient to compose (28 flops),
* \li \b stable spherical interpolation
@@ -248,7 +245,7 @@ public:
* while internally the coefficients are stored in the following order:
* [\c x, \c y, \c z, \c w]
*/
- inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z) : m_coeffs(x, y, z, w){}
+ inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){}
/** Constructs and initialize a quaternion from the array data */
inline Quaternion(const Scalar* data) : m_coeffs(data) {}
@@ -304,41 +301,29 @@ typedef Quaternion<double> Quaterniond;
namespace internal {
template<typename _Scalar, int _Options>
- struct traits<Map<Quaternion<_Scalar>, _Options> >:
- traits<Quaternion<_Scalar, _Options> >
+ struct traits<Map<Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> >
{
- typedef _Scalar Scalar;
typedef Map<Matrix<_Scalar,4,1>, _Options> Coefficients;
-
- typedef traits<Quaternion<_Scalar, _Options> > TraitsBase;
- enum {
- IsAligned = TraitsBase::IsAligned,
-
- Flags = TraitsBase::Flags
- };
};
}
namespace internal {
template<typename _Scalar, int _Options>
- struct traits<Map<const Quaternion<_Scalar>, _Options> >:
- traits<Quaternion<_Scalar> >
+ struct traits<Map<const Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> >
{
- typedef _Scalar Scalar;
typedef Map<const Matrix<_Scalar,4,1>, _Options> Coefficients;
-
- typedef traits<Quaternion<_Scalar, _Options> > TraitsBase;
+ typedef traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> > TraitsBase;
enum {
- IsAligned = TraitsBase::IsAligned,
Flags = TraitsBase::Flags & ~LvalueBit
};
};
}
-/** \brief Quaternion expression mapping a constant memory buffer
+/** \ingroup Geometry_Module
+ * \brief Quaternion expression mapping a constant memory buffer
*
- * \param _Scalar the type of the Quaternion coefficients
- * \param _Options see class Map
+ * \tparam _Scalar the type of the Quaternion coefficients
+ * \tparam _Options see class Map
*
  * This is a specialization of class Map for Quaternion. This class allows one to view
  * a 4-scalar memory buffer as an Eigen Quaternion object.
@@ -371,10 +356,11 @@ class Map<const Quaternion<_Scalar>, _Options >
const Coefficients m_coeffs;
};
-/** \brief Expression of a quaternion from a memory buffer
+/** \ingroup Geometry_Module
+ * \brief Expression of a quaternion from a memory buffer
*
- * \param _Scalar the type of the Quaternion coefficients
- * \param _Options see class Map
+ * \tparam _Scalar the type of the Quaternion coefficients
+ * \tparam _Options see class Map
*
  * This is a specialization of class Map for Quaternion. This class allows one to view
  * a 4-scalar memory buffer as an Eigen Quaternion object.
@@ -395,7 +381,7 @@ class Map<Quaternion<_Scalar>, _Options >
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
*
- * The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order:
+ * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
* \code *coeffs == {x, y, z, w} \endcode
*
* If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
@@ -409,16 +395,16 @@ class Map<Quaternion<_Scalar>, _Options >
};
/** \ingroup Geometry_Module
- * Map an unaligned array of single precision scalar as a quaternion */
+ * Map an unaligned array of single precision scalars as a quaternion */
typedef Map<Quaternion<float>, 0> QuaternionMapf;
/** \ingroup Geometry_Module
- * Map an unaligned array of double precision scalar as a quaternion */
+ * Map an unaligned array of double precision scalars as a quaternion */
typedef Map<Quaternion<double>, 0> QuaternionMapd;
/** \ingroup Geometry_Module
- * Map a 16-bits aligned array of double precision scalars as a quaternion */
+ * Map a 16-byte aligned array of single precision scalars as a quaternion */
typedef Map<Quaternion<float>, Aligned> QuaternionMapAlignedf;
/** \ingroup Geometry_Module
- * Map a 16-bits aligned array of double precision scalars as a quaternion */
+ * Map a 16-byte aligned array of double precision scalars as a quaternion */
typedef Map<Quaternion<double>, Aligned> QuaternionMapAlignedd;
/***************************************************************************
@@ -505,9 +491,11 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const Quaternion
template<class Derived>
EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)
{
+ using std::cos;
+ using std::sin;
Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
- this->w() = internal::cos(ha);
- this->vec() = internal::sin(ha) * aa.axis();
+ this->w() = cos(ha);
+ this->vec() = sin(ha) * aa.axis();
return derived();
}
@@ -581,12 +569,13 @@ template<typename Derived1, typename Derived2>
inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
{
using std::max;
+ using std::sqrt;
Vector3 v0 = a.normalized();
Vector3 v1 = b.normalized();
Scalar c = v1.dot(v0);
// if dot == -1, vectors are nearly opposites
- // => accuraletly compute the rotation axis by computing the
+ // => accurately compute the rotation axis by computing the
// intersection of the two planes. This is done by solving:
// x^T v0 = 0
// x^T v1 = 0
@@ -601,12 +590,12 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
Vector3 axis = svd.matrixV().col(2);
Scalar w2 = (Scalar(1)+c)*Scalar(0.5);
- this->w() = internal::sqrt(w2);
- this->vec() = axis * internal::sqrt(Scalar(1) - w2);
+ this->w() = sqrt(w2);
+ this->vec() = axis * sqrt(Scalar(1) - w2);
return derived();
}
Vector3 axis = v0.cross(v1);
- Scalar s = internal::sqrt((Scalar(1)+c)*Scalar(2));
+ Scalar s = sqrt((Scalar(1)+c)*Scalar(2));
Scalar invs = Scalar(1)/s;
this->vec() = axis * invs;
this->w() = s * Scalar(0.5);
@@ -677,24 +666,32 @@ inline typename internal::traits<Derived>::Scalar
QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
{
using std::acos;
- double d = internal::abs(this->dot(other));
+ using std::abs;
+ double d = abs(this->dot(other));
if (d>=1.0)
return Scalar(0);
return static_cast<Scalar>(2 * acos(d));
}
+
+
/** \returns the spherical linear interpolation between the two quaternions
- * \c *this and \a other at the parameter \a t
+ * \c *this and \a other at the parameter \a t in [0;1].
+ *
+ * This represents an interpolation for a constant motion between \c *this and \a other,
+ * see also http://en.wikipedia.org/wiki/Slerp.
*/
template <class Derived>
template <class OtherDerived>
Quaternion<typename internal::traits<Derived>::Scalar>
-QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const
+QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const
{
using std::acos;
+ using std::sin;
+ using std::abs;
static const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
Scalar d = this->dot(other);
- Scalar absD = internal::abs(d);
+ Scalar absD = abs(d);
Scalar scale0;
Scalar scale1;
@@ -708,10 +705,10 @@ QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& oth
{
// theta is the angle between the 2 quaternions
Scalar theta = acos(absD);
- Scalar sinTheta = internal::sin(theta);
+ Scalar sinTheta = sin(theta);
- scale0 = internal::sin( ( Scalar(1) - t ) * theta) / sinTheta;
- scale1 = internal::sin( ( t * theta) ) / sinTheta;
+ scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
+ scale1 = sin( ( t * theta) ) / sinTheta;
}
if(d<0) scale1 = -scale1;
@@ -728,6 +725,7 @@ struct quaternionbase_assign_impl<Other,3,3>
typedef DenseIndex Index;
template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& mat)
{
+ using std::sqrt;
// This algorithm comes from "Quaternion Calculus and Fast Animation",
// Ken Shoemake, 1987 SIGGRAPH course notes
Scalar t = mat.trace();
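
Passing t by const reference keeps slerp() source-compatible with existing callers. For reference, a minimal interpolation sketch (standard Eigen API):

#include <Eigen/Geometry>
#include <cassert>
#include <cmath>

int main()
{
  using namespace Eigen;
  Quaternionf qa(AngleAxisf(0.1f, Vector3f::UnitY()));
  Quaternionf qb(AngleAxisf(1.2f, Vector3f::UnitY()));
  // q(t) moves from qa (t=0) to qb (t=1) at constant angular speed.
  Quaternionf q = qa.slerp(0.5f, qb);
  AngleAxisf mid(q);                      // expect 0.65 rad about +Y
  assert(std::abs(mid.angle() - 0.65f) < 1e-4f);
  return 0;
}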
diff --git a/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h b/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h
index 868e2ef312f..1cac343a5ee 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h
@@ -59,7 +59,7 @@ protected:
public:
  /** Construct a 2D counter-clockwise rotation from the angle \a a in radians. */
- inline Rotation2D(Scalar a) : m_angle(a) {}
+ inline Rotation2D(const Scalar& a) : m_angle(a) {}
/** \returns the rotation angle */
inline Scalar angle() const { return m_angle; }
@@ -89,7 +89,7 @@ public:
/** \returns the spherical interpolation between \c *this and \a other using
* parameter \a t. It is in fact equivalent to a linear interpolation.
*/
- inline Rotation2D slerp(Scalar t, const Rotation2D& other) const
+ inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const
{ return m_angle * (1-t) + other.angle() * t; }
/** \returns \c *this with scalar type casted to \a NewScalarType
@@ -114,7 +114,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ bool isApprox(const Rotation2D& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return internal::isApprox(m_angle,other.m_angle, prec); }
};
@@ -133,8 +133,9 @@ template<typename Scalar>
template<typename Derived>
Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
{
+ using std::atan2;
EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
- m_angle = internal::atan2(mat.coeff(1,0), mat.coeff(0,0));
+ m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0));
return *this;
}
@@ -144,8 +145,10 @@ template<typename Scalar>
typename Rotation2D<Scalar>::Matrix2
Rotation2D<Scalar>::toRotationMatrix(void) const
{
- Scalar sinA = internal::sin(m_angle);
- Scalar cosA = internal::cos(m_angle);
+ using std::sin;
+ using std::cos;
+ Scalar sinA = sin(m_angle);
+ Scalar cosA = cos(m_angle);
return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
}
diff --git a/extern/Eigen3/Eigen/src/Geometry/Scaling.h b/extern/Eigen3/Eigen/src/Geometry/Scaling.h
index 8edcac31c74..1c25f36fe62 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Scaling.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Scaling.h
@@ -99,7 +99,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const UniformScaling& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ bool isApprox(const UniformScaling& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return internal::isApprox(m_factor, other.factor(), prec); }
};
@@ -122,11 +122,11 @@ static inline UniformScaling<std::complex<RealScalar> > Scaling(const std::compl
/** Constructs a 2D axis aligned scaling */
template<typename Scalar>
-static inline DiagonalMatrix<Scalar,2> Scaling(Scalar sx, Scalar sy)
+static inline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy)
{ return DiagonalMatrix<Scalar,2>(sx, sy); }
/** Constructs a 3D axis aligned scaling */
template<typename Scalar>
-static inline DiagonalMatrix<Scalar,3> Scaling(Scalar sx, Scalar sy, Scalar sz)
+static inline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
{ return DiagonalMatrix<Scalar,3>(sx, sy, sz); }
/** Constructs an axis aligned scaling expression from vector expression \a coeffs
diff --git a/extern/Eigen3/Eigen/src/Geometry/Transform.h b/extern/Eigen3/Eigen/src/Geometry/Transform.h
index 4c1ef8eaade..498fea41a90 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Transform.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Transform.h
@@ -506,8 +506,8 @@ public:
template<typename OtherDerived>
inline Transform& prescale(const MatrixBase<OtherDerived> &other);
- inline Transform& scale(Scalar s);
- inline Transform& prescale(Scalar s);
+ inline Transform& scale(const Scalar& s);
+ inline Transform& prescale(const Scalar& s);
template<typename OtherDerived>
inline Transform& translate(const MatrixBase<OtherDerived> &other);
@@ -521,8 +521,8 @@ public:
template<typename RotationType>
inline Transform& prerotate(const RotationType& rotation);
- Transform& shear(Scalar sx, Scalar sy);
- Transform& preshear(Scalar sx, Scalar sy);
+ Transform& shear(const Scalar& sx, const Scalar& sy);
+ Transform& preshear(const Scalar& sx, const Scalar& sy);
inline Transform& operator=(const TranslationType& t);
inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
@@ -530,9 +530,9 @@ public:
inline Transform& operator=(const UniformScaling<Scalar>& t);
inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
- inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry)> operator*(const UniformScaling<Scalar>& s) const
+ inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?int(Affine):int(Mode))> operator*(const UniformScaling<Scalar>& s) const
{
- Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry),Options> res = *this;
+ Transform<Scalar,Dim,(int(Mode)==int(Isometry)?int(Affine):int(Mode)),Options> res = *this;
res.scale(s.factor());
return res;
}
@@ -584,7 +584,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
- bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ bool isApprox(const Transform& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_matrix.isApprox(other.m_matrix, prec); }
/** Sets the last row to [0 ... 0 1]
@@ -794,7 +794,7 @@ Transform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other)
* \sa prescale(Scalar)
*/
template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(Scalar s)
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(const Scalar& s)
{
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
linearExt() *= s;
@@ -821,7 +821,7 @@ Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &oth
* \sa scale(Scalar)
*/
template<typename Scalar, int Dim, int Mode, int Options>
-inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(Scalar s)
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(const Scalar& s)
{
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
m_matrix.template topRows<Dim>() *= s;
@@ -909,7 +909,7 @@ Transform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation)
*/
template<typename Scalar, int Dim, int Mode, int Options>
Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::shear(Scalar sx, Scalar sy)
+Transform<Scalar,Dim,Mode,Options>::shear(const Scalar& sx, const Scalar& sy)
{
EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
@@ -925,7 +925,7 @@ Transform<Scalar,Dim,Mode,Options>::shear(Scalar sx, Scalar sy)
*/
template<typename Scalar, int Dim, int Mode, int Options>
Transform<Scalar,Dim,Mode,Options>&
-Transform<Scalar,Dim,Mode,Options>::preshear(Scalar sx, Scalar sy)
+Transform<Scalar,Dim,Mode,Options>::preshear(const Scalar& sx, const Scalar& sy)
{
EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
diff --git a/extern/Eigen3/Eigen/src/Geometry/Umeyama.h b/extern/Eigen3/Eigen/src/Geometry/Umeyama.h
index ac0939cde52..345b47e0c37 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Umeyama.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Umeyama.h
@@ -153,16 +153,21 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
}
- // Eq. (42)
- const Scalar c = 1/src_var * svd.singularValues().dot(S);
-
- // Eq. (41)
- // Note that we first assign dst_mean to the destination so that there no need
- // for a temporary.
- Rt.col(m).head(m) = dst_mean;
- Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
-
- if (with_scaling) Rt.block(0,0,m,m) *= c;
+ if (with_scaling)
+ {
+ // Eq. (42)
+ const Scalar c = 1/src_var * svd.singularValues().dot(S);
+
+ // Eq. (41)
+ Rt.col(m).head(m) = dst_mean;
+ Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
+ Rt.block(0,0,m,m) *= c;
+ }
+ else
+ {
+ Rt.col(m).head(m) = dst_mean;
+ Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean;
+ }
return Rt;
}
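
With the restructuring above, the translation no longer picks up the estimated scale c when with_scaling is false; previously c leaked into the translation even in the rigid case. A synthetic check of the scaled branch (standard Eigen API; the point cloud and transform are arbitrary test data):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  Matrix3Xd src = Matrix3Xd::Random(3, 20);
  Matrix3d R = AngleAxisd(0.7, Vector3d::UnitZ()).toRotationMatrix();
  Vector3d t(1.0, 2.0, 3.0);
  Matrix3Xd dst = (1.5 * (R * src)).colwise() + t;  // dst = 1.5*R*src + t
  Matrix4d T = umeyama(src, dst, true);             // estimate scale, R, t
  std::cout << (T.topLeftCorner<3, 3>() - 1.5 * R).norm() << std::endl; // ~0
  return 0;
}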
diff --git a/extern/Eigen3/Eigen/src/Householder/Householder.h b/extern/Eigen3/Eigen/src/Householder/Householder.h
index 3f64b7dde2f..32112af9bfd 100644
--- a/extern/Eigen3/Eigen/src/Householder/Householder.h
+++ b/extern/Eigen3/Eigen/src/Householder/Householder.h
@@ -67,25 +67,28 @@ void MatrixBase<Derived>::makeHouseholder(
Scalar& tau,
RealScalar& beta) const
{
+ using std::sqrt;
+ using numext::conj;
+
EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart)
VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1);
RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm();
Scalar c0 = coeff(0);
- if(tailSqNorm == RealScalar(0) && internal::imag(c0)==RealScalar(0))
+ if(tailSqNorm == RealScalar(0) && numext::imag(c0)==RealScalar(0))
{
tau = RealScalar(0);
- beta = internal::real(c0);
+ beta = numext::real(c0);
essential.setZero();
}
else
{
- beta = internal::sqrt(internal::abs2(c0) + tailSqNorm);
- if (internal::real(c0)>=RealScalar(0))
+ beta = sqrt(numext::abs2(c0) + tailSqNorm);
+ if (numext::real(c0)>=RealScalar(0))
beta = -beta;
essential = tail / (c0 - beta);
- tau = internal::conj((beta - c0) / beta);
+ tau = conj((beta - c0) / beta);
}
}
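
makeHouseholder() computes (essential, tau, beta) such that H = I - tau v v^* maps the vector onto (beta, 0, ..., 0)^T. A self-contained check of that contract (standard Eigen API; the size 5 is arbitrary):

#include <Eigen/Householder>
#include <cassert>
#include <cmath>

int main()
{
  using namespace Eigen;
  VectorXd x = VectorXd::Random(5);
  VectorXd essential(4);                  // tail of the Householder vector v
  double tau, beta;
  x.makeHouseholder(essential, tau, beta);
  VectorXd y = x;
  double workspace;                       // one scalar: y has a single column
  y.applyHouseholderOnTheLeft(essential, tau, &workspace);
  assert(std::abs(y(0) - beta) < 1e-12);  // first entry becomes beta
  assert(y.tail(4).norm() < 1e-12);       // the remaining entries vanish
  return 0;
}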
diff --git a/extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h b/extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h
index 1e71e16a785..d800ca1fa42 100644
--- a/extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h
+++ b/extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h
@@ -112,6 +112,9 @@ template<typename OtherScalarType, typename MatrixType> struct matrix_type_times
template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence
: public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
+ typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType EssentialVectorType;
+
+ public:
enum {
RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime,
ColsAtCompileTime = internal::traits<HouseholderSequence>::ColsAtCompileTime,
@@ -121,13 +124,10 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;
typedef typename VectorsType::Index Index;
- typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType
- EssentialVectorType;
-
- public:
-
typedef HouseholderSequence<
- VectorsType,
+ typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type,
+ VectorsType>::type,
typename internal::conditional<NumTraits<Scalar>::IsComplex,
typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,
CoeffsType>::type,
@@ -208,7 +208,7 @@ template<typename VectorsType, typename CoeffsType, int Side> class HouseholderS
/** \brief Complex conjugate of the Householder sequence. */
ConjugateReturnType conjugate() const
{
- return ConjugateReturnType(m_vectors, m_coeffs.conjugate())
+ return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate())
.setTrans(m_trans)
.setLength(m_length)
.setShift(m_shift);
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
index 126341be8d8..6fc6ab85225 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -39,10 +39,17 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
int maxIters = iters;
int n = mat.cols();
+ x = precond.solve(x);
VectorType r = rhs - mat * x;
VectorType r0 = r;
RealScalar r0_sqnorm = r0.squaredNorm();
+ RealScalar rhs_sqnorm = rhs.squaredNorm();
+ if(rhs_sqnorm == 0)
+ {
+ x.setZero();
+ return true;
+ }
Scalar rho = 1;
Scalar alpha = 1;
Scalar w = 1;
@@ -55,13 +62,22 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
RealScalar tol2 = tol*tol;
int i = 0;
+ int restarts = 0;
- while ( r.squaredNorm()/r0_sqnorm > tol2 && i<maxIters )
+ while ( r.squaredNorm()/rhs_sqnorm > tol2 && i<maxIters )
{
Scalar rho_old = rho;
rho = r0.dot(r);
- if (rho == Scalar(0)) return false; /* New search directions cannot be found */
+ if (internal::isMuchSmallerThan(rho,r0_sqnorm))
+ {
+      // The new residual vector became too orthogonal to the arbitrarily chosen direction r0
+ // Let's restart with a new r0:
+ r0 = r;
+ rho = r0_sqnorm = r.squaredNorm();
+ if(restarts++ == 0)
+ i = 0;
+ }
Scalar beta = (rho/rho_old) * (alpha / w);
p = r + beta * (p - w * v);
@@ -75,12 +91,16 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
z = precond.solve(s);
t.noalias() = mat * z;
- w = t.dot(s) / t.squaredNorm();
+ RealScalar tmp = t.squaredNorm();
+ if(tmp>RealScalar(0))
+ w = t.dot(s) / tmp;
+ else
+ w = Scalar(0);
x += alpha * y + w * z;
r = s - w * t;
++i;
}
- tol_error = sqrt(r.squaredNorm()/r0_sqnorm);
+ tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
iters = i;
return true;
}
@@ -223,7 +243,8 @@ public:
template<typename Rhs,typename Dest>
void _solve(const Rhs& b, Dest& x) const
{
- x.setZero();
+// x.setZero();
+ x = b;
_solveWithGuess(b,x);
}
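
None of the BiCGSTAB changes above alter the public interface: convergence is now measured against ||rhs|| rather than the initial residual, and a zero right-hand side short-circuits to x = 0. A minimal driver (standard Eigen API; the identity system is a placeholder):

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  using namespace Eigen;
  SparseMatrix<double> A(100, 100);
  A.setIdentity();                        // placeholder system matrix
  VectorXd b = VectorXd::Random(100);
  BiCGSTAB<SparseMatrix<double> > solver;
  solver.compute(A);
  VectorXd x = solver.solve(b);
  std::cout << "#iterations: " << solver.iterations()
            << ", estimated error: " << solver.error() << std::endl;
  return 0;
}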
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
index f64f2534d28..a74a8155e68 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -41,15 +41,29 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
int n = mat.cols();
VectorType residual = rhs - mat * x; //initial residual
- VectorType p(n);
+ RealScalar rhsNorm2 = rhs.squaredNorm();
+ if(rhsNorm2 == 0)
+ {
+ x.setZero();
+ iters = 0;
+ tol_error = 0;
+ return;
+ }
+ RealScalar threshold = tol*tol*rhsNorm2;
+ RealScalar residualNorm2 = residual.squaredNorm();
+ if (residualNorm2 < threshold)
+ {
+ iters = 0;
+ tol_error = sqrt(residualNorm2 / rhsNorm2);
+ return;
+ }
+
+ VectorType p(n);
p = precond.solve(residual); //initial search direction
VectorType z(n), tmp(n);
- RealScalar absNew = internal::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
- RealScalar rhsNorm2 = rhs.squaredNorm();
- RealScalar residualNorm2 = 0;
- RealScalar threshold = tol*tol*rhsNorm2;
+ RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
int i = 0;
while(i < maxIters)
{
@@ -66,7 +80,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
z = precond.solve(residual); // approximately solve for "A z = residual"
RealScalar absOld = absNew;
- absNew = internal::real(residual.dot(z)); // update the absolute value of r
+ absNew = numext::real(residual.dot(z)); // update the absolute value of r
RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
p = z + beta * p; // update search direction
i++;
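
The early exits added above make a zero right-hand side well defined: the solver returns x = 0 after zero iterations instead of dividing by rhsNorm2 == 0. Sketch (standard Eigen API; the identity matrix is a placeholder):

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  using namespace Eigen;
  SparseMatrix<double> A(50, 50);
  A.setIdentity();                        // placeholder SPD matrix
  VectorXd b = VectorXd::Zero(50);
  ConjugateGradient<SparseMatrix<double> > cg;
  cg.setTolerance(1e-10);
  cg.compute(A);
  VectorXd x = cg.solve(b);               // hits the rhsNorm2 == 0 early exit
  std::cout << cg.iterations() << std::endl;  // prints 0
  return 0;
}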
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
index 224304f0eb8..b55afc13636 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -10,36 +10,88 @@
#ifndef EIGEN_INCOMPLETE_LUT_H
#define EIGEN_INCOMPLETE_LUT_H
+
namespace Eigen {
-/**
- * \brief Incomplete LU factorization with dual-threshold strategy
- * During the numerical factorization, two dropping rules are used :
- * 1) any element whose magnitude is less than some tolerance is dropped.
- * This tolerance is obtained by multiplying the input tolerance @p droptol
- * by the average magnitude of all the original elements in the current row.
- * 2) After the elimination of the row, only the @p fill largest elements in
- * the L part and the @p fill largest elements in the U part are kept
- * (in addition to the diagonal element ). Note that @p fill is computed from
- * the input parameter @p fillfactor which is used the ratio to control the fill_in
- * relatively to the initial number of nonzero elements.
- *
- * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
- * and when @p fill=n/2 with @p droptol being different to zero.
- *
- * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
- * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
- *
- * NOTE : The following implementation is derived from the ILUT implementation
- * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
- * released under the terms of the GNU LGPL:
- * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
- * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
- * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
- * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
- * alternatively, on GMANE:
- * http://comments.gmane.org/gmane.comp.lib.eigen/3302
- */
+namespace internal {
+
+/** \internal
+ * Compute a quick-sort split of a vector
+ * On output, the vector row is permuted such that its elements satisfy
+ * abs(row(i)) >= abs(row(ncut)) if i<ncut
+ * abs(row(i)) <= abs(row(ncut)) if i>ncut
+ * \param row The vector of values
+ *  \param ind The array of indices for the elements in @p row
+ * \param ncut The number of largest elements to keep
+ **/
+template <typename VectorV, typename VectorI, typename Index>
+Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
+{
+ typedef typename VectorV::RealScalar RealScalar;
+ using std::swap;
+ using std::abs;
+ Index mid;
+ Index n = row.size(); /* length of the vector */
+ Index first, last ;
+
+ ncut--; /* to fit the zero-based indices */
+ first = 0;
+ last = n-1;
+ if (ncut < first || ncut > last ) return 0;
+
+ do {
+ mid = first;
+ RealScalar abskey = abs(row(mid));
+ for (Index j = first + 1; j <= last; j++) {
+ if ( abs(row(j)) > abskey) {
+ ++mid;
+ swap(row(mid), row(j));
+ swap(ind(mid), ind(j));
+ }
+ }
+ /* Interchange for the pivot element */
+ swap(row(mid), row(first));
+ swap(ind(mid), ind(first));
+
+ if (mid > ncut) last = mid - 1;
+ else if (mid < ncut ) first = mid + 1;
+ } while (mid != ncut );
+
+ return 0; /* mid is equal to ncut */
+}
+
+}// end namespace internal
+
+/** \ingroup IterativeLinearSolvers_Module
+ * \class IncompleteLUT
+ * \brief Incomplete LU factorization with dual-threshold strategy
+ *
+ * During the numerical factorization, two dropping rules are used :
+ * 1) any element whose magnitude is less than some tolerance is dropped.
+ * This tolerance is obtained by multiplying the input tolerance @p droptol
+ * by the average magnitude of all the original elements in the current row.
+ * 2) After the elimination of the row, only the @p fill largest elements in
+ * the L part and the @p fill largest elements in the U part are kept
+  * (in addition to the diagonal element). Note that @p fill is computed from
+  * the input parameter @p fillfactor, which is used as the ratio to control
+  * the fill-in relative to the initial number of nonzero elements.
+ *
+ * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
+  * and when @p fill=n/2 with @p droptol being different from zero.
+ *
+ * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
+ * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
+ *
+ * NOTE : The following implementation is derived from the ILUT implementation
+ * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
+ * released under the terms of the GNU LGPL:
+ * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
+ * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
+ * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
+ * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
+ * alternatively, on GMANE:
+ * http://comments.gmane.org/gmane.comp.lib.eigen/3302
+ */
template <typename _Scalar>
class IncompleteLUT : internal::noncopyable
{
@@ -59,7 +111,7 @@ class IncompleteLUT : internal::noncopyable
{}
template<typename MatrixType>
- IncompleteLUT(const MatrixType& mat, RealScalar droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
+ IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
: m_droptol(droptol),m_fillfactor(fillfactor),
m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
{
@@ -98,12 +150,11 @@ class IncompleteLUT : internal::noncopyable
{
analyzePattern(amat);
factorize(amat);
- eigen_assert(m_factorizationIsOk == true);
- m_isInitialized = true;
+ m_isInitialized = m_factorizationIsOk;
return *this;
}
- void setDroptol(RealScalar droptol);
+ void setDroptol(const RealScalar& droptol);
void setFillfactor(int fillfactor);
template<typename Rhs, typename Dest>
@@ -126,10 +177,6 @@ class IncompleteLUT : internal::noncopyable
protected:
- template <typename VectorV, typename VectorI>
- int QuickSplit(VectorV &row, VectorI &ind, int ncut);
-
-
/** keeps off-diagonal entries; drops diagonal entries */
struct keep_diag {
inline bool operator() (const Index& row, const Index& col, const Scalar&) const
@@ -156,7 +203,7 @@ protected:
* \param droptol Drop any element whose magnitude is less than this tolerance
**/
template<typename Scalar>
-void IncompleteLUT<Scalar>::setDroptol(RealScalar droptol)
+void IncompleteLUT<Scalar>::setDroptol(const RealScalar& droptol)
{
this->m_droptol = droptol;
}
@@ -171,51 +218,6 @@ void IncompleteLUT<Scalar>::setFillfactor(int fillfactor)
this->m_fillfactor = fillfactor;
}
-
-/**
- * Compute a quick-sort split of a vector
- * On output, the vector row is permuted such that its elements satisfy
- * abs(row(i)) >= abs(row(ncut)) if i<ncut
- * abs(row(i)) <= abs(row(ncut)) if i>ncut
- * \param row The vector of values
- * \param ind The array of index for the elements in @p row
- * \param ncut The number of largest elements to keep
- **/
-template <typename Scalar>
-template <typename VectorV, typename VectorI>
-int IncompleteLUT<Scalar>::QuickSplit(VectorV &row, VectorI &ind, int ncut)
-{
- using std::swap;
- int mid;
- int n = row.size(); /* length of the vector */
- int first, last ;
-
- ncut--; /* to fit the zero-based indices */
- first = 0;
- last = n-1;
- if (ncut < first || ncut > last ) return 0;
-
- do {
- mid = first;
- RealScalar abskey = std::abs(row(mid));
- for (int j = first + 1; j <= last; j++) {
- if ( std::abs(row(j)) > abskey) {
- ++mid;
- swap(row(mid), row(j));
- swap(ind(mid), ind(j));
- }
- }
- /* Interchange for the pivot element */
- swap(row(mid), row(first));
- swap(ind(mid), ind(first));
-
- if (mid > ncut) last = mid - 1;
- else if (mid < ncut ) first = mid + 1;
- } while (mid != ncut );
-
- return 0; /* mid is equal to ncut */
-}
-
template <typename Scalar>
template<typename _MatrixType>
void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
@@ -244,7 +246,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
using std::abs;
eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
- int n = amat.cols(); // Size of the matrix
+ Index n = amat.cols(); // Size of the matrix
m_lu.resize(n,n);
// Declare Working vectors and variables
Vector u(n) ; // real values of the row -- maximum size is n --
@@ -262,21 +264,21 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
u.fill(0);
// number of largest elements to keep in each row:
- int fill_in = static_cast<int> (amat.nonZeros()*m_fillfactor)/n+1;
+ Index fill_in = static_cast<Index> (amat.nonZeros()*m_fillfactor)/n+1;
if (fill_in > n) fill_in = n;
// number of largest nonzero elements to keep in the L and the U part of the current row:
- int nnzL = fill_in/2;
- int nnzU = nnzL;
+ Index nnzL = fill_in/2;
+ Index nnzU = nnzL;
m_lu.reserve(n * (nnzL + nnzU + 1));
// global loop over the rows of the sparse matrix
- for (int ii = 0; ii < n; ii++)
+ for (Index ii = 0; ii < n; ii++)
{
// 1 - copy the lower and the upper part of the row i of mat in the working vector u
- int sizeu = 1; // number of nonzero elements in the upper part of the current row
- int sizel = 0; // number of nonzero elements in the lower part of the current row
+ Index sizeu = 1; // number of nonzero elements in the upper part of the current row
+ Index sizel = 0; // number of nonzero elements in the lower part of the current row
ju(ii) = ii;
u(ii) = 0;
jr(ii) = ii;
@@ -285,7 +287,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
for (; j_it; ++j_it)
{
- int k = j_it.index();
+ Index k = j_it.index();
if (k < ii)
{
// copy the lower part
@@ -301,13 +303,13 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
else
{
// copy the upper part
- int jpos = ii + sizeu;
+ Index jpos = ii + sizeu;
ju(jpos) = k;
u(jpos) = j_it.value();
jr(k) = jpos;
++sizeu;
}
- rownorm += internal::abs2(j_it.value());
+ rownorm += numext::abs2(j_it.value());
}
// 2 - detect possible zero row
@@ -320,19 +322,19 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
rownorm = sqrt(rownorm);
// 3 - eliminate the previous nonzero rows
- int jj = 0;
- int len = 0;
+ Index jj = 0;
+ Index len = 0;
while (jj < sizel)
{
// In order to eliminate in the correct order,
// we must select first the smallest column index among ju(jj:sizel)
- int k;
- int minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
+ Index k;
+ Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
k += jj;
if (minrow != ju(jj))
{
// swap the two locations
- int j = ju(jj);
+ Index j = ju(jj);
swap(ju(jj), ju(k));
jr(minrow) = jj; jr(j) = k;
swap(u(jj), u(k));
@@ -358,11 +360,11 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
for (; ki_it; ++ki_it)
{
Scalar prod = fact * ki_it.value();
- int j = ki_it.index();
- int jpos = jr(j);
+ Index j = ki_it.index();
+ Index jpos = jr(j);
if (jpos == -1) // fill-in element
{
- int newpos;
+ Index newpos;
if (j >= ii) // dealing with the upper part
{
newpos = ii + sizeu;
@@ -391,7 +393,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
} // end of the elimination on the row ii
// reset the upper part of the pointer jr to zero
- for(int k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
+ for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
// 4 - partially sort and insert the elements in the m_lu matrix
@@ -400,11 +402,11 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
len = (std::min)(sizel, nnzL);
typename Vector::SegmentReturnType ul(u.segment(0, sizel));
typename VectorXi::SegmentReturnType jul(ju.segment(0, sizel));
- QuickSplit(ul, jul, len);
+ internal::QuickSplit(ul, jul, len);
// store the largest m_fill elements of the L part
m_lu.startVec(ii);
- for(int k = 0; k < len; k++)
+ for(Index k = 0; k < len; k++)
m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
// store the diagonal element
@@ -416,7 +418,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
// sort the U-part of the row
// apply the dropping rule first
len = 0;
- for(int k = 1; k < sizeu; k++)
+ for(Index k = 1; k < sizeu; k++)
{
if(abs(u(ii+k)) > m_droptol * rownorm )
{
@@ -429,10 +431,10 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
len = (std::min)(sizeu, nnzU);
typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
- QuickSplit(uu, juu, len);
+ internal::QuickSplit(uu, juu, len);
// store the largest elements of the U part
- for(int k = ii + 1; k < ii + len; k++)
+ for(Index k = ii + 1; k < ii + len; k++)
m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
}
@@ -463,4 +465,3 @@ struct solve_retval<IncompleteLUT<_MatrixType>, Rhs>
} // end namespace Eigen
#endif // EIGEN_INCOMPLETE_LUT_H
-
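A minimal usage sketch (not part of the patch): the ILUT factorization above plugs into the iterative solvers as a preconditioner, here BiCGSTAB; the droptol, fillfactor and tolerance values are illustrative:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 100;
      SpMat A(n, n);
      // Assemble a simple tridiagonal test matrix.
      for (int i = 0; i < n; ++i) {
        A.insert(i, i) = 4.0;
        if (i > 0)     A.insert(i, i - 1) = -1.0;
        if (i < n - 1) A.insert(i, i + 1) = -1.0;
      }
      A.makeCompressed();
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      Eigen::BiCGSTAB<SpMat, Eigen::IncompleteLUT<double> > solver;
      solver.preconditioner().setDroptol(1e-4);   // dropping rule 1
      solver.preconditioner().setFillfactor(10);  // dropping rule 2
      solver.setTolerance(1e-8);                  // solver stopping criterion
      solver.compute(A);
      Eigen::VectorXd x = solver.solve(b);
      std::cout << "#iterations: " << solver.iterations() << std::endl;
      return 0;
    }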
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
index 11706cebabd..2036922d69c 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -120,7 +120,7 @@ public:
RealScalar tolerance() const { return m_tolerance; }
/** Sets the tolerance threshold used by the stopping criteria */
- Derived& setTolerance(RealScalar tolerance)
+ Derived& setTolerance(const RealScalar& tolerance)
{
m_tolerance = tolerance;
return derived();
diff --git a/extern/Eigen3/Eigen/src/Jacobi/Jacobi.h b/extern/Eigen3/Eigen/src/Jacobi/Jacobi.h
index a9c17dcdf19..956f72d5701 100644
--- a/extern/Eigen3/Eigen/src/Jacobi/Jacobi.h
+++ b/extern/Eigen3/Eigen/src/Jacobi/Jacobi.h
@@ -50,19 +50,20 @@ template<typename Scalar> class JacobiRotation
 /** Concatenates two planar rotations */
JacobiRotation operator*(const JacobiRotation& other)
{
- return JacobiRotation(m_c * other.m_c - internal::conj(m_s) * other.m_s,
- internal::conj(m_c * internal::conj(other.m_s) + internal::conj(m_s) * internal::conj(other.m_c)));
+ using numext::conj;
+ return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s,
+ conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c)));
}
/** Returns the transposed transformation */
- JacobiRotation transpose() const { return JacobiRotation(m_c, -internal::conj(m_s)); }
+ JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); }
/** Returns the adjoint transformation */
- JacobiRotation adjoint() const { return JacobiRotation(internal::conj(m_c), -m_s); }
+ JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); }
template<typename Derived>
bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
- bool makeJacobi(RealScalar x, Scalar y, RealScalar z);
+ bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);
void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
@@ -79,8 +80,10 @@ template<typename Scalar> class JacobiRotation
* \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
*/
template<typename Scalar>
-bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
+bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z)
{
+ using std::sqrt;
+ using std::abs;
typedef typename NumTraits<Scalar>::Real RealScalar;
if(y == Scalar(0))
{
@@ -90,8 +93,8 @@ bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
}
else
{
- RealScalar tau = (x-z)/(RealScalar(2)*internal::abs(y));
- RealScalar w = internal::sqrt(internal::abs2(tau) + RealScalar(1));
+ RealScalar tau = (x-z)/(RealScalar(2)*abs(y));
+ RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1));
RealScalar t;
if(tau>RealScalar(0))
{
@@ -102,8 +105,8 @@ bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
t = RealScalar(1) / (tau - w);
}
RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
- RealScalar n = RealScalar(1) / internal::sqrt(internal::abs2(t)+RealScalar(1));
- m_s = - sign_t * (internal::conj(y) / internal::abs(y)) * internal::abs(t) * n;
+ RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1));
+ m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n;
m_c = n;
return true;
}
@@ -122,7 +125,7 @@ template<typename Scalar>
template<typename Derived>
inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
{
- return makeJacobi(internal::real(m.coeff(p,p)), m.coeff(p,q), internal::real(m.coeff(q,q)));
+ return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));
}
 /** Makes \c *this a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector
@@ -152,52 +155,56 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
template<typename Scalar>
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
{
+ using std::sqrt;
+ using std::abs;
+ using numext::conj;
+
if(q==Scalar(0))
{
- m_c = internal::real(p)<0 ? Scalar(-1) : Scalar(1);
+ m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1);
m_s = 0;
if(r) *r = m_c * p;
}
else if(p==Scalar(0))
{
m_c = 0;
- m_s = -q/internal::abs(q);
- if(r) *r = internal::abs(q);
+ m_s = -q/abs(q);
+ if(r) *r = abs(q);
}
else
{
- RealScalar p1 = internal::norm1(p);
- RealScalar q1 = internal::norm1(q);
+ RealScalar p1 = numext::norm1(p);
+ RealScalar q1 = numext::norm1(q);
if(p1>=q1)
{
Scalar ps = p / p1;
- RealScalar p2 = internal::abs2(ps);
+ RealScalar p2 = numext::abs2(ps);
Scalar qs = q / p1;
- RealScalar q2 = internal::abs2(qs);
+ RealScalar q2 = numext::abs2(qs);
- RealScalar u = internal::sqrt(RealScalar(1) + q2/p2);
- if(internal::real(p)<RealScalar(0))
+ RealScalar u = sqrt(RealScalar(1) + q2/p2);
+ if(numext::real(p)<RealScalar(0))
u = -u;
m_c = Scalar(1)/u;
- m_s = -qs*internal::conj(ps)*(m_c/p2);
+ m_s = -qs*conj(ps)*(m_c/p2);
if(r) *r = p * u;
}
else
{
Scalar ps = p / q1;
- RealScalar p2 = internal::abs2(ps);
+ RealScalar p2 = numext::abs2(ps);
Scalar qs = q / q1;
- RealScalar q2 = internal::abs2(qs);
+ RealScalar q2 = numext::abs2(qs);
- RealScalar u = q1 * internal::sqrt(p2 + q2);
- if(internal::real(p)<RealScalar(0))
+ RealScalar u = q1 * sqrt(p2 + q2);
+ if(numext::real(p)<RealScalar(0))
u = -u;
- p1 = internal::abs(p);
+ p1 = abs(p);
ps = p/p1;
m_c = p1/u;
- m_s = -internal::conj(ps) * (q/u);
+ m_s = -conj(ps) * (q/u);
if(r) *r = ps * u;
}
}
@@ -207,23 +214,24 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
template<typename Scalar>
void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
{
-
+ using std::sqrt;
+ using std::abs;
if(q==Scalar(0))
{
m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1);
m_s = Scalar(0);
- if(r) *r = internal::abs(p);
+ if(r) *r = abs(p);
}
else if(p==Scalar(0))
{
m_c = Scalar(0);
m_s = q<Scalar(0) ? Scalar(1) : Scalar(-1);
- if(r) *r = internal::abs(q);
+ if(r) *r = abs(q);
}
- else if(internal::abs(p) > internal::abs(q))
+ else if(abs(p) > abs(q))
{
Scalar t = q/p;
- Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
+ Scalar u = sqrt(Scalar(1) + numext::abs2(t));
if(p<Scalar(0))
u = -u;
m_c = Scalar(1)/u;
@@ -233,7 +241,7 @@ void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar
else
{
Scalar t = p/q;
- Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
+ Scalar u = sqrt(Scalar(1) + numext::abs2(t));
if(q<Scalar(0))
u = -u;
m_s = -Scalar(1)/u;
@@ -303,6 +311,11 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0);
Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0);
+
+ OtherScalar c = j.c();
+ OtherScalar s = j.s();
+ if (c==OtherScalar(1) && s==OtherScalar(0))
+ return;
/*** dynamic-size vectorized paths ***/
@@ -316,16 +329,16 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
Index alignedStart = internal::first_aligned(y, size);
Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
- const Packet pc = pset1<Packet>(j.c());
- const Packet ps = pset1<Packet>(j.s());
+ const Packet pc = pset1<Packet>(c);
+ const Packet ps = pset1<Packet>(s);
conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
for(Index i=0; i<alignedStart; ++i)
{
Scalar xi = x[i];
Scalar yi = y[i];
- x[i] = j.c() * xi + conj(j.s()) * yi;
- y[i] = -j.s() * xi + conj(j.c()) * yi;
+ x[i] = c * xi + numext::conj(s) * yi;
+ y[i] = -s * xi + numext::conj(c) * yi;
}
Scalar* EIGEN_RESTRICT px = x + alignedStart;
@@ -372,8 +385,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
{
Scalar xi = x[i];
Scalar yi = y[i];
- x[i] = j.c() * xi + conj(j.s()) * yi;
- y[i] = -j.s() * xi + conj(j.c()) * yi;
+ x[i] = c * xi + numext::conj(s) * yi;
+ y[i] = -s * xi + numext::conj(c) * yi;
}
}
@@ -382,8 +395,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
(VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
(VectorX::Flags & VectorY::Flags & AlignedBit))
{
- const Packet pc = pset1<Packet>(j.c());
- const Packet ps = pset1<Packet>(j.s());
+ const Packet pc = pset1<Packet>(c);
+ const Packet ps = pset1<Packet>(s);
conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
Scalar* EIGEN_RESTRICT px = x;
Scalar* EIGEN_RESTRICT py = y;
@@ -405,8 +418,8 @@ void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y,
{
Scalar xi = *x;
Scalar yi = *y;
- *x = j.c() * xi + conj(j.s()) * yi;
- *y = -j.s() * xi + conj(j.c()) * yi;
+ *x = c * xi + numext::conj(s) * yi;
+ *y = -s * xi + numext::conj(c) * yi;
x += incrx;
y += incry;
}
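A small sketch of the public API touched above, following the documented contract of makeGivens: the rotation is built so that applying its adjoint on the left zeroes the second entry (values illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector2d v(3.0, 4.0);
      Eigen::JacobiRotation<double> G;
      // Build G such that G^* * (v(0), v(1))^T = (r, 0)^T.
      G.makeGivens(v(0), v(1));
      v.applyOnTheLeft(0, 1, G.adjoint());
      std::cout << v.transpose() << std::endl; // prints "5 0"
      return 0;
    }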
diff --git a/extern/Eigen3/Eigen/src/LU/Determinant.h b/extern/Eigen3/Eigen/src/LU/Determinant.h
index d862c5d7784..bb8e78a8a8a 100644
--- a/extern/Eigen3/Eigen/src/LU/Determinant.h
+++ b/extern/Eigen3/Eigen/src/LU/Determinant.h
@@ -91,7 +91,7 @@ template<typename Derived> struct determinant_impl<Derived, 4>
template<typename Derived>
inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
{
- assert(rows() == cols());
+ eigen_assert(rows() == cols());
typedef typename internal::nested<Derived,Base::RowsAtCompileTime>::type Nested;
return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
}
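The change above routes the squareness check through eigen_assert, so it respects EIGEN_NO_DEBUG and custom eigen_assert definitions, instead of using the raw C assert. A trivial sketch of the call site (values illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d m;
      m << 1, 2, 3,
           4, 5, 6,
           7, 8, 10;
      // determinant() asserts rows() == cols() via eigen_assert.
      std::cout << "det = " << m.determinant() << std::endl; // det = -3
      return 0;
    }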
diff --git a/extern/Eigen3/Eigen/src/LU/FullPivLU.h b/extern/Eigen3/Eigen/src/LU/FullPivLU.h
index e23f96cdcf1..dfe25f424d7 100644
--- a/extern/Eigen3/Eigen/src/LU/FullPivLU.h
+++ b/extern/Eigen3/Eigen/src/LU/FullPivLU.h
@@ -293,11 +293,12 @@ template<typename _MatrixType> class FullPivLU
*/
inline Index rank() const
{
+ using std::abs;
eigen_assert(m_isInitialized && "LU is not initialized.");
- RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold();
+ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
Index result = 0;
for(Index i = 0; i < m_nonzero_pivots; ++i)
- result += (internal::abs(m_lu.coeff(i,i)) > premultiplied_threshold);
+ result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold);
return result;
}
@@ -416,6 +417,9 @@ FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
template<typename MatrixType>
FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
{
+ // the permutations are stored as int indices, so just to be sure:
+ eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
+
m_isInitialized = true;
m_lu = matrix;
@@ -547,6 +551,7 @@ struct kernel_retval<FullPivLU<_MatrixType> >
template<typename Dest> void evalTo(Dest& dst) const
{
+ using std::abs;
const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
if(dimker == 0)
{
@@ -632,6 +637,7 @@ struct image_retval<FullPivLU<_MatrixType> >
template<typename Dest> void evalTo(Dest& dst) const
{
+ using std::abs;
if(rank() == 0)
{
// The Image is just {0}, so it doesn't have a basis properly speaking, but let's
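A usage sketch of the API touched above (values illustrative): rank() compares each pivot against abs(m_maxpivot) * threshold(), so setThreshold() controls what counts as a nonzero pivot:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 1, 2, 3,
           2, 4, 6,   // = 2 * row 0, so A is rank-deficient
           1, 0, 1;
      Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
      lu.setThreshold(1e-10); // pivots below abs(maxpivot)*threshold count as zero
      std::cout << "rank = " << lu.rank() << std::endl;      // rank = 2
      std::cout << "kernel:\n" << lu.kernel() << std::endl;  // basis of the null space
      return 0;
    }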
diff --git a/extern/Eigen3/Eigen/src/LU/Inverse.h b/extern/Eigen3/Eigen/src/LU/Inverse.h
index 39b8cdbc8dc..3cf8871932f 100644
--- a/extern/Eigen3/Eigen/src/LU/Inverse.h
+++ b/extern/Eigen3/Eigen/src/LU/Inverse.h
@@ -55,6 +55,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 1>
bool& invertible
)
{
+ using std::abs;
determinant = matrix.coeff(0,0);
invertible = abs(determinant) > absDeterminantThreshold;
if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant;
@@ -98,6 +99,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 2>
bool& invertible
)
{
+ using std::abs;
typedef typename ResultType::Scalar Scalar;
determinant = matrix.determinant();
invertible = abs(determinant) > absDeterminantThreshold;
@@ -167,6 +169,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>
bool& invertible
)
{
+ using std::abs;
typedef typename ResultType::Scalar Scalar;
Matrix<Scalar,3,1> cofactors_col0;
cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix);
@@ -251,6 +254,7 @@ struct compute_inverse_and_det_with_check<MatrixType, ResultType, 4>
bool& invertible
)
{
+ using std::abs;
determinant = matrix.determinant();
invertible = abs(determinant) > absDeterminantThreshold;
if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse);
@@ -327,7 +331,7 @@ inline const internal::inverse_impl<Derived> MatrixBase<Derived>::inverse() cons
* This is only for fixed-size square matrices of size up to 4x4.
*
* \param inverse Reference to the matrix in which to store the inverse.
- * \param determinant Reference to the variable in which to store the inverse.
+ * \param determinant Reference to the variable in which to store the determinant.
* \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
* \param absDeterminantThreshold Optional parameter controlling the invertibility check.
* The matrix will be declared invertible if the absolute value of its
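The \param fix above corrects copy-pasted documentation: the second argument receives the determinant, not the inverse. A short sketch of this fixed-size path (4x4 and below; values illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d m;
      m << 2, 0, 0,
           0, 3, 0,
           0, 0, 4;
      Eigen::Matrix3d inv;
      double det;
      bool invertible;
      m.computeInverseAndDetWithCheck(inv, det, invertible);
      std::cout << "det = " << det                        // det = 24
                << ", invertible = " << invertible << std::endl;
      return 0;
    }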
diff --git a/extern/Eigen3/Eigen/src/LU/PartialPivLU.h b/extern/Eigen3/Eigen/src/LU/PartialPivLU.h
index c9ff9dd5a36..740ee694c45 100644
--- a/extern/Eigen3/Eigen/src/LU/PartialPivLU.h
+++ b/extern/Eigen3/Eigen/src/LU/PartialPivLU.h
@@ -242,7 +242,7 @@ struct partial_lu_impl
const Index cols = lu.cols();
const Index size = (std::min)(rows,cols);
nb_transpositions = 0;
- int first_zero_pivot = -1;
+ Index first_zero_pivot = -1;
for(Index k = 0; k < size; ++k)
{
Index rrows = rows-k-1;
@@ -253,7 +253,7 @@ struct partial_lu_impl
= lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col);
row_of_biggest_in_col += k;
- row_transpositions[k] = row_of_biggest_in_col;
+ row_transpositions[k] = PivIndex(row_of_biggest_in_col);
if(biggest_in_corner != RealScalar(0))
{
@@ -318,7 +318,7 @@ struct partial_lu_impl
}
nb_transpositions = 0;
- int first_zero_pivot = -1;
+ Index first_zero_pivot = -1;
for(Index k = 0; k < size; k+=blockSize)
{
Index bs = (std::min)(size-k,blockSize); // actual size of the block
@@ -386,6 +386,9 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
template<typename MatrixType>
PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
{
+ // the row permutation is stored as int indices, so just to be sure:
+ eigen_assert(matrix.rows()<NumTraits<int>::highest());
+
m_lu = matrix;
eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
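A short usage sketch (values illustrative). Note the new guard above: the row permutation is stored with int indices, so matrices with at least NumTraits<int>::highest() rows are rejected up front:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d A;
      A << 2, 1,
           1, 3;
      Eigen::Vector2d b(1, 2);
      // PartialPivLU requires a square, invertible matrix.
      Eigen::PartialPivLU<Eigen::Matrix2d> lu(A);
      Eigen::Vector2d x = lu.solve(b);
      std::cout << "residual = " << (A * x - b).norm() << std::endl; // ~0
      return 0;
    }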
diff --git a/extern/Eigen3/Eigen/src/MetisSupport/MetisSupport.h b/extern/Eigen3/Eigen/src/MetisSupport/MetisSupport.h
new file mode 100644
index 00000000000..f2bbef20c8f
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/MetisSupport/MetisSupport.h
@@ -0,0 +1,137 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#ifndef METIS_SUPPORT_H
+#define METIS_SUPPORT_H
+
+namespace Eigen {
+/**
+ * Get the fill-reducing ordering from the METIS package
+ *
+ * If A is the original matrix and Ap is the permuted matrix,
+ * the fill-reducing permutation is defined as follows:
+ * Row (column) i of A is the matperm(i) row (column) of Ap.
+ * WARNING: As computed by METIS, this corresponds to the vector iperm (instead of perm)
+ */
+template <typename Index>
+class MetisOrdering
+{
+public:
+ typedef PermutationMatrix<Dynamic,Dynamic,Index> PermutationType;
+ typedef Matrix<Index,Dynamic,1> IndexVector;
+
+ template <typename MatrixType>
+ void get_symmetrized_graph(const MatrixType& A)
+ {
+ Index m = A.cols();
+ eigen_assert((A.rows() == A.cols()) && "ONLY FOR SQUARE MATRICES");
+ // Get the transpose of the input matrix
+ MatrixType At = A.transpose();
+ // Get the number of nonzero elements in each row/col of At+A
+ Index TotNz = 0;
+ IndexVector visited(m);
+ visited.setConstant(-1);
+ for (int j = 0; j < m; j++)
+ {
+ // Compute the union structure of A(j,:) and At(j,:)
+ visited(j) = j; // Do not include the diagonal element
+ // Get the nonzeros in row/column j of A
+ for (typename MatrixType::InnerIterator it(A, j); it; ++it)
+ {
+ Index idx = it.index(); // Get the row index (for column major) or column index (for row major)
+ if (visited(idx) != j )
+ {
+ visited(idx) = j;
+ ++TotNz;
+ }
+ }
+ //Get the nonzeros in row/column j of At
+ for (typename MatrixType::InnerIterator it(At, j); it; ++it)
+ {
+ Index idx = it.index();
+ if(visited(idx) != j)
+ {
+ visited(idx) = j;
+ ++TotNz;
+ }
+ }
+ }
+ // Reserve place for A + At
+ m_indexPtr.resize(m+1);
+ m_innerIndices.resize(TotNz);
+
+ // Now compute the real adjacency list of each column/row
+ visited.setConstant(-1);
+ Index CurNz = 0;
+ for (int j = 0; j < m; j++)
+ {
+ m_indexPtr(j) = CurNz;
+
+ visited(j) = j; // Do not include the diagonal element
+ // Add the pattern of row/column j of A to A+At
+ for (typename MatrixType::InnerIterator it(A,j); it; ++it)
+ {
+ Index idx = it.index(); // Get the row index (for column major) or column index (for row major)
+ if (visited(idx) != j )
+ {
+ visited(idx) = j;
+ m_innerIndices(CurNz) = idx;
+ CurNz++;
+ }
+ }
+ //Add the pattern of row/column j of At to A+At
+ for (typename MatrixType::InnerIterator it(At, j); it; ++it)
+ {
+ Index idx = it.index();
+ if(visited(idx) != j)
+ {
+ visited(idx) = j;
+ m_innerIndices(CurNz) = idx;
+ ++CurNz;
+ }
+ }
+ }
+ m_indexPtr(m) = CurNz;
+ }
+
+ template <typename MatrixType>
+ void operator() (const MatrixType& A, PermutationType& matperm)
+ {
+ Index m = A.cols();
+ IndexVector perm(m),iperm(m);
+ // First, symmetrize the matrix graph.
+ get_symmetrized_graph(A);
+ int output_error;
+
+ // Call the fill-reducing routine from METIS
+ output_error = METIS_NodeND(&m, m_indexPtr.data(), m_innerIndices.data(), NULL, NULL, perm.data(), iperm.data());
+
+ if(output_error != METIS_OK)
+ {
+ //FIXME The ordering interface should define a class of possible errors
+ std::cerr << "ERROR WHILE CALLING THE METIS PACKAGE \n";
+ return;
+ }
+
+ // Get the fill-reducing permutation
+ //NOTE: If Ap is the permuted matrix then perm and iperm vectors are defined as follows
+ // Row (column) i of Ap is the perm(i) row(column) of A, and row (column) i of A is the iperm(i) row(column) of Ap
+
+ matperm.resize(m);
+ for (int j = 0; j < m; j++)
+ matperm.indices()(iperm(j)) = j;
+
+ }
+
+ protected:
+ IndexVector m_indexPtr; // Pointer to the adjacency list of each row/column
+ IndexVector m_innerIndices; // Adjacency list
+};
+
+} // end namespace Eigen
+#endif // METIS_SUPPORT_H
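A sketch of how this new backend is meant to be consumed (assuming Eigen was configured with METIS support and the METIS library is linked; not verified as part of this patch): the ordering plugs into the direct sparse solvers as a template parameter, e.g. the new SparseLU mentioned in the commit message:

    #include <Eigen/Sparse>
    #include <Eigen/SparseLU>
    #include <Eigen/MetisSupport>

    typedef Eigen::SparseMatrix<double> SpMat;

    // Use METIS nested dissection as the fill-reducing ordering for SparseLU
    // instead of the default COLAMD ordering.
    void solve_with_metis(const SpMat& A, const Eigen::VectorXd& b, Eigen::VectorXd& x)
    {
      Eigen::SparseLU<SpMat, Eigen::MetisOrdering<int> > solver;
      solver.analyzePattern(A);   // invokes MetisOrdering internally
      solver.factorize(A);
      x = solver.solve(b);
    }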
diff --git a/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h b/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h
index ce04852b872..41b4fd7e392 100644
--- a/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h
+++ b/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h
@@ -2,10 +2,6 @@
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
@@ -86,6 +82,7 @@ Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Ind
/** \internal
+ * \ingroup OrderingMethods_Module
* Approximate minimum degree ordering algorithm.
* \returns the permutation P reducing the fill-in of the input matrix \a C
* The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional.
@@ -94,7 +91,6 @@ template<typename Scalar, typename Index>
void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic,Dynamic,Index>& perm)
{
using std::sqrt;
- typedef SparseMatrix<Scalar,ColMajor,Index> CCS;
int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
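The internal minimum_degree_ordering above is exposed through the AMDOrdering functor of the OrderingMethods module, which symmetrizes the pattern itself, so a general square matrix can be passed. A short sketch (matrix values illustrative):

    #include <Eigen/Sparse>
    #include <Eigen/OrderingMethods>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 5;
      SpMat A(n, n);
      for (int i = 0; i < n; ++i) {
        A.insert(i, i) = 4.0;
        if (i > 0) { A.insert(i, i - 1) = -1.0; A.insert(i - 1, i) = -1.0; }
      }
      A.makeCompressed();

      Eigen::AMDOrdering<int> ordering;
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
      ordering(A, perm); // fill-reducing permutation via minimum_degree_ordering
      std::cout << perm.indices().transpose() << std::endl;
      return 0;
    }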
diff --git a/extern/Eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h b/extern/Eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h
new file mode 100644
index 00000000000..44548f6607c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/OrderingMethods/Eigen_Colamd.h
@@ -0,0 +1,1850 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is modified from the colamd/symamd library. The copyright is below
+
+// The authors of the code itself are Stefan I. Larimore and Timothy A.
+// Davis (davis@cise.ufl.edu), University of Florida. The algorithm was
+// developed in collaboration with John Gilbert, Xerox PARC, and Esmond
+// Ng, Oak Ridge National Laboratory.
+//
+// Date:
+//
+// September 8, 2003. Version 2.3.
+//
+// Acknowledgements:
+//
+// This work was supported by the National Science Foundation, under
+// grants DMS-9504974 and DMS-9803599.
+//
+// Notice:
+//
+// Copyright (c) 1998-2003 by the University of Florida.
+// All Rights Reserved.
+//
+// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+//
+// Permission is hereby granted to use, copy, modify, and/or distribute
+// this program, provided that the Copyright, this License, and the
+// Availability of the original version is retained on all copies and made
+// accessible to the end-user of any code or package that includes COLAMD
+// or any modified version of COLAMD.
+//
+// Availability:
+//
+// The colamd/symamd library is available at
+//
+// http://www.cise.ufl.edu/research/sparse/colamd/
+
+// This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.h
+// file. It is required by the colamd.c, colamdmex.c, and symamdmex.c
+// files, and by any C code that calls the routines whose prototypes are
+// listed below, or that uses the colamd/symamd definitions listed below.
+
+#ifndef EIGEN_COLAMD_H
+#define EIGEN_COLAMD_H
+
+namespace internal {
+/* Ensure that debugging is turned off: */
+#ifndef COLAMD_NDEBUG
+#define COLAMD_NDEBUG
+#endif /* COLAMD_NDEBUG */
+/* ========================================================================== */
+/* === Knob and statistics definitions ====================================== */
+/* ========================================================================== */
+
+/* size of the knobs [ ] array. Only knobs [0..1] are currently used. */
+#define COLAMD_KNOBS 20
+
+/* number of output statistics. Only stats [0..6] are currently used. */
+#define COLAMD_STATS 20
+
+/* knobs [0] and stats [0]: dense row knob and output statistic. */
+#define COLAMD_DENSE_ROW 0
+
+/* knobs [1] and stats [1]: dense column knob and output statistic. */
+#define COLAMD_DENSE_COL 1
+
+/* stats [2]: memory defragmentation count output statistic */
+#define COLAMD_DEFRAG_COUNT 2
+
+/* stats [3]: colamd status: zero OK, > 0 warning or notice, < 0 error */
+#define COLAMD_STATUS 3
+
+/* stats [4..6]: error info, or info on jumbled columns */
+#define COLAMD_INFO1 4
+#define COLAMD_INFO2 5
+#define COLAMD_INFO3 6
+
+/* error codes returned in stats [3]: */
+#define COLAMD_OK (0)
+#define COLAMD_OK_BUT_JUMBLED (1)
+#define COLAMD_ERROR_A_not_present (-1)
+#define COLAMD_ERROR_p_not_present (-2)
+#define COLAMD_ERROR_nrow_negative (-3)
+#define COLAMD_ERROR_ncol_negative (-4)
+#define COLAMD_ERROR_nnz_negative (-5)
+#define COLAMD_ERROR_p0_nonzero (-6)
+#define COLAMD_ERROR_A_too_small (-7)
+#define COLAMD_ERROR_col_length_negative (-8)
+#define COLAMD_ERROR_row_index_out_of_bounds (-9)
+#define COLAMD_ERROR_out_of_memory (-10)
+#define COLAMD_ERROR_internal_error (-999)
+
+/* ========================================================================== */
+/* === Definitions ========================================================== */
+/* ========================================================================== */
+
+#define COLAMD_MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define COLAMD_MIN(a,b) (((a) < (b)) ? (a) : (b))
+
+#define ONES_COMPLEMENT(r) (-(r)-1)
+
+/* -------------------------------------------------------------------------- */
+
+#define COLAMD_EMPTY (-1)
+
+/* Row and column status */
+#define ALIVE (0)
+#define DEAD (-1)
+
+/* Column status */
+#define DEAD_PRINCIPAL (-1)
+#define DEAD_NON_PRINCIPAL (-2)
+
+/* Macros for row and column status update and checking. */
+#define ROW_IS_DEAD(r) ROW_IS_MARKED_DEAD (Row[r].shared2.mark)
+#define ROW_IS_MARKED_DEAD(row_mark) (row_mark < ALIVE)
+#define ROW_IS_ALIVE(r) (Row [r].shared2.mark >= ALIVE)
+#define COL_IS_DEAD(c) (Col [c].start < ALIVE)
+#define COL_IS_ALIVE(c) (Col [c].start >= ALIVE)
+#define COL_IS_DEAD_PRINCIPAL(c) (Col [c].start == DEAD_PRINCIPAL)
+#define KILL_ROW(r) { Row [r].shared2.mark = DEAD ; }
+#define KILL_PRINCIPAL_COL(c) { Col [c].start = DEAD_PRINCIPAL ; }
+#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; }
+
+/* ========================================================================== */
+/* === Colamd reporting mechanism =========================================== */
+/* ========================================================================== */
+
+// == Row and Column structures ==
+template <typename Index>
+struct colamd_col
+{
+ Index start ; /* index for A of first row in this column, or DEAD */
+ /* if column is dead */
+ Index length ; /* number of rows in this column */
+ union
+ {
+ Index thickness ; /* number of original columns represented by this */
+ /* col, if the column is alive */
+ Index parent ; /* parent in parent tree super-column structure, if */
+ /* the column is dead */
+ } shared1 ;
+ union
+ {
+ Index score ; /* the score used to maintain heap, if col is alive */
+ Index order ; /* pivot ordering of this column, if col is dead */
+ } shared2 ;
+ union
+ {
+ Index headhash ; /* head of a hash bucket, if col is at the head of */
+ /* a degree list */
+ Index hash ; /* hash value, if col is not in a degree list */
+ Index prev ; /* previous column in degree list, if col is in a */
+ /* degree list (but not at the head of a degree list) */
+ } shared3 ;
+ union
+ {
+ Index degree_next ; /* next column, if col is in a degree list */
+ Index hash_next ; /* next column, if col is in a hash list */
+ } shared4 ;
+
+};
+
+template <typename Index>
+struct Colamd_Row
+{
+ Index start ; /* index for A of first col in this row */
+ Index length ; /* number of principal columns in this row */
+ union
+ {
+ Index degree ; /* number of principal & non-principal columns in row */
+ Index p ; /* used as a row pointer in init_rows_cols () */
+ } shared1 ;
+ union
+ {
+ Index mark ; /* for computing set differences and marking dead rows*/
+ Index first_column ;/* first column in row (used in garbage collection) */
+ } shared2 ;
+
+};
+
+/* ========================================================================== */
+/* === Colamd recommended memory size ======================================= */
+/* ========================================================================== */
+
+/*
+ The recommended length Alen of the array A passed to colamd is given by
+ the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro. It returns -1 if any
+ argument is negative. 2*nnz space is required for the row and column
+ indices of the matrix. colamd_c (n_col) + colamd_r (n_row) space is
+ required for the Col and Row arrays, respectively, which are internal to
+ colamd. An additional n_col space is the minimal amount of "elbow room",
+ and nnz/5 more space is recommended for run time efficiency.
+
+ This macro is not needed when using symamd.
+
+ Explicit typecast to Index added Sept. 23, 2002, COLAMD version 2.2, to avoid
+ gcc -pedantic warning messages.
+*/
+template <typename Index>
+inline Index colamd_c(Index n_col)
+{ return Index( ((n_col) + 1) * sizeof (colamd_col<Index>) / sizeof (Index) ) ; }
+
+template <typename Index>
+inline Index colamd_r(Index n_row)
+{ return Index(((n_row) + 1) * sizeof (Colamd_Row<Index>) / sizeof (Index)); }
+
+// Prototypes of non-user callable routines
+template <typename Index>
+static Index init_rows_cols (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> col [], Index A [], Index p [], Index stats[COLAMD_STATS] );
+
+template <typename Index>
+static void init_scoring (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], double knobs[COLAMD_KNOBS], Index *p_n_row2, Index *p_n_col2, Index *p_max_deg);
+
+template <typename Index>
+static Index find_ordering (Index n_row, Index n_col, Index Alen, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], Index n_col2, Index max_deg, Index pfree);
+
+template <typename Index>
+static void order_children (Index n_col, colamd_col<Index> Col [], Index p []);
+
+template <typename Index>
+static void detect_super_cols (colamd_col<Index> Col [], Index A [], Index head [], Index row_start, Index row_length ) ;
+
+template <typename Index>
+static Index garbage_collection (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index *pfree) ;
+
+template <typename Index>
+static inline Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
+
+/* === No debugging ========================================================= */
+
+#define COLAMD_DEBUG0(params) ;
+#define COLAMD_DEBUG1(params) ;
+#define COLAMD_DEBUG2(params) ;
+#define COLAMD_DEBUG3(params) ;
+#define COLAMD_DEBUG4(params) ;
+
+#define COLAMD_ASSERT(expression) ((void) 0)
+
+
+/**
+ * \brief Returns the recommended value of Alen
+ *
+ * Returns recommended value of Alen for use by colamd.
+ * Returns -1 if any input argument is negative.
+ * The use of this routine or macro is optional.
+ * Note that the macro uses its arguments more than once,
+ * so be careful of side effects if you pass expressions as arguments to COLAMD_RECOMMENDED.
+ *
+ * \param nnz nonzeros in A
+ * \param n_row number of rows in A
+ * \param n_col number of columns in A
+ * \return recommended value of Alen for use by colamd
+ */
+template <typename Index>
+inline Index colamd_recommended ( Index nnz, Index n_row, Index n_col)
+{
+ if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)
+ return (-1);
+ else
+ return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5));
+}
+
+/**
+ * \brief Set default parameters. The use of this routine is optional.
+ *
+ * Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col)
+ * entries are removed prior to ordering. Columns with more than
+ * (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to
+ * ordering, and placed last in the output column ordering.
+ *
+ * COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1,
+ * respectively, in colamd.h. Default values of these two knobs
+ * are both 0.5. Currently, only knobs [0] and knobs [1] are
+ * used, but future versions may use more knobs. If so, they will
+ * be properly set to their defaults by the future version of
+ * colamd_set_defaults, so that the code that calls colamd will
+ * not need to change, assuming that you either use
+ * colamd_set_defaults, or pass a (double *) NULL pointer as the
+ * knobs array to colamd or symamd.
+ *
+ * \param knobs parameter settings for colamd
+ */
+
+static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
+{
+ /* === Local variables ================================================== */
+
+ int i ;
+
+ if (!knobs)
+ {
+ return ; /* no knobs to initialize */
+ }
+ for (i = 0 ; i < COLAMD_KNOBS ; i++)
+ {
+ knobs [i] = 0 ;
+ }
+ knobs [COLAMD_DENSE_ROW] = 0.5 ; /* ignore rows over 50% dense */
+ knobs [COLAMD_DENSE_COL] = 0.5 ; /* ignore columns over 50% dense */
+}
+
+/**
+ * \brief Computes a column ordering using the column approximate minimum degree ordering
+ *
+ * Computes a column ordering (Q) of A such that P(AQ)=LU or
+ * (AQ)'AQ=LL' have less fill-in and require fewer floating point
+ * operations than factorizing the unpermuted matrix A or A'A,
+ * respectively.
+ *
+ *
+ * \param n_row number of rows in A
+ * \param n_col number of columns in A
+ * \param Alen size of the array A
+ * \param A row indices of the matrix, of size Alen
+ * \param p column pointers of A, of size n_col+1
+ * \param knobs parameter settings for colamd
+ * \param stats colamd output statistics and error codes
+ */
+template <typename Index>
+static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, double knobs[COLAMD_KNOBS], Index stats[COLAMD_STATS])
+{
+ /* === Local variables ================================================== */
+
+ Index i ; /* loop index */
+ Index nnz ; /* nonzeros in A */
+ Index Row_size ; /* size of Row [], in integers */
+ Index Col_size ; /* size of Col [], in integers */
+ Index need ; /* minimum required length of A */
+ Colamd_Row<Index> *Row ; /* pointer into A of Row [0..n_row] array */
+ colamd_col<Index> *Col ; /* pointer into A of Col [0..n_col] array */
+ Index n_col2 ; /* number of non-dense, non-empty columns */
+ Index n_row2 ; /* number of non-dense, non-empty rows */
+ Index ngarbage ; /* number of garbage collections performed */
+ Index max_deg ; /* maximum row degree */
+ double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
+
+
+ /* === Check the input arguments ======================================== */
+
+ if (!stats)
+ {
+ COLAMD_DEBUG0 (("colamd: stats not present\n")) ;
+ return (false) ;
+ }
+ for (i = 0 ; i < COLAMD_STATS ; i++)
+ {
+ stats [i] = 0 ;
+ }
+ stats [COLAMD_STATUS] = COLAMD_OK ;
+ stats [COLAMD_INFO1] = -1 ;
+ stats [COLAMD_INFO2] = -1 ;
+
+ if (!A) /* A is not present */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ;
+ COLAMD_DEBUG0 (("colamd: A not present\n")) ;
+ return (false) ;
+ }
+
+ if (!p) /* p is not present */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ;
+ COLAMD_DEBUG0 (("colamd: p not present\n")) ;
+ return (false) ;
+ }
+
+ if (n_row < 0) /* n_row must be >= 0 */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ;
+ stats [COLAMD_INFO1] = n_row ;
+ COLAMD_DEBUG0 (("colamd: nrow negative %d\n", n_row)) ;
+ return (false) ;
+ }
+
+ if (n_col < 0) /* n_col must be >= 0 */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ;
+ stats [COLAMD_INFO1] = n_col ;
+ COLAMD_DEBUG0 (("colamd: ncol negative %d\n", n_col)) ;
+ return (false) ;
+ }
+
+ nnz = p [n_col] ;
+ if (nnz < 0) /* nnz must be >= 0 */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ;
+ stats [COLAMD_INFO1] = nnz ;
+ COLAMD_DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ;
+ return (false) ;
+ }
+
+ if (p [0] != 0)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ;
+ stats [COLAMD_INFO1] = p [0] ;
+ COLAMD_DEBUG0 (("colamd: p[0] not zero %d\n", p [0])) ;
+ return (false) ;
+ }
+
+ /* === If no knobs, set default knobs =================================== */
+
+ if (!knobs)
+ {
+ colamd_set_defaults (default_knobs) ;
+ knobs = default_knobs ;
+ }
+
+ /* === Allocate the Row and Col arrays from array A ===================== */
+
+ Col_size = colamd_c (n_col) ;
+ Row_size = colamd_r (n_row) ;
+ need = 2*nnz + n_col + Col_size + Row_size ;
+
+ if (need > Alen)
+ {
+ /* not enough space in array A to perform the ordering */
+ stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ;
+ stats [COLAMD_INFO1] = need ;
+ stats [COLAMD_INFO2] = Alen ;
+ COLAMD_DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen));
+ return (false) ;
+ }
+
+ Alen -= Col_size + Row_size ;
+ Col = (colamd_col<Index> *) &A [Alen] ;
+ Row = (Colamd_Row<Index> *) &A [Alen + Col_size] ;
+
+ /* === Construct the row and column data structures ===================== */
+
+ if (!Eigen::internal::init_rows_cols (n_row, n_col, Row, Col, A, p, stats))
+ {
+ /* input matrix is invalid */
+ COLAMD_DEBUG0 (("colamd: Matrix invalid\n")) ;
+ return (false) ;
+ }
+
+ /* === Initialize scores, kill dense rows/columns ======================= */
+
+ Eigen::internal::init_scoring (n_row, n_col, Row, Col, A, p, knobs,
+ &n_row2, &n_col2, &max_deg) ;
+
+ /* === Order the supercolumns =========================================== */
+
+ ngarbage = Eigen::internal::find_ordering (n_row, n_col, Alen, Row, Col, A, p,
+ n_col2, max_deg, 2*nnz) ;
+
+ /* === Order the non-principal columns ================================== */
+
+ Eigen::internal::order_children (n_col, Col, p) ;
+
+ /* === Return statistics in stats ======================================= */
+
+ stats [COLAMD_DENSE_ROW] = n_row - n_row2 ;
+ stats [COLAMD_DENSE_COL] = n_col - n_col2 ;
+ stats [COLAMD_DEFRAG_COUNT] = ngarbage ;
+ COLAMD_DEBUG0 (("colamd: done.\n")) ;
+ return (true) ;
+}
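The user-facing entry point for this routine is the COLAMDOrdering functor in the OrderingMethods module; it sizes the work array via colamd_recommended and fills A and p before calling colamd. A short sketch (pattern illustrative):

    #include <Eigen/Sparse>
    #include <Eigen/OrderingMethods>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(3, 3);
      A.insert(0, 0) = 1; A.insert(1, 0) = 1; // a small unsymmetric pattern
      A.insert(1, 1) = 1; A.insert(2, 2) = 1;
      A.makeCompressed(); // COLAMDOrdering requires compressed storage

      Eigen::COLAMDOrdering<int> ordering;
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
      ordering(A, perm);
      std::cout << perm.indices().transpose() << std::endl;
      return 0;
    }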
+
+/* ========================================================================== */
+/* === NON-USER-CALLABLE ROUTINES: ========================================== */
+/* ========================================================================== */
+
+/* There are no user-callable routines beyond this point in the file */
+
+
+/* ========================================================================== */
+/* === init_rows_cols ======================================================= */
+/* ========================================================================== */
+
+/*
+ Takes the column form of the matrix in A and creates the row form of the
+ matrix. Also, row and column attributes are stored in the Col and Row
+ structs. If the columns are un-sorted or contain duplicate row indices,
+ this routine will also sort and remove duplicate row indices from the
+ column form of the matrix. Returns false if the matrix is invalid,
+ true otherwise. Not user-callable.
+*/
+template <typename Index>
+static Index init_rows_cols /* returns true if OK, or false otherwise */
+ (
+ /* === Parameters ======================================================= */
+
+ Index n_row, /* number of rows of A */
+ Index n_col, /* number of columns of A */
+ Colamd_Row<Index> Row [], /* of size n_row+1 */
+ colamd_col<Index> Col [], /* of size n_col+1 */
+ Index A [], /* row indices of A, of size Alen */
+ Index p [], /* pointers to columns in A, of size n_col+1 */
+ Index stats [COLAMD_STATS] /* colamd statistics */
+ )
+{
+ /* === Local variables ================================================== */
+
+ Index col ; /* a column index */
+ Index row ; /* a row index */
+ Index *cp ; /* a column pointer */
+ Index *cp_end ; /* a pointer to the end of a column */
+ Index *rp ; /* a row pointer */
+ Index *rp_end ; /* a pointer to the end of a row */
+ Index last_row ; /* previous row */
+
+ /* === Initialize columns, and check column pointers ==================== */
+
+ for (col = 0 ; col < n_col ; col++)
+ {
+ Col [col].start = p [col] ;
+ Col [col].length = p [col+1] - p [col] ;
+
+ if (Col [col].length < 0)
+ {
+ /* column pointers must be non-decreasing */
+ stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
+ stats [COLAMD_INFO1] = col ;
+ stats [COLAMD_INFO2] = Col [col].length ;
+ COLAMD_DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ;
+ return (false) ;
+ }
+
+ Col [col].shared1.thickness = 1 ;
+ Col [col].shared2.score = 0 ;
+ Col [col].shared3.prev = COLAMD_EMPTY ;
+ Col [col].shared4.degree_next = COLAMD_EMPTY ;
+ }
+
+ /* p [0..n_col] no longer needed, used as "head" in subsequent routines */
+
+ /* === Scan columns, compute row degrees, and check row indices ========= */
+
+ stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/
+
+ for (row = 0 ; row < n_row ; row++)
+ {
+ Row [row].length = 0 ;
+ Row [row].shared2.mark = -1 ;
+ }
+
+ for (col = 0 ; col < n_col ; col++)
+ {
+ last_row = -1 ;
+
+ cp = &A [p [col]] ;
+ cp_end = &A [p [col+1]] ;
+
+ while (cp < cp_end)
+ {
+ row = *cp++ ;
+
+ /* make sure row indices within range */
+ if (row < 0 || row >= n_row)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ;
+ stats [COLAMD_INFO1] = col ;
+ stats [COLAMD_INFO2] = row ;
+ stats [COLAMD_INFO3] = n_row ;
+ COLAMD_DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ;
+ return (false) ;
+ }
+
+ if (row <= last_row || Row [row].shared2.mark == col)
+ {
+ /* row indices are unsorted or repeated (or both), thus col */
+ /* is jumbled. This is a notice, not an error condition. */
+ stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ;
+ stats [COLAMD_INFO1] = col ;
+ stats [COLAMD_INFO2] = row ;
+ (stats [COLAMD_INFO3]) ++ ;
+ COLAMD_DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col));
+ }
+
+ if (Row [row].shared2.mark != col)
+ {
+ Row [row].length++ ;
+ }
+ else
+ {
+ /* this is a repeated entry in the column, */
+ /* it will be removed */
+ Col [col].length-- ;
+ }
+
+ /* mark the row as having been seen in this column */
+ Row [row].shared2.mark = col ;
+
+ last_row = row ;
+ }
+ }
+
+ /* === Compute row pointers ============================================= */
+
+ /* row form of the matrix starts directly after the column */
+ /* form of matrix in A */
+ Row [0].start = p [n_col] ;
+ Row [0].shared1.p = Row [0].start ;
+ Row [0].shared2.mark = -1 ;
+ for (row = 1 ; row < n_row ; row++)
+ {
+ Row [row].start = Row [row-1].start + Row [row-1].length ;
+ Row [row].shared1.p = Row [row].start ;
+ Row [row].shared2.mark = -1 ;
+ }
+
+ /* === Create row form ================================================== */
+
+ if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+ {
+ /* if cols jumbled, watch for repeated row indices */
+ for (col = 0 ; col < n_col ; col++)
+ {
+ cp = &A [p [col]] ;
+ cp_end = &A [p [col+1]] ;
+ while (cp < cp_end)
+ {
+ row = *cp++ ;
+ if (Row [row].shared2.mark != col)
+ {
+ A [(Row [row].shared1.p)++] = col ;
+ Row [row].shared2.mark = col ;
+ }
+ }
+ }
+ }
+ else
+ {
+ /* if cols not jumbled, we don't need the mark (this is faster) */
+ for (col = 0 ; col < n_col ; col++)
+ {
+ cp = &A [p [col]] ;
+ cp_end = &A [p [col+1]] ;
+ while (cp < cp_end)
+ {
+ A [(Row [*cp++].shared1.p)++] = col ;
+ }
+ }
+ }
+
+ /* === Clear the row marks and set row degrees ========================== */
+
+ for (row = 0 ; row < n_row ; row++)
+ {
+ Row [row].shared2.mark = 0 ;
+ Row [row].shared1.degree = Row [row].length ;
+ }
+
+ /* === See if we need to re-create columns ============================== */
+
+ if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+ {
+ COLAMD_DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ;
+
+
+ /* === Compute col pointers ========================================= */
+
+ /* col form of the matrix starts at A [0]. */
+ /* Note, we may have a gap between the col form and the row */
+ /* form if there were duplicate entries, if so, it will be */
+ /* removed upon the first garbage collection */
+ Col [0].start = 0 ;
+ p [0] = Col [0].start ;
+ for (col = 1 ; col < n_col ; col++)
+ {
+ /* note that the lengths here are for pruned columns, i.e. */
+ /* no duplicate row indices will exist for these columns */
+ Col [col].start = Col [col-1].start + Col [col-1].length ;
+ p [col] = Col [col].start ;
+ }
+
+ /* === Re-create col form =========================================== */
+
+ for (row = 0 ; row < n_row ; row++)
+ {
+ rp = &A [Row [row].start] ;
+ rp_end = rp + Row [row].length ;
+ while (rp < rp_end)
+ {
+ A [(p [*rp++])++] = row ;
+ }
+ }
+ }
+
+ /* === Done. Matrix is not (or no longer) jumbled ====================== */
+
+ return (true) ;
+}
+
+
+/* ========================================================================== */
+/* === init_scoring ========================================================= */
+/* ========================================================================== */
+
+/*
+ Kills dense or empty columns and rows, calculates an initial score for
+ each column, and places all columns in the degree lists. Not user-callable.
+*/
+template <typename Index>
+static void init_scoring
+ (
+ /* === Parameters ======================================================= */
+
+ Index n_row, /* number of rows of A */
+ Index n_col, /* number of columns of A */
+ Colamd_Row<Index> Row [], /* of size n_row+1 */
+ colamd_col<Index> Col [], /* of size n_col+1 */
+ Index A [], /* column form and row form of A */
+ Index head [], /* of size n_col+1 */
+ double knobs [COLAMD_KNOBS],/* parameters */
+ Index *p_n_row2, /* number of non-dense, non-empty rows */
+ Index *p_n_col2, /* number of non-dense, non-empty columns */
+ Index *p_max_deg /* maximum row degree */
+ )
+{
+ /* === Local variables ================================================== */
+
+ Index c ; /* a column index */
+ Index r, row ; /* a row index */
+ Index *cp ; /* a column pointer */
+ Index deg ; /* degree of a row or column */
+ Index *cp_end ; /* a pointer to the end of a column */
+ Index *new_cp ; /* new column pointer */
+ Index col_length ; /* length of pruned column */
+ Index score ; /* current column score */
+ Index n_col2 ; /* number of non-dense, non-empty columns */
+ Index n_row2 ; /* number of non-dense, non-empty rows */
+ Index dense_row_count ; /* remove rows with more entries than this */
+ Index dense_col_count ; /* remove cols with more entries than this */
+ Index min_score ; /* smallest column score */
+ Index max_deg ; /* maximum row degree */
+ Index next_col ; /* Used to add to degree list.*/
+
+
+ /* === Extract knobs ==================================================== */
+
+ dense_row_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_ROW] * n_col, n_col)) ;
+ dense_col_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_COL] * n_row, n_row)) ;
+ COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
+ max_deg = 0 ;
+ n_col2 = n_col ;
+ n_row2 = n_row ;
+
+ /* === Kill empty columns =============================================== */
+
+ /* Put the empty columns at the end in their natural order, so that LU */
+ /* factorization can proceed as far as possible. */
+ for (c = n_col-1 ; c >= 0 ; c--)
+ {
+ deg = Col [c].length ;
+ if (deg == 0)
+ {
+ /* this is an empty column, kill and order it last */
+ Col [c].shared2.order = --n_col2 ;
+ KILL_PRINCIPAL_COL (c) ;
+ }
+ }
+ COLAMD_DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ;
+
+ /* === Kill dense columns =============================================== */
+
+ /* Put the dense columns at the end, in their natural order */
+ for (c = n_col-1 ; c >= 0 ; c--)
+ {
+ /* skip any dead columns */
+ if (COL_IS_DEAD (c))
+ {
+ continue ;
+ }
+ deg = Col [c].length ;
+ if (deg > dense_col_count)
+ {
+ /* this is a dense column, kill and order it last */
+ Col [c].shared2.order = --n_col2 ;
+ /* decrement the row degrees */
+ cp = &A [Col [c].start] ;
+ cp_end = cp + Col [c].length ;
+ while (cp < cp_end)
+ {
+ Row [*cp++].shared1.degree-- ;
+ }
+ KILL_PRINCIPAL_COL (c) ;
+ }
+ }
+ COLAMD_DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ;
+
+ /* === Kill dense and empty rows ======================================== */
+
+ for (r = 0 ; r < n_row ; r++)
+ {
+ deg = Row [r].shared1.degree ;
+ COLAMD_ASSERT (deg >= 0 && deg <= n_col) ;
+ if (deg > dense_row_count || deg == 0)
+ {
+ /* kill a dense or empty row */
+ KILL_ROW (r) ;
+ --n_row2 ;
+ }
+ else
+ {
+ /* keep track of max degree of remaining rows */
+ max_deg = COLAMD_MAX (max_deg, deg) ;
+ }
+ }
+ COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ;
+
+ /* === Compute initial column scores ==================================== */
+
+ /* At this point the row degrees are accurate. They reflect the number */
+ /* of "live" (non-dense) columns in each row. No empty rows exist. */
+ /* Some "live" columns may contain only dead rows, however. These are */
+ /* pruned in the code below. */
+
+ /* now find the initial matlab score for each column */
+ for (c = n_col-1 ; c >= 0 ; c--)
+ {
+ /* skip dead column */
+ if (COL_IS_DEAD (c))
+ {
+ continue ;
+ }
+ score = 0 ;
+ cp = &A [Col [c].start] ;
+ new_cp = cp ;
+ cp_end = cp + Col [c].length ;
+ while (cp < cp_end)
+ {
+ /* get a row */
+ row = *cp++ ;
+ /* skip if dead */
+ if (ROW_IS_DEAD (row))
+ {
+ continue ;
+ }
+ /* compact the column */
+ *new_cp++ = row ;
+ /* add row's external degree */
+ score += Row [row].shared1.degree - 1 ;
+ /* guard against integer overflow */
+ score = COLAMD_MIN (score, n_col) ;
+ }
+ /* determine pruned column length */
+ col_length = (Index) (new_cp - &A [Col [c].start]) ;
+ if (col_length == 0)
+ {
+ /* a newly-made null column (all rows in this col are "dense" */
+ /* and have already been killed) */
+ COLAMD_DEBUG2 (("Newly null killed: %d\n", c)) ;
+ Col [c].shared2.order = --n_col2 ;
+ KILL_PRINCIPAL_COL (c) ;
+ }
+ else
+ {
+ /* set column length and set score */
+ COLAMD_ASSERT (score >= 0) ;
+ COLAMD_ASSERT (score <= n_col) ;
+ Col [c].length = col_length ;
+ Col [c].shared2.score = score ;
+ }
+ }
+ COLAMD_DEBUG1 (("colamd: Dense, null, and newly-null columns killed: %d\n",
+ n_col-n_col2)) ;
+
+ /* At this point, all empty rows and columns are dead. All live columns */
+ /* are "clean" (containing no dead rows) and simplicial (no supercolumns */
+ /* yet). Rows may contain dead columns, but all live rows contain at */
+ /* least one live column. */
+
+ /* === Initialize degree lists ========================================== */
+
+
+ /* clear the hash buckets */
+ for (c = 0 ; c <= n_col ; c++)
+ {
+ head [c] = COLAMD_EMPTY ;
+ }
+ min_score = n_col ;
+ /* place in reverse order, so low column indices are at the front */
+ /* of the lists. This is to encourage natural tie-breaking */
+ for (c = n_col-1 ; c >= 0 ; c--)
+ {
+ /* only add principal columns to degree lists */
+ if (COL_IS_ALIVE (c))
+ {
+ COLAMD_DEBUG4 (("place %d score %d minscore %d ncol %d\n",
+ c, Col [c].shared2.score, min_score, n_col)) ;
+
+ /* === Add columns score to DList =============================== */
+
+ score = Col [c].shared2.score ;
+
+ COLAMD_ASSERT (min_score >= 0) ;
+ COLAMD_ASSERT (min_score <= n_col) ;
+ COLAMD_ASSERT (score >= 0) ;
+ COLAMD_ASSERT (score <= n_col) ;
+ COLAMD_ASSERT (head [score] >= COLAMD_EMPTY) ;
+
+ /* now add this column to dList at proper score location */
+ next_col = head [score] ;
+ Col [c].shared3.prev = COLAMD_EMPTY ;
+ Col [c].shared4.degree_next = next_col ;
+
+ /* if there already was a column with the same score, set its */
+ /* previous pointer to this new column */
+ if (next_col != COLAMD_EMPTY)
+ {
+ Col [next_col].shared3.prev = c ;
+ }
+ head [score] = c ;
+
+ /* see if this score is less than current min */
+ min_score = COLAMD_MIN (min_score, score) ;
+
+
+ }
+ }
+
+
+ /* === Return number of remaining columns, and max row degree =========== */
+
+ *p_n_col2 = n_col2 ;
+ *p_n_row2 = n_row2 ;
+ *p_max_deg = max_deg ;
+}
+
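+/*
+   The degree lists built above are bucketed doubly-linked lists: head [s]
+   holds the first column with score s, and columns are chained through
+   shared3.prev and shared4.degree_next.  A minimal standalone sketch of the
+   same structure (illustrative only, not part of colamd; the container
+   names are hypothetical):
+
+       const int EMPTY = -1 ;
+       std::vector<int> head (n_col + 1, EMPTY) ; // head [s]: first col with score s
+       std::vector<int> prev (n_col, EMPTY), next (n_col, EMPTY) ;
+
+       // push column c with score s (mirrors the placement loop above)
+       next [c] = head [s] ;
+       prev [c] = EMPTY ;
+       if (head [s] != EMPTY) prev [head [s]] = c ;
+       head [s] = c ;
+       min_score = std::min (min_score, s) ;
+
+   Extracting a minimum-score column is then cheap: advance min_score past
+   empty buckets and unlink head [min_score], exactly as find_ordering does
+   below.
+*/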
+
+/* ========================================================================== */
+/* === find_ordering ======================================================== */
+/* ========================================================================== */
+
+/*
+ Order the principal columns of the supercolumn form of the matrix
+  (no supercolumns on input). Uses an approximate column minimum
+  degree ordering method. Not user-callable.
+*/
+template <typename Index>
+static Index find_ordering /* return the number of garbage collections */
+ (
+ /* === Parameters ======================================================= */
+
+ Index n_row, /* number of rows of A */
+ Index n_col, /* number of columns of A */
+ Index Alen, /* size of A, 2*nnz + n_col or larger */
+ Colamd_Row<Index> Row [], /* of size n_row+1 */
+ colamd_col<Index> Col [], /* of size n_col+1 */
+ Index A [], /* column form and row form of A */
+ Index head [], /* of size n_col+1 */
+ Index n_col2, /* Remaining columns to order */
+ Index max_deg, /* Maximum row degree */
+ Index pfree /* index of first free slot (2*nnz on entry) */
+ )
+{
+ /* === Local variables ================================================== */
+
+ Index k ; /* current pivot ordering step */
+ Index pivot_col ; /* current pivot column */
+ Index *cp ; /* a column pointer */
+ Index *rp ; /* a row pointer */
+ Index pivot_row ; /* current pivot row */
+ Index *new_cp ; /* modified column pointer */
+ Index *new_rp ; /* modified row pointer */
+ Index pivot_row_start ; /* pointer to start of pivot row */
+ Index pivot_row_degree ; /* number of columns in pivot row */
+ Index pivot_row_length ; /* number of supercolumns in pivot row */
+ Index pivot_col_score ; /* score of pivot column */
+ Index needed_memory ; /* free space needed for pivot row */
+ Index *cp_end ; /* pointer to the end of a column */
+ Index *rp_end ; /* pointer to the end of a row */
+ Index row ; /* a row index */
+ Index col ; /* a column index */
+ Index max_score ; /* maximum possible score */
+ Index cur_score ; /* score of current column */
+ unsigned int hash ; /* hash value for supernode detection */
+ Index head_column ; /* head of hash bucket */
+ Index first_col ; /* first column in hash bucket */
+ Index tag_mark ; /* marker value for mark array */
+ Index row_mark ; /* Row [row].shared2.mark */
+ Index set_difference ; /* set difference size of row with pivot row */
+ Index min_score ; /* smallest column score */
+ Index col_thickness ; /* "thickness" (no. of columns in a supercol) */
+ Index max_mark ; /* maximum value of tag_mark */
+ Index pivot_col_thickness ; /* number of columns represented by pivot col */
+ Index prev_col ; /* Used by Dlist operations. */
+ Index next_col ; /* Used by Dlist operations. */
+ Index ngarbage ; /* number of garbage collections performed */
+
+
+ /* === Initialization and clear mark ==================================== */
+
+ max_mark = INT_MAX - n_col ; /* INT_MAX defined in <limits.h> */
+ tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+ min_score = 0 ;
+ ngarbage = 0 ;
+ COLAMD_DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ;
+
+ /* === Order the columns ================================================ */
+
+ for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */)
+ {
+
+ /* === Select pivot column, and order it ============================ */
+
+ /* make sure degree list isn't empty */
+ COLAMD_ASSERT (min_score >= 0) ;
+ COLAMD_ASSERT (min_score <= n_col) ;
+ COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ;
+
+ /* get pivot column from head of minimum degree list */
+ while (head [min_score] == COLAMD_EMPTY && min_score < n_col)
+ {
+ min_score++ ;
+ }
+ pivot_col = head [min_score] ;
+ COLAMD_ASSERT (pivot_col >= 0 && pivot_col <= n_col) ;
+ next_col = Col [pivot_col].shared4.degree_next ;
+ head [min_score] = next_col ;
+ if (next_col != COLAMD_EMPTY)
+ {
+ Col [next_col].shared3.prev = COLAMD_EMPTY ;
+ }
+
+ COLAMD_ASSERT (COL_IS_ALIVE (pivot_col)) ;
+ COLAMD_DEBUG3 (("Pivot col: %d\n", pivot_col)) ;
+
+ /* remember score for defrag check */
+ pivot_col_score = Col [pivot_col].shared2.score ;
+
+ /* the pivot column is the kth column in the pivot order */
+ Col [pivot_col].shared2.order = k ;
+
+ /* increment order count by column thickness */
+ pivot_col_thickness = Col [pivot_col].shared1.thickness ;
+ k += pivot_col_thickness ;
+ COLAMD_ASSERT (pivot_col_thickness > 0) ;
+
+ /* === Garbage_collection, if necessary ============================= */
+
+ needed_memory = COLAMD_MIN (pivot_col_score, n_col - k) ;
+ if (pfree + needed_memory >= Alen)
+ {
+ pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
+ ngarbage++ ;
+ /* after garbage collection we will have enough */
+ COLAMD_ASSERT (pfree + needed_memory < Alen) ;
+ /* garbage collection has wiped out the Row[].shared2.mark array */
+ tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+
+ }
+
+ /* === Compute pivot row pattern ==================================== */
+
+ /* get starting location for this new merged row */
+ pivot_row_start = pfree ;
+
+ /* initialize new row counts to zero */
+ pivot_row_degree = 0 ;
+
+ /* tag pivot column as having been visited so it isn't included */
+ /* in merged pivot row */
+ Col [pivot_col].shared1.thickness = -pivot_col_thickness ;
+
+ /* pivot row is the union of all rows in the pivot column pattern */
+ cp = &A [Col [pivot_col].start] ;
+ cp_end = cp + Col [pivot_col].length ;
+ while (cp < cp_end)
+ {
+ /* get a row */
+ row = *cp++ ;
+ COLAMD_DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ;
+ /* skip if row is dead */
+ if (ROW_IS_DEAD (row))
+ {
+ continue ;
+ }
+ rp = &A [Row [row].start] ;
+ rp_end = rp + Row [row].length ;
+ while (rp < rp_end)
+ {
+ /* get a column */
+ col = *rp++ ;
+ /* add the column, if alive and untagged */
+ col_thickness = Col [col].shared1.thickness ;
+ if (col_thickness > 0 && COL_IS_ALIVE (col))
+ {
+ /* tag column in pivot row */
+ Col [col].shared1.thickness = -col_thickness ;
+ COLAMD_ASSERT (pfree < Alen) ;
+ /* place column in pivot row */
+ A [pfree++] = col ;
+ pivot_row_degree += col_thickness ;
+ }
+ }
+ }
+
+ /* clear tag on pivot column */
+ Col [pivot_col].shared1.thickness = pivot_col_thickness ;
+ max_deg = COLAMD_MAX (max_deg, pivot_row_degree) ;
+
+
+ /* === Kill all rows used to construct pivot row ==================== */
+
+ /* also kill pivot row, temporarily */
+ cp = &A [Col [pivot_col].start] ;
+ cp_end = cp + Col [pivot_col].length ;
+ while (cp < cp_end)
+ {
+ /* may be killing an already dead row */
+ row = *cp++ ;
+ COLAMD_DEBUG3 (("Kill row in pivot col: %d\n", row)) ;
+ KILL_ROW (row) ;
+ }
+
+ /* === Select a row index to use as the new pivot row =============== */
+
+ pivot_row_length = pfree - pivot_row_start ;
+ if (pivot_row_length > 0)
+ {
+ /* pick the "pivot" row arbitrarily (first row in col) */
+ pivot_row = A [Col [pivot_col].start] ;
+ COLAMD_DEBUG3 (("Pivotal row is %d\n", pivot_row)) ;
+ }
+ else
+ {
+ /* there is no pivot row, since it is of zero length */
+ pivot_row = COLAMD_EMPTY ;
+ COLAMD_ASSERT (pivot_row_length == 0) ;
+ }
+ COLAMD_ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ;
+
+ /* === Approximate degree computation =============================== */
+
+ /* Here begins the computation of the approximate degree. The column */
+ /* score is the sum of the pivot row "length", plus the size of the */
+ /* set differences of each row in the column minus the pattern of the */
+ /* pivot row itself. The column ("thickness") itself is also */
+ /* excluded from the column score (we thus use an approximate */
+ /* external degree). */
+
+ /* The time taken by the following code (compute set differences, and */
+ /* add them up) is proportional to the size of the data structure */
+ /* being scanned - that is, the sum of the sizes of each column in */
+ /* the pivot row. Thus, the amortized time to compute a column score */
+ /* is proportional to the size of that column (where size, in this */
+ /* context, is the column "length", or the number of row indices */
+ /* in that column). The number of row indices in a column is */
+ /* monotonically non-decreasing, from the length of the original */
+ /* column on input to colamd. */
+
+ /* === Compute set differences ====================================== */
+
+ COLAMD_DEBUG3 (("** Computing set differences phase. **\n")) ;
+
+ /* pivot row is currently dead - it will be revived later. */
+
+ COLAMD_DEBUG3 (("Pivot row: ")) ;
+ /* for each column in pivot row */
+ rp = &A [pivot_row_start] ;
+ rp_end = rp + pivot_row_length ;
+ while (rp < rp_end)
+ {
+ col = *rp++ ;
+ COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+ COLAMD_DEBUG3 (("Col: %d\n", col)) ;
+
+ /* clear tags used to construct pivot row pattern */
+ col_thickness = -Col [col].shared1.thickness ;
+ COLAMD_ASSERT (col_thickness > 0) ;
+ Col [col].shared1.thickness = col_thickness ;
+
+ /* === Remove column from degree list =========================== */
+
+ cur_score = Col [col].shared2.score ;
+ prev_col = Col [col].shared3.prev ;
+ next_col = Col [col].shared4.degree_next ;
+ COLAMD_ASSERT (cur_score >= 0) ;
+ COLAMD_ASSERT (cur_score <= n_col) ;
+ COLAMD_ASSERT (cur_score >= COLAMD_EMPTY) ;
+ if (prev_col == COLAMD_EMPTY)
+ {
+ head [cur_score] = next_col ;
+ }
+ else
+ {
+ Col [prev_col].shared4.degree_next = next_col ;
+ }
+ if (next_col != COLAMD_EMPTY)
+ {
+ Col [next_col].shared3.prev = prev_col ;
+ }
+
+ /* === Scan the column ========================================== */
+
+ cp = &A [Col [col].start] ;
+ cp_end = cp + Col [col].length ;
+ while (cp < cp_end)
+ {
+ /* get a row */
+ row = *cp++ ;
+ row_mark = Row [row].shared2.mark ;
+ /* skip if dead */
+ if (ROW_IS_MARKED_DEAD (row_mark))
+ {
+ continue ;
+ }
+ COLAMD_ASSERT (row != pivot_row) ;
+ set_difference = row_mark - tag_mark ;
+ /* check if the row has been seen yet */
+ if (set_difference < 0)
+ {
+ COLAMD_ASSERT (Row [row].shared1.degree <= max_deg) ;
+ set_difference = Row [row].shared1.degree ;
+ }
+ /* subtract column thickness from this row's set difference */
+ set_difference -= col_thickness ;
+ COLAMD_ASSERT (set_difference >= 0) ;
+ /* absorb this row if the set difference becomes zero */
+ if (set_difference == 0)
+ {
+ COLAMD_DEBUG3 (("aggressive absorption. Row: %d\n", row)) ;
+ KILL_ROW (row) ;
+ }
+ else
+ {
+ /* save the new mark */
+ Row [row].shared2.mark = set_difference + tag_mark ;
+ }
+ }
+ }
+
+
+ /* === Add up set differences for each column ======================= */
+
+ COLAMD_DEBUG3 (("** Adding set differences phase. **\n")) ;
+
+ /* for each column in pivot row */
+ rp = &A [pivot_row_start] ;
+ rp_end = rp + pivot_row_length ;
+ while (rp < rp_end)
+ {
+ /* get a column */
+ col = *rp++ ;
+ COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+ hash = 0 ;
+ cur_score = 0 ;
+ cp = &A [Col [col].start] ;
+ /* compact the column */
+ new_cp = cp ;
+ cp_end = cp + Col [col].length ;
+
+ COLAMD_DEBUG4 (("Adding set diffs for Col: %d.\n", col)) ;
+
+ while (cp < cp_end)
+ {
+ /* get a row */
+ row = *cp++ ;
+ COLAMD_ASSERT(row >= 0 && row < n_row) ;
+ row_mark = Row [row].shared2.mark ;
+ /* skip if dead */
+ if (ROW_IS_MARKED_DEAD (row_mark))
+ {
+ continue ;
+ }
+ COLAMD_ASSERT (row_mark > tag_mark) ;
+ /* compact the column */
+ *new_cp++ = row ;
+ /* compute hash function */
+ hash += row ;
+ /* add set difference */
+ cur_score += row_mark - tag_mark ;
+ /* integer overflow... */
+ cur_score = COLAMD_MIN (cur_score, n_col) ;
+ }
+
+ /* recompute the column's length */
+ Col [col].length = (Index) (new_cp - &A [Col [col].start]) ;
+
+ /* === Further mass elimination ================================= */
+
+ if (Col [col].length == 0)
+ {
+ COLAMD_DEBUG4 (("further mass elimination. Col: %d\n", col)) ;
+ /* nothing left but the pivot row in this column */
+ KILL_PRINCIPAL_COL (col) ;
+ pivot_row_degree -= Col [col].shared1.thickness ;
+ COLAMD_ASSERT (pivot_row_degree >= 0) ;
+ /* order it */
+ Col [col].shared2.order = k ;
+ /* increment order count by column thickness */
+ k += Col [col].shared1.thickness ;
+ }
+ else
+ {
+ /* === Prepare for supercolumn detection ==================== */
+
+ COLAMD_DEBUG4 (("Preparing supercol detection for Col: %d.\n", col)) ;
+
+ /* save score so far */
+ Col [col].shared2.score = cur_score ;
+
+ /* add column to hash table, for supercolumn detection */
+ hash %= n_col + 1 ;
+
+ COLAMD_DEBUG4 ((" Hash = %d, n_col = %d.\n", hash, n_col)) ;
+ COLAMD_ASSERT (hash <= n_col) ;
+
+ head_column = head [hash] ;
+ if (head_column > COLAMD_EMPTY)
+ {
+ /* degree list "hash" is non-empty, use prev (shared3) of */
+ /* first column in degree list as head of hash bucket */
+ first_col = Col [head_column].shared3.headhash ;
+ Col [head_column].shared3.headhash = col ;
+ }
+ else
+ {
+ /* degree list "hash" is empty, use head as hash bucket */
+ first_col = - (head_column + 2) ;
+ head [hash] = - (col + 2) ;
+ }
+ Col [col].shared4.hash_next = first_col ;
+
+ /* save hash function in Col [col].shared3.hash */
+ Col [col].shared3.hash = (Index) hash ;
+ COLAMD_ASSERT (COL_IS_ALIVE (col)) ;
+ }
+ }
+
+ /* The approximate external column degree is now computed. */
+
+ /* === Supercolumn detection ======================================== */
+
+ COLAMD_DEBUG3 (("** Supercolumn detection phase. **\n")) ;
+
+ Eigen::internal::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ;
+
+ /* === Kill the pivotal column ====================================== */
+
+ KILL_PRINCIPAL_COL (pivot_col) ;
+
+ /* === Clear mark =================================================== */
+
+ tag_mark += (max_deg + 1) ;
+ if (tag_mark >= max_mark)
+ {
+ COLAMD_DEBUG2 (("clearing tag_mark\n")) ;
+ tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+ }
+
+ /* === Finalize the new pivot row, and column scores ================ */
+
+ COLAMD_DEBUG3 (("** Finalize scores phase. **\n")) ;
+
+ /* for each column in pivot row */
+ rp = &A [pivot_row_start] ;
+ /* compact the pivot row */
+ new_rp = rp ;
+ rp_end = rp + pivot_row_length ;
+ while (rp < rp_end)
+ {
+ col = *rp++ ;
+ /* skip dead columns */
+ if (COL_IS_DEAD (col))
+ {
+ continue ;
+ }
+ *new_rp++ = col ;
+ /* add new pivot row to column */
+ A [Col [col].start + (Col [col].length++)] = pivot_row ;
+
+ /* retrieve score so far and add on pivot row's degree. */
+ /* (we wait until here for this in case the pivot */
+ /* row's degree was reduced due to mass elimination). */
+ cur_score = Col [col].shared2.score + pivot_row_degree ;
+
+ /* calculate the max possible score as the number of */
+ /* external columns minus the 'k' value minus the */
+ /* columns thickness */
+ max_score = n_col - k - Col [col].shared1.thickness ;
+
+ /* make the score the external degree of the union-of-rows */
+ cur_score -= Col [col].shared1.thickness ;
+
+ /* make sure score is less or equal than the max score */
+ cur_score = COLAMD_MIN (cur_score, max_score) ;
+ COLAMD_ASSERT (cur_score >= 0) ;
+
+ /* store updated score */
+ Col [col].shared2.score = cur_score ;
+
+ /* === Place column back in degree list ========================= */
+
+ COLAMD_ASSERT (min_score >= 0) ;
+ COLAMD_ASSERT (min_score <= n_col) ;
+ COLAMD_ASSERT (cur_score >= 0) ;
+ COLAMD_ASSERT (cur_score <= n_col) ;
+ COLAMD_ASSERT (head [cur_score] >= COLAMD_EMPTY) ;
+ next_col = head [cur_score] ;
+ Col [col].shared4.degree_next = next_col ;
+ Col [col].shared3.prev = COLAMD_EMPTY ;
+ if (next_col != COLAMD_EMPTY)
+ {
+ Col [next_col].shared3.prev = col ;
+ }
+ head [cur_score] = col ;
+
+ /* see if this score is less than current min */
+ min_score = COLAMD_MIN (min_score, cur_score) ;
+
+ }
+
+ /* === Resurrect the new pivot row ================================== */
+
+ if (pivot_row_degree > 0)
+ {
+ /* update pivot row length to reflect any cols that were killed */
+ /* during super-col detection and mass elimination */
+ Row [pivot_row].start = pivot_row_start ;
+ Row [pivot_row].length = (Index) (new_rp - &A[pivot_row_start]) ;
+ Row [pivot_row].shared1.degree = pivot_row_degree ;
+ Row [pivot_row].shared2.mark = 0 ;
+ /* pivot row is no longer dead */
+ }
+ }
+
+ /* === All principal columns have now been ordered ====================== */
+
+ return (ngarbage) ;
+}
+
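+/*
+   A minimal standalone sketch (illustrative only, not part of colamd;
+   pivot_cols, rows_of, degree, thickness and kill_row are hypothetical) of
+   the set-difference trick used in the "Compute set differences" phase
+   above: once every supercolumn c of the pivot row P has been scanned,
+   mark [r] - tag equals |r \ P| for each touched row r, and r itself is
+   never scanned:
+
+       for (int ci = 0 ; ci < (int) pivot_cols.size () ; ci++)
+       {
+         int c = pivot_cols [ci] ;                     // supercolumn of P
+         for (int ri = 0 ; ri < (int) rows_of [c].size () ; ri++)
+         {
+           int r = rows_of [c][ri] ;
+           int d = (mark [r] < tag) ? degree [r]       // first touch of r
+                                    : mark [r] - tag ; // running difference
+           d -= thickness [c] ;   // r and P share the thickness [c] cols of c
+           if (d == 0) kill_row (r) ;   // r inside P: aggressive absorption
+           else mark [r] = d + tag ;
+         }
+       }
+*/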
+
+/* ========================================================================== */
+/* === order_children ======================================================= */
+/* ========================================================================== */
+
+/*
+ The find_ordering routine has ordered all of the principal columns (the
+ representatives of the supercolumns). The non-principal columns have not
+ yet been ordered. This routine orders those columns by walking up the
+ parent tree (a column is a child of the column which absorbed it). The
+  final permutation vector is then placed in p [0 ... n_col-1], with p [0]
+  being the first column, and p [n_col-1] being the last. Although not
+  immediately obvious, this routine takes time O (n_col), that is, linear
+  in the number of columns. Not user-callable.
+*/
+template <typename Index>
+static inline void order_children
+(
+ /* === Parameters ======================================================= */
+
+ Index n_col, /* number of columns of A */
+ colamd_col<Index> Col [], /* of size n_col+1 */
+ Index p [] /* p [0 ... n_col-1] is the column permutation*/
+ )
+{
+ /* === Local variables ================================================== */
+
+ Index i ; /* loop counter for all columns */
+ Index c ; /* column index */
+ Index parent ; /* index of column's parent */
+ Index order ; /* column's order */
+
+ /* === Order each non-principal column ================================== */
+
+ for (i = 0 ; i < n_col ; i++)
+ {
+ /* find an un-ordered non-principal column */
+ COLAMD_ASSERT (COL_IS_DEAD (i)) ;
+ if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == COLAMD_EMPTY)
+ {
+ parent = i ;
+ /* once found, find its principal parent */
+ do
+ {
+ parent = Col [parent].shared1.parent ;
+ } while (!COL_IS_DEAD_PRINCIPAL (parent)) ;
+
+ /* now, order all un-ordered non-principal columns along path */
+ /* to this parent. collapse tree at the same time */
+ c = i ;
+ /* get order of parent */
+ order = Col [parent].shared2.order ;
+
+ do
+ {
+ COLAMD_ASSERT (Col [c].shared2.order == COLAMD_EMPTY) ;
+
+ /* order this column */
+ Col [c].shared2.order = order++ ;
+	  /* collapse tree */
+ Col [c].shared1.parent = parent ;
+
+ /* get immediate parent of this column */
+ c = Col [c].shared1.parent ;
+
+ /* continue until we hit an ordered column. There are */
+	  /* guaranteed not to be any more unordered columns */
+ /* above an ordered column */
+ } while (Col [c].shared2.order == COLAMD_EMPTY) ;
+
+ /* re-order the super_col parent to largest order for this group */
+ Col [parent].shared2.order = order ;
+ }
+ }
+
+ /* === Generate the permutation ========================================= */
+
+ for (c = 0 ; c < n_col ; c++)
+ {
+ p [Col [c].shared2.order] = c ;
+ }
+}
+
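+/*
+   The final loop of order_children above inverts the order values into the
+   permutation: if column c is the k-th column of the ordering, that is
+   Col [c].shared2.order == k, then p [k] = c.  A small worked example
+   (illustrative only): with n_col = 3 and orders order(0) = 2,
+   order(1) = 0, order(2) = 1, the loop yields p = {1, 2, 0}, i.e. column 1
+   comes first in the permutation and column 0 last.
+*/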
+
+/* ========================================================================== */
+/* === detect_super_cols ==================================================== */
+/* ========================================================================== */
+
+/*
+ Detects supercolumns by finding matches between columns in the hash buckets.
+  Checks amongst columns in the set A [row_start ... row_start + row_length-1].
+ The columns under consideration are currently *not* in the degree lists,
+ and have already been placed in the hash buckets.
+
+ The hash bucket for columns whose hash function is equal to h is stored
+ as follows:
+
+ if head [h] is >= 0, then head [h] contains a degree list, so:
+
+ head [h] is the first column in degree bucket h.
+  Col [head [h]].shared3.headhash gives the first column in hash bucket h.
+
+ otherwise, the degree list is empty, and:
+
+ -(head [h] + 2) is the first column in hash bucket h.
+
+ For a column c in a hash bucket, Col [c].shared3.prev is NOT a "previous
+ column" pointer. Col [c].shared3.hash is used instead as the hash number
+ for that column. The value of Col [c].shared4.hash_next is the next column
+ in the same hash bucket.
+
+ Assuming no, or "few" hash collisions, the time taken by this routine is
+ linear in the sum of the sizes (lengths) of each column whose score has
+ just been computed in the approximate degree computation.
+ Not user-callable.
+*/
+template <typename Index>
+static void detect_super_cols
+(
+ /* === Parameters ======================================================= */
+
+ colamd_col<Index> Col [], /* of size n_col+1 */
+ Index A [], /* row indices of A */
+ Index head [], /* head of degree lists and hash buckets */
+ Index row_start, /* pointer to set of columns to check */
+ Index row_length /* number of columns to check */
+)
+{
+ /* === Local variables ================================================== */
+
+ Index hash ; /* hash value for a column */
+ Index *rp ; /* pointer to a row */
+ Index c ; /* a column index */
+ Index super_c ; /* column index of the column to absorb into */
+ Index *cp1 ; /* column pointer for column super_c */
+ Index *cp2 ; /* column pointer for column c */
+ Index length ; /* length of column super_c */
+ Index prev_c ; /* column preceding c in hash bucket */
+ Index i ; /* loop counter */
+ Index *rp_end ; /* pointer to the end of the row */
+ Index col ; /* a column index in the row to check */
+ Index head_column ; /* first column in hash bucket or degree list */
+ Index first_col ; /* first column in hash bucket */
+
+ /* === Consider each column in the row ================================== */
+
+ rp = &A [row_start] ;
+ rp_end = rp + row_length ;
+ while (rp < rp_end)
+ {
+ col = *rp++ ;
+ if (COL_IS_DEAD (col))
+ {
+ continue ;
+ }
+
+ /* get hash number for this column */
+ hash = Col [col].shared3.hash ;
+ COLAMD_ASSERT (hash <= n_col) ;
+
+ /* === Get the first column in this hash bucket ===================== */
+
+ head_column = head [hash] ;
+ if (head_column > COLAMD_EMPTY)
+ {
+ first_col = Col [head_column].shared3.headhash ;
+ }
+ else
+ {
+ first_col = - (head_column + 2) ;
+ }
+
+ /* === Consider each column in the hash bucket ====================== */
+
+ for (super_c = first_col ; super_c != COLAMD_EMPTY ;
+ super_c = Col [super_c].shared4.hash_next)
+ {
+ COLAMD_ASSERT (COL_IS_ALIVE (super_c)) ;
+ COLAMD_ASSERT (Col [super_c].shared3.hash == hash) ;
+ length = Col [super_c].length ;
+
+ /* prev_c is the column preceding column c in the hash bucket */
+ prev_c = super_c ;
+
+ /* === Compare super_c with all columns after it ================ */
+
+ for (c = Col [super_c].shared4.hash_next ;
+ c != COLAMD_EMPTY ; c = Col [c].shared4.hash_next)
+ {
+ COLAMD_ASSERT (c != super_c) ;
+ COLAMD_ASSERT (COL_IS_ALIVE (c)) ;
+ COLAMD_ASSERT (Col [c].shared3.hash == hash) ;
+
+ /* not identical if lengths or scores are different */
+ if (Col [c].length != length ||
+ Col [c].shared2.score != Col [super_c].shared2.score)
+ {
+ prev_c = c ;
+ continue ;
+ }
+
+ /* compare the two columns */
+ cp1 = &A [Col [super_c].start] ;
+ cp2 = &A [Col [c].start] ;
+
+ for (i = 0 ; i < length ; i++)
+ {
+ /* the columns are "clean" (no dead rows) */
+ COLAMD_ASSERT (ROW_IS_ALIVE (*cp1)) ;
+ COLAMD_ASSERT (ROW_IS_ALIVE (*cp2)) ;
+	    /* row indices will be in the same order for both supercols, */
+	    /* no gather-scatter necessary */
+ if (*cp1++ != *cp2++)
+ {
+ break ;
+ }
+ }
+
+ /* the two columns are different if the for-loop "broke" */
+ if (i != length)
+ {
+ prev_c = c ;
+ continue ;
+ }
+
+ /* === Got it! two columns are identical =================== */
+
+ COLAMD_ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ;
+
+ Col [super_c].shared1.thickness += Col [c].shared1.thickness ;
+ Col [c].shared1.parent = super_c ;
+ KILL_NON_PRINCIPAL_COL (c) ;
+ /* order c later, in order_children() */
+ Col [c].shared2.order = COLAMD_EMPTY ;
+ /* remove c from hash bucket */
+ Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ;
+ }
+ }
+
+ /* === Empty this hash bucket ======================================= */
+
+ if (head_column > COLAMD_EMPTY)
+ {
+ /* corresponding degree list "hash" is not empty */
+ Col [head_column].shared3.headhash = COLAMD_EMPTY ;
+ }
+ else
+ {
+ /* corresponding degree list "hash" is empty */
+ head [hash] = COLAMD_EMPTY ;
+ }
+ }
+}
+
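+/*
+   head [] is shared between degree lists and hash buckets through a small
+   encoding, assuming COLAMD_EMPTY is -1 as in the reference colamd: a
+   column index stored directly in head [hash] is written as -(col + 2),
+   which is always <= -2 and so cannot collide with COLAMD_EMPTY or with a
+   non-negative degree-list head.  A minimal sketch (illustrative only):
+
+       int encode (int col)   { return -(col + 2) ; }   // 0 -> -2, 1 -> -3
+       int decode (int store) { return -(store + 2) ; } // exact inverse
+
+   Thus head [hash] > COLAMD_EMPTY means a degree list is present and the
+   bucket head lives in Col [head [hash]].shared3.headhash, while any other
+   value decodes directly to the first column of the bucket (or to
+   COLAMD_EMPTY when the bucket is empty).
+*/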
+
+/* ========================================================================== */
+/* === garbage_collection =================================================== */
+/* ========================================================================== */
+
+/*
+ Defragments and compacts columns and rows in the workspace A. Used when
+  all available memory has been used while performing row merging. Returns
+  the index of the first free position in A, after garbage collection. The
+  time taken by this routine is linear in the size of the array A, which is
+ itself linear in the number of nonzeros in the input matrix.
+ Not user-callable.
+*/
+template <typename Index>
+static Index garbage_collection /* returns the new value of pfree */
+ (
+ /* === Parameters ======================================================= */
+
+ Index n_row, /* number of rows */
+ Index n_col, /* number of columns */
+ Colamd_Row<Index> Row [], /* row info */
+ colamd_col<Index> Col [], /* column info */
+ Index A [], /* A [0 ... Alen-1] holds the matrix */
+ Index *pfree /* &A [0] ... pfree is in use */
+ )
+{
+ /* === Local variables ================================================== */
+
+ Index *psrc ; /* source pointer */
+ Index *pdest ; /* destination pointer */
+ Index j ; /* counter */
+ Index r ; /* a row index */
+ Index c ; /* a column index */
+ Index length ; /* length of a row or column */
+
+ /* === Defragment the columns =========================================== */
+
+ pdest = &A[0] ;
+ for (c = 0 ; c < n_col ; c++)
+ {
+ if (COL_IS_ALIVE (c))
+ {
+ psrc = &A [Col [c].start] ;
+
+ /* move and compact the column */
+ COLAMD_ASSERT (pdest <= psrc) ;
+ Col [c].start = (Index) (pdest - &A [0]) ;
+ length = Col [c].length ;
+ for (j = 0 ; j < length ; j++)
+ {
+ r = *psrc++ ;
+ if (ROW_IS_ALIVE (r))
+ {
+ *pdest++ = r ;
+ }
+ }
+ Col [c].length = (Index) (pdest - &A [Col [c].start]) ;
+ }
+ }
+
+ /* === Prepare to defragment the rows =================================== */
+
+ for (r = 0 ; r < n_row ; r++)
+ {
+ if (ROW_IS_ALIVE (r))
+ {
+ if (Row [r].length == 0)
+ {
+ /* this row is of zero length. cannot compact it, so kill it */
+ COLAMD_DEBUG3 (("Defrag row kill\n")) ;
+ KILL_ROW (r) ;
+ }
+ else
+ {
+ /* save first column index in Row [r].shared2.first_column */
+ psrc = &A [Row [r].start] ;
+ Row [r].shared2.first_column = *psrc ;
+ COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
+ /* flag the start of the row with the one's complement of row */
+ *psrc = ONES_COMPLEMENT (r) ;
+
+ }
+ }
+ }
+
+ /* === Defragment the rows ============================================== */
+
+ psrc = pdest ;
+ while (psrc < pfree)
+ {
+ /* find a negative number ... the start of a row */
+ if (*psrc++ < 0)
+ {
+ psrc-- ;
+ /* get the row index */
+ r = ONES_COMPLEMENT (*psrc) ;
+ COLAMD_ASSERT (r >= 0 && r < n_row) ;
+ /* restore first column index */
+ *psrc = Row [r].shared2.first_column ;
+ COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
+
+ /* move and compact the row */
+ COLAMD_ASSERT (pdest <= psrc) ;
+ Row [r].start = (Index) (pdest - &A [0]) ;
+ length = Row [r].length ;
+ for (j = 0 ; j < length ; j++)
+ {
+ c = *psrc++ ;
+ if (COL_IS_ALIVE (c))
+ {
+ *pdest++ = c ;
+ }
+ }
+ Row [r].length = (Index) (pdest - &A [Row [r].start]) ;
+
+ }
+ }
+ /* ensure we found all the rows */
+ COLAMD_ASSERT (debug_rows == 0) ;
+
+ /* === Return the new value of pfree ==================================== */
+
+ return ((Index) (pdest - &A [0])) ;
+}
+
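+/*
+   garbage_collection relocates rows in place by overwriting the first
+   entry of each live row with a negative flag, assuming ONES_COMPLEMENT (r)
+   is defined as (-(r)-1) as in the reference colamd.  Column indices are
+   non-negative, so a negative value met during the linear sweep can only
+   be a flagged row start, and the row index is recovered exactly
+   (illustrative only):
+
+       int flag (int r)   { return -r - 1 ; } // 0 -> -1, 1 -> -2, ...
+       int unflag (int f) { return -f - 1 ; } // the same map: its own inverse
+
+   The displaced first column index is parked in
+   Row [r].shared2.first_column and restored once the flag is decoded.
+*/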
+
+/* ========================================================================== */
+/* === clear_mark =========================================================== */
+/* ========================================================================== */
+
+/*
+  Clears the Row [].shared2.mark array, and returns the new tag_mark.
+  Not user-callable.
+*/
+template <typename Index>
+static inline Index clear_mark /* return the new value for tag_mark */
+ (
+ /* === Parameters ======================================================= */
+
+ Index n_row, /* number of rows in A */
+ Colamd_Row<Index> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
+ )
+{
+ /* === Local variables ================================================== */
+
+ Index r ;
+
+ for (r = 0 ; r < n_row ; r++)
+ {
+ if (ROW_IS_ALIVE (r))
+ {
+ Row [r].shared2.mark = 0 ;
+ }
+ }
+ return (1) ;
+}
+
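+/*
+   clear_mark and tag_mark implement the generation-counter idiom: a row is
+   "marked" iff Row [r].shared2.mark >= tag_mark, so bumping tag_mark by
+   max_deg + 1 after each pivot unmarks every row in O(1).  A physical
+   clear is only needed when the counter would overflow (tag_mark >=
+   max_mark, with max_mark = INT_MAX - n_col in find_ordering).  A minimal
+   standalone sketch (illustrative only, not part of colamd):
+
+       int tag = 1 ;
+       std::vector<int> mark (n_row, 0) ; // mark [r] < tag means unmarked
+       // mark a row r:       mark [r] = tag + extra ;
+       // unmark all in O(1): tag += step ;
+       if (tag >= max_mark) { std::fill (mark.begin (), mark.end (), 0) ; tag = 1 ; }
+*/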
+
+} // namespace internal
+#endif
diff --git a/extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h b/extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h
new file mode 100644
index 00000000000..b4da6531a1d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h
@@ -0,0 +1,150 @@
+
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ORDERING_H
+#define EIGEN_ORDERING_H
+
+namespace Eigen {
+
+#include "Eigen_Colamd.h"
+
+namespace internal {
+
+/** \internal
+ * \ingroup OrderingMethods_Module
+ * \returns the symmetric pattern A^T+A from the input matrix A.
+ * FIXME: The values should not be considered here
+ */
+template<typename MatrixType>
+void ordering_helper_at_plus_a(const MatrixType& mat, MatrixType& symmat)
+{
+ MatrixType C;
+ C = mat.transpose(); // NOTE: Could be costly
+ for (int i = 0; i < C.rows(); i++)
+ {
+ for (typename MatrixType::InnerIterator it(C, i); it; ++it)
+ it.valueRef() = 0.0;
+ }
+ symmat = C + mat;
+}
+
+} // end namespace internal
+
+#ifndef EIGEN_MPL2_ONLY
+
+/** \ingroup OrderingMethods_Module
+ * \class AMDOrdering
+ *
+  * Functor computing the \em approximate \em minimum \em degree ordering.
+  * If the matrix is not structurally symmetric, an ordering of A^T+A is computed.
+ * \tparam Index The type of indices of the matrix
+ * \sa COLAMDOrdering
+ */
+template <typename Index>
+class AMDOrdering
+{
+ public:
+ typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+
+    /** Compute the permutation vector from a sparse matrix.
+     * This routine is much faster if the input matrix is column-major.
+ */
+ template <typename MatrixType>
+ void operator()(const MatrixType& mat, PermutationType& perm)
+ {
+ // Compute the symmetric pattern
+ SparseMatrix<typename MatrixType::Scalar, ColMajor, Index> symm;
+ internal::ordering_helper_at_plus_a(mat,symm);
+
+ // Call the AMD routine
+ //m_mat.prune(keep_diag());
+ internal::minimum_degree_ordering(symm, perm);
+ }
+
+    /** Compute the permutation vector from a sparse selfadjoint matrix */
+ template <typename SrcType, unsigned int SrcUpLo>
+ void operator()(const SparseSelfAdjointView<SrcType, SrcUpLo>& mat, PermutationType& perm)
+ {
+ SparseMatrix<typename SrcType::Scalar, ColMajor, Index> C; C = mat;
+
+ // Call the AMD routine
+ // m_mat.prune(keep_diag()); //Remove the diagonal elements
+ internal::minimum_degree_ordering(C, perm);
+ }
+};
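+
+// A minimal usage sketch (illustrative only; 'A' is assumed to be a
+// column-major SparseMatrix<double> filled elsewhere):
+//
+//   AMDOrdering<int> ordering;
+//   PermutationMatrix<Dynamic, Dynamic, int> perm;
+//   ordering(A, perm);                           // ordering of A^T+A
+//   ordering(A.selfadjointView<Lower>(), perm);  // selfadjoint variant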
+
+#endif // EIGEN_MPL2_ONLY
+
+/** \ingroup OrderingMethods_Module
+ * \class NaturalOrdering
+ *
+ * Functor computing the natural ordering (identity)
+ *
+ * \note Returns an empty permutation matrix
+ * \tparam Index The type of indices of the matrix
+ */
+template <typename Index>
+class NaturalOrdering
+{
+ public:
+ typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+
+ /** Compute the permutation vector from a column-major sparse matrix */
+ template <typename MatrixType>
+ void operator()(const MatrixType& /*mat*/, PermutationType& perm)
+ {
+ perm.resize(0);
+ }
+
+};
+
+/** \ingroup OrderingMethods_Module
+ * \class COLAMDOrdering
+ *
+  * Functor computing the \em column \em approximate \em minimum \em degree ordering.
+  * The matrix should be in column-major format.
+ */
+template<typename Index>
+class COLAMDOrdering
+{
+ public:
+ typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+ typedef Matrix<Index, Dynamic, 1> IndexVector;
+
+    /** Compute the permutation vector from a sparse matrix */
+ template <typename MatrixType>
+ void operator() (const MatrixType& mat, PermutationType& perm)
+ {
+ Index m = mat.rows();
+ Index n = mat.cols();
+ Index nnz = mat.nonZeros();
+ // Get the recommended value of Alen to be used by colamd
+ Index Alen = internal::colamd_recommended(nnz, m, n);
+ // Set the default parameters
+ double knobs [COLAMD_KNOBS];
+ Index stats [COLAMD_STATS];
+ internal::colamd_set_defaults(knobs);
+
+ Index info;
+ IndexVector p(n+1), A(Alen);
+ for(Index i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
+ for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
+ // Call Colamd routine to compute the ordering
+ info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
+ eigen_assert( info && "COLAMD failed " );
+
+ perm.resize(n);
+ for (Index i = 0; i < n; i++) perm.indices()(p(i)) = i;
+ }
+};
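+
+// A minimal usage sketch (illustrative only; 'A' is assumed to be a
+// column-major SparseMatrix<double> filled elsewhere).  COLAMDOrdering is
+// also the ordering typically passed to SparseLU as its second template
+// argument:
+//
+//   COLAMDOrdering<int> ordering;
+//   PermutationMatrix<Dynamic, Dynamic, int> perm;
+//   ordering(A, perm);             // fill-reducing column permutation
+//   SparseLU<SparseMatrix<double>, COLAMDOrdering<int> > lu(A);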
+
+} // end namespace Eigen
+
+#endif // EIGEN_ORDERING_H
diff --git a/extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h b/extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h
index 82e137c645a..a955287d1c9 100644
--- a/extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h
+++ b/extern/Eigen3/Eigen/src/PaStiXSupport/PaStiXSupport.h
@@ -157,27 +157,6 @@ class PastixBase : internal::noncopyable
template<typename Rhs,typename Dest>
bool _solve (const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const;
- /** \internal */
- template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
- void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
- {
- eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
- eigen_assert(rows()==b.rows());
-
- // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
- static const int NbColsAtOnce = 1;
- int rhsCols = b.cols();
- int size = b.rows();
- Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
- for(int k=0; k<rhsCols; k+=NbColsAtOnce)
- {
- int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
- tmp.leftCols(actualCols) = b.middleCols(k,actualCols);
- tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols));
- dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView();
- }
- }
-
Derived& derived()
{
return *static_cast<Derived*>(this);
@@ -731,7 +710,7 @@ struct sparse_solve_retval<PastixBase<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
- dec()._solve_sparse(rhs(),dst);
+ this->defaultEvalTo(dst);
}
};
diff --git a/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h b/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
index e6defc8c39e..1c48f0df7b5 100644
--- a/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
+++ b/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -108,6 +108,7 @@ class PardisoImpl
typedef Matrix<Scalar,Dynamic,1> VectorType;
typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+ typedef Array<Index,64,1,DontAlign> ParameterType;
enum {
ScalarIsComplex = NumTraits<Scalar>::IsComplex
};
@@ -142,7 +143,7 @@ class PardisoImpl
/** \warning for advanced usage only.
* \returns a reference to the parameter array controlling PARDISO.
* See the PARDISO manual to know how to use it. */
- Array<Index,64,1>& pardisoParameterArray()
+ ParameterType& pardisoParameterArray()
{
return m_iparm;
}
@@ -205,29 +206,6 @@ class PardisoImpl
template<typename BDerived, typename XDerived>
bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const;
- /** \internal */
- template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
- void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
- {
- eigen_assert(m_size==b.rows());
-
- // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
- static const int NbColsAtOnce = 4;
- int rhsCols = b.cols();
- int size = b.rows();
- // Pardiso cannot solve in-place,
- // so we need two temporaries
- Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_rhs(size,rhsCols);
- Eigen::Matrix<DestScalar,Dynamic,Dynamic,ColMajor> tmp_res(size,rhsCols);
- for(int k=0; k<rhsCols; k+=NbColsAtOnce)
- {
- int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
- tmp_rhs.leftCols(actualCols) = b.middleCols(k,actualCols);
- tmp_res.leftCols(actualCols) = derived().solve(tmp_rhs.leftCols(actualCols));
- dest.middleCols(k,actualCols) = tmp_res.leftCols(actualCols).sparseView();
- }
- }
-
protected:
void pardisoRelease()
{
@@ -295,7 +273,7 @@ class PardisoImpl
bool m_initialized, m_analysisIsOk, m_factorizationIsOk;
Index m_type, m_msglvl;
mutable void *m_pt[64];
- mutable Array<Index,64,1> m_iparm;
+ mutable ParameterType m_iparm;
mutable IntColVectorType m_perm;
Index m_size;
@@ -603,7 +581,7 @@ struct sparse_solve_retval<PardisoImpl<Derived>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
- dec().derived()._solve_sparse(rhs(),dst);
+ this->defaultEvalTo(dst);
}
};
diff --git a/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h
index 2daa23cc354..bec85810ccc 100644
--- a/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h
@@ -55,7 +55,13 @@ template<typename _MatrixType> class ColPivHouseholderQR
typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
typedef typename internal::plain_row_type<MatrixType, RealScalar>::type RealRowVectorType;
- typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
+ typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;
+
+ private:
+
+ typedef typename PermutationType::Index PermIndexType;
+
+ public:
/**
* \brief Default Constructor.
@@ -81,17 +87,29 @@ template<typename _MatrixType> class ColPivHouseholderQR
ColPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs((std::min)(rows,cols)),
- m_colsPermutation(cols),
+ m_colsPermutation(PermIndexType(cols)),
m_colsTranspositions(cols),
m_temp(cols),
m_colSqNorms(cols),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}
+ /** \brief Constructs a QR factorization from a given matrix
+ *
+ * This constructor computes the QR factorization of the matrix \a matrix by calling
+ * the method compute(). It is a short cut for:
+ *
+ * \code
+ * ColPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
+ * qr.compute(matrix);
+ * \endcode
+ *
+ * \sa compute()
+ */
ColPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
- m_colsPermutation(matrix.cols()),
+ m_colsPermutation(PermIndexType(matrix.cols())),
m_colsTranspositions(matrix.cols()),
m_temp(matrix.cols()),
m_colSqNorms(matrix.cols()),
@@ -127,6 +145,10 @@ template<typename _MatrixType> class ColPivHouseholderQR
}
HouseholderSequenceType householderQ(void) const;
+ HouseholderSequenceType matrixQ(void) const
+ {
+ return householderQ();
+ }
/** \returns a reference to the matrix where the Householder QR decomposition is stored
*/
@@ -135,9 +157,25 @@ template<typename _MatrixType> class ColPivHouseholderQR
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
return m_qr;
}
-
+
+ /** \returns a reference to the matrix where the result Householder QR is stored
+ * \warning The strict lower part of this matrix contains internal values.
+ * Only the upper triangular part should be referenced. To get it, use
+ * \code matrixR().template triangularView<Upper>() \endcode
+ * For rank-deficient matrices, use
+ * \code
+ * matrixR().topLeftCorner(rank(), rank()).template triangularView<Upper>()
+ * \endcode
+ */
+ const MatrixType& matrixR() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return m_qr;
+ }
+
ColPivHouseholderQR& compute(const MatrixType& matrix);
+ /** \returns a const reference to the column permutation matrix */
const PermutationType& colsPermutation() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
@@ -181,11 +219,12 @@ template<typename _MatrixType> class ColPivHouseholderQR
*/
inline Index rank() const
{
+ using std::abs;
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
- RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold();
+ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
Index result = 0;
for(Index i = 0; i < m_nonzero_pivots; ++i)
- result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold);
+ result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);
return result;
}
@@ -255,6 +294,11 @@ template<typename _MatrixType> class ColPivHouseholderQR
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return m_qr.cols(); }
+
+ /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+ *
+ * For advanced uses only.
+ */
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
/** Allows to prescribe a threshold to be used by certain methods, such as rank(),
@@ -305,7 +349,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
- : NumTraits<Scalar>::epsilon() * m_qr.diagonalSize();
+ : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
}
/** \returns the number of nonzero pivots in the QR decomposition.
@@ -325,6 +369,18 @@ template<typename _MatrixType> class ColPivHouseholderQR
* diagonal coefficient of R.
*/
RealScalar maxPivot() const { return m_maxpivot; }
+
+    /** \brief Reports whether the QR factorization was successful.
+ *
+ * \note This function always returns \c Success. It is provided for compatibility
+ * with other factorization routines.
+ * \returns \c Success
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return Success;
+ }
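+
+    // A minimal usage sketch (illustrative only): least-squares solve with
+    // rank detection, using the accessors documented above:
+    //
+    //   MatrixXd A = MatrixXd::Random(5,3);
+    //   VectorXd b = VectorXd::Random(5);
+    //   ColPivHouseholderQR<MatrixXd> qr(A);
+    //   VectorXd x = qr.solve(b);        // least-squares solution of Ax = b
+    //   bool full_rank = (qr.rank() == A.cols());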
protected:
MatrixType m_qr;
@@ -342,9 +398,10 @@ template<typename _MatrixType> class ColPivHouseholderQR
template<typename MatrixType>
typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::absDeterminant() const
{
+ using std::abs;
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
- return internal::abs(m_qr.diagonal().prod());
+ return abs(m_qr.diagonal().prod());
}
template<typename MatrixType>
@@ -355,12 +412,22 @@ typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDetermina
return m_qr.diagonal().cwiseAbs().array().log().sum();
}
+/** Performs the QR factorization of the given matrix \a matrix. The result of
+ * the factorization is stored into \c *this, and a reference to \c *this
+ * is returned.
+ *
+ * \sa class ColPivHouseholderQR, ColPivHouseholderQR(const MatrixType&)
+ */
template<typename MatrixType>
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
+ using std::abs;
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = matrix.diagonalSize();
+
+ // the column permutation is stored as int indices, so just to be sure:
+ eigen_assert(cols<=NumTraits<int>::highest());
m_qr = matrix;
m_hCoeffs.resize(size);
@@ -374,7 +441,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
for(Index k = 0; k < cols; ++k)
m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm();
- RealScalar threshold_helper = m_colSqNorms.maxCoeff() * internal::abs2(NumTraits<Scalar>::epsilon()) / RealScalar(rows);
+ RealScalar threshold_helper = m_colSqNorms.maxCoeff() * numext::abs2(NumTraits<Scalar>::epsilon()) / RealScalar(rows);
m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
m_maxpivot = RealScalar(0);
@@ -426,7 +493,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
m_qr.coeffRef(k,k) = beta;
// remember the maximum absolute value of diagonal coefficients
- if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta);
+ if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);
// apply the householder transformation
m_qr.bottomRightCorner(rows-k, cols-k-1)
@@ -436,9 +503,9 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
m_colSqNorms.tail(cols-k-1) -= m_qr.row(k).tail(cols-k-1).cwiseAbs2();
}
- m_colsPermutation.setIdentity(cols);
- for(Index k = 0; k < m_nonzero_pivots; ++k)
- m_colsPermutation.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
+ m_colsPermutation.setIdentity(PermIndexType(cols));
+ for(PermIndexType k = 0; k < m_nonzero_pivots; ++k)
+ m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
m_isInitialized = true;
@@ -458,8 +525,8 @@ struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
{
eigen_assert(rhs().rows() == dec().rows());
- const int cols = dec().cols(),
- nonzero_pivots = dec().nonzeroPivots();
+ const Index cols = dec().cols(),
+ nonzero_pivots = dec().nonzeroPivots();
if(nonzero_pivots == 0)
{
@@ -475,19 +542,11 @@ struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
.transpose()
);
- dec().matrixQR()
+ dec().matrixR()
.topLeftCorner(nonzero_pivots, nonzero_pivots)
.template triangularView<Upper>()
.solveInPlace(c.topRows(nonzero_pivots));
-
- typename Rhs::PlainObject d(c);
- d.topRows(nonzero_pivots)
- = dec().matrixQR()
- .topLeftCorner(nonzero_pivots, nonzero_pivots)
- .template triangularView<Upper>()
- * c.topRows(nonzero_pivots);
-
for(Index i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
for(Index i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
}
diff --git a/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h
index 745ecf8be98..b5b19832652 100644
--- a/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h
+++ b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR_MKL.h
@@ -41,12 +41,13 @@ namespace Eigen {
/** \internal Specialization for the data types supported by MKL */
#define EIGEN_MKL_QR_COLPIV(EIGTYPE, MKLTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \
-template<> inline\
+template<> inline \
ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >& \
ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >::compute( \
const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix) \
\
{ \
+ using std::abs; \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
typedef MatrixType::Scalar Scalar; \
typedef MatrixType::RealScalar RealScalar; \
@@ -71,10 +72,10 @@ ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynami
m_isInitialized = true; \
m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \
m_hCoeffs.adjointInPlace(); \
- RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold(); \
+ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); \
lapack_int *perm = m_colsPermutation.indices().data(); \
for(i=0;i<size;i++) { \
- m_nonzero_pivots += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold);\
+ m_nonzero_pivots += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);\
} \
for(i=0;i<cols;i++) perm[i]--;\
\
diff --git a/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h b/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h
index 37898e77cc2..6168e7abfb4 100644
--- a/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h
@@ -63,9 +63,10 @@ template<typename _MatrixType> class FullPivHouseholderQR
typedef typename MatrixType::Index Index;
typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
- typedef Matrix<Index, 1, ColsAtCompileTime, RowMajor, 1, MaxColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<Index, 1,
+ EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1,
+ EIGEN_SIZE_MIN_PREFER_FIXED(MaxColsAtCompileTime,MaxRowsAtCompileTime)> IntDiagSizeVectorType;
typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
- typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
typedef typename internal::plain_col_type<MatrixType>::type ColVectorType;
@@ -93,20 +94,32 @@ template<typename _MatrixType> class FullPivHouseholderQR
FullPivHouseholderQR(Index rows, Index cols)
: m_qr(rows, cols),
m_hCoeffs((std::min)(rows,cols)),
- m_rows_transpositions(rows),
- m_cols_transpositions(cols),
+ m_rows_transpositions((std::min)(rows,cols)),
+ m_cols_transpositions((std::min)(rows,cols)),
m_cols_permutation(cols),
- m_temp((std::min)(rows,cols)),
+ m_temp(cols),
m_isInitialized(false),
m_usePrescribedThreshold(false) {}
+ /** \brief Constructs a QR factorization from a given matrix
+ *
+ * This constructor computes the QR factorization of the matrix \a matrix by calling
+ * the method compute(). It is a short cut for:
+ *
+ * \code
+ * FullPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
+ * qr.compute(matrix);
+ * \endcode
+ *
+ * \sa compute()
+ */
FullPivHouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
- m_rows_transpositions(matrix.rows()),
- m_cols_transpositions(matrix.cols()),
+ m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())),
+ m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())),
m_cols_permutation(matrix.cols()),
- m_temp((std::min)(matrix.rows(), matrix.cols())),
+ m_temp(matrix.cols()),
m_isInitialized(false),
m_usePrescribedThreshold(false)
{
@@ -114,11 +127,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
}
/** This method finds a solution x to the equation Ax=b, where A is the matrix of which
- * *this is the QR decomposition, if any exists.
+ * \c *this is the QR decomposition.
*
* \param b the right-hand-side of the equation to solve.
*
- * \returns a solution.
+    * \returns the exact or least-squares solution if the rank is greater than or equal to the number of columns of A,
+ * and an arbitrary solution otherwise.
*
* \note The case where b is a matrix is not yet implemented. Also, this
* code is space inefficient.
@@ -152,13 +166,15 @@ template<typename _MatrixType> class FullPivHouseholderQR
FullPivHouseholderQR& compute(const MatrixType& matrix);
+ /** \returns a const reference to the column permutation matrix */
const PermutationType& colsPermutation() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_cols_permutation;
}
- const IntColVectorType& rowsTranspositions() const
+ /** \returns a const reference to the vector of indices representing the rows transpositions */
+ const IntDiagSizeVectorType& rowsTranspositions() const
{
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
return m_rows_transpositions;
@@ -201,11 +217,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
*/
inline Index rank() const
{
+ using std::abs;
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
- RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold();
+ RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
Index result = 0;
for(Index i = 0; i < m_nonzero_pivots; ++i)
- result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold);
+ result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);
return result;
}
@@ -274,6 +291,11 @@ template<typename _MatrixType> class FullPivHouseholderQR
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return m_qr.cols(); }
+
+ /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+ *
+ * For advanced uses only.
+ */
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
/** Allows to prescribe a threshold to be used by certain methods, such as rank(),
@@ -324,7 +346,7 @@ template<typename _MatrixType> class FullPivHouseholderQR
return m_usePrescribedThreshold ? m_prescribedThreshold
// this formula comes from experimenting (see "LU precision tuning" thread on the list)
// and turns out to be identical to Higham's formula used already in LDLt.
- : NumTraits<Scalar>::epsilon() * m_qr.diagonalSize();
+ : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
}
/** \returns the number of nonzero pivots in the QR decomposition.
@@ -348,8 +370,8 @@ template<typename _MatrixType> class FullPivHouseholderQR
protected:
MatrixType m_qr;
HCoeffsType m_hCoeffs;
- IntColVectorType m_rows_transpositions;
- IntRowVectorType m_cols_transpositions;
+ IntDiagSizeVectorType m_rows_transpositions;
+ IntDiagSizeVectorType m_cols_transpositions;
PermutationType m_cols_permutation;
RowVectorType m_temp;
bool m_isInitialized, m_usePrescribedThreshold;
@@ -362,9 +384,10 @@ template<typename _MatrixType> class FullPivHouseholderQR
template<typename MatrixType>
typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const
{
+ using std::abs;
eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
- return internal::abs(m_qr.diagonal().prod());
+ return abs(m_qr.diagonal().prod());
}
template<typename MatrixType>
@@ -375,9 +398,16 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
return m_qr.diagonal().cwiseAbs().array().log().sum();
}
+/** Performs the QR factorization of the given matrix \a matrix. The result of
+ * the factorization is stored into \c *this, and a reference to \c *this
+ * is returned.
+ *
+ * \sa class FullPivHouseholderQR, FullPivHouseholderQR(const MatrixType&)
+ */
template<typename MatrixType>
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
+ using std::abs;
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = (std::min)(rows,cols);
@@ -387,10 +417,10 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
m_temp.resize(cols);
- m_precision = NumTraits<Scalar>::epsilon() * size;
+ m_precision = NumTraits<Scalar>::epsilon() * RealScalar(size);
- m_rows_transpositions.resize(matrix.rows());
- m_cols_transpositions.resize(matrix.cols());
+ m_rows_transpositions.resize(size);
+ m_cols_transpositions.resize(size);
Index number_of_transpositions = 0;
RealScalar biggest(0);
@@ -439,7 +469,7 @@ FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(cons
m_qr.coeffRef(k,k) = beta;
// remember the maximum absolute value of diagonal coefficients
- if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta);
+ if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);
m_qr.bottomRightCorner(rows-k, cols-k-1)
.applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));
@@ -488,17 +518,6 @@ struct solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
dec().hCoeffs().coeff(k), &temp.coeffRef(0));
}
- if(!dec().isSurjective())
- {
- // is c is in the image of R ?
- RealScalar biggest_in_upper_part_of_c = c.topRows( dec().rank() ).cwiseAbs().maxCoeff();
- RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff();
- // FIXME brain dead
- const RealScalar m_precision = NumTraits<Scalar>::epsilon() * (std::min)(rows,cols);
- // this internal:: prefix is needed by at least gcc 3.4 and ICC
- if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision))
- return;
- }
dec().matrixQR()
.topLeftCorner(dec().rank(), dec().rank())
.template triangularView<Upper>()
@@ -520,14 +539,14 @@ template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType
{
public:
typedef typename MatrixType::Index Index;
- typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
+ typedef typename FullPivHouseholderQR<MatrixType>::IntDiagSizeVectorType IntDiagSizeVectorType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef Matrix<typename MatrixType::Scalar, 1, MatrixType::RowsAtCompileTime, RowMajor, 1,
MatrixType::MaxRowsAtCompileTime> WorkVectorType;
FullPivHouseholderQRMatrixQReturnType(const MatrixType& qr,
const HCoeffsType& hCoeffs,
- const IntColVectorType& rowsTranspositions)
+ const IntDiagSizeVectorType& rowsTranspositions)
: m_qr(qr),
m_hCoeffs(hCoeffs),
m_rowsTranspositions(rowsTranspositions)
@@ -544,6 +563,7 @@ public:
template <typename ResultType>
void evalTo(ResultType& result, WorkVectorType& workspace) const
{
+ using numext::conj;
// compute the product H'_0 H'_1 ... H'_n-1,
// where H_k is the k-th Householder transformation I - h_k v_k v_k'
// and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
@@ -555,7 +575,7 @@ public:
for (Index k = size-1; k >= 0; k--)
{
result.block(k, k, rows-k, rows-k)
- .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), internal::conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k));
+ .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k));
result.row(k).swap(result.row(m_rowsTranspositions.coeff(k)));
}
}
@@ -566,7 +586,7 @@ public:
protected:
typename MatrixType::Nested m_qr;
typename HCoeffsType::Nested m_hCoeffs;
- typename IntColVectorType::Nested m_rowsTranspositions;
+ typename IntDiagSizeVectorType::Nested m_rowsTranspositions;
};
} // end namespace internal
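To put the FullPivHouseholderQR changes above in context, a minimal usage sketch assuming plain double matrices: solve() returns an exact solution when b lies in the image of A (and an arbitrary one otherwise, per the note at the top), and matrixQ() evaluates the implicitly stored Householder reflectors into a dense unitary factor.

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
      Eigen::VectorXd b = A * Eigen::VectorXd::Random(4);  // b in the image of A

      Eigen::FullPivHouseholderQR<Eigen::MatrixXd> qr(A);
      Eigen::VectorXd x = qr.solve(b);

      // matrixQ() reconstructs the dense 6x6 unitary factor from the reflectors.
      Eigen::MatrixXd Q = qr.matrixQ();
      std::cout << "||A*x - b||   = " << (A * x - b).norm() << "\n";
      std::cout << "||Q^T*Q - I|| = "
                << (Q.transpose() * Q - Eigen::MatrixXd::Identity(6, 6)).norm() << "\n";
    }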
diff --git a/extern/Eigen3/Eigen/src/QR/HouseholderQR.h b/extern/Eigen3/Eigen/src/QR/HouseholderQR.h
index 5bcb32c1e18..abc61bcbbe1 100644
--- a/extern/Eigen3/Eigen/src/QR/HouseholderQR.h
+++ b/extern/Eigen3/Eigen/src/QR/HouseholderQR.h
@@ -57,14 +57,14 @@ template<typename _MatrixType> class HouseholderQR
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
- typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
+ typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;
/**
- * \brief Default Constructor.
- *
- * The default constructor is useful in cases in which the user intends to
- * perform decompositions via HouseholderQR::compute(const MatrixType&).
- */
+ * \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via HouseholderQR::compute(const MatrixType&).
+ */
HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}
/** \brief Default Constructor with memory preallocation
@@ -79,6 +79,18 @@ template<typename _MatrixType> class HouseholderQR
m_temp(cols),
m_isInitialized(false) {}
+ /** \brief Constructs a QR factorization from a given matrix
+ *
+ * This constructor computes the QR factorization of the matrix \a matrix by calling
+ * the method compute(). It is a short cut for:
+ *
+ * \code
+ * HouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
+ * qr.compute(matrix);
+ * \endcode
+ *
+ * \sa compute()
+ */
HouseholderQR(const MatrixType& matrix)
: m_qr(matrix.rows(), matrix.cols()),
m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
@@ -113,6 +125,14 @@ template<typename _MatrixType> class HouseholderQR
return internal::solve_retval<HouseholderQR, Rhs>(*this, b.derived());
}
+ /** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations.
+ *
+ * The returned expression can directly be used to perform matrix products. It can also be assigned to a dense Matrix object.
+ * Here is an example showing how to recover the full or thin matrix Q, as well as how to perform matrix products using operator*:
+ *
+ * Example: \include HouseholderQR_householderQ.cpp
+ * Output: \verbinclude HouseholderQR_householderQ.out
+ */
HouseholderSequenceType householderQ() const
{
eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
@@ -161,6 +181,11 @@ template<typename _MatrixType> class HouseholderQR
inline Index rows() const { return m_qr.rows(); }
inline Index cols() const { return m_qr.cols(); }
+
+ /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+ *
+ * For advanced uses only.
+ */
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
protected:
@@ -173,9 +198,10 @@ template<typename _MatrixType> class HouseholderQR
template<typename MatrixType>
typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const
{
+ using std::abs;
eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
- return internal::abs(m_qr.diagonal().prod());
+ return abs(m_qr.diagonal().prod());
}
template<typename MatrixType>
@@ -232,7 +258,6 @@ void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
{
typedef typename MatrixQR::Index Index;
typedef typename MatrixQR::Scalar Scalar;
- typedef typename MatrixQR::RealScalar RealScalar;
typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
Index rows = mat.rows();
@@ -309,6 +334,12 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
} // end namespace internal
+/** Performs the QR factorization of the given matrix \a matrix. The result of
+ * the factorization is stored into \c *this, and a reference to \c *this
+ * is returned.
+ *
+ * \sa class HouseholderQR, HouseholderQR(const MatrixType&)
+ */
template<typename MatrixType>
HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
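The householderQ() documentation added above references an external snippet; here is a self-contained sketch of the same idea, assuming MatrixXd, recovering both the full and the thin Q:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
      Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);

      // householderQ() is a HouseholderSequence: usable directly in products.
      Eigen::MatrixXd fullQ = qr.householderQ();                                    // 5x5
      Eigen::MatrixXd thinQ = qr.householderQ() * Eigen::MatrixXd::Identity(5, 3);  // 5x3

      Eigen::MatrixXd R = qr.matrixQR().triangularView<Eigen::Upper>();
      std::cout << "||Q*R - A|| = " << (fullQ * R - A).norm() << "\n";
    }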
diff --git a/extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
new file mode 100644
index 00000000000..a2cc2a9e261
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -0,0 +1,314 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SUITESPARSEQRSUPPORT_H
+#define EIGEN_SUITESPARSEQRSUPPORT_H
+
+namespace Eigen {
+
+ template<typename MatrixType> class SPQR;
+ template<typename SPQRType> struct SPQRMatrixQReturnType;
+ template<typename SPQRType> struct SPQRMatrixQTransposeReturnType;
+ template <typename SPQRType, typename Derived> struct SPQR_QProduct;
+ namespace internal {
+ template <typename SPQRType> struct traits<SPQRMatrixQReturnType<SPQRType> >
+ {
+ typedef typename SPQRType::MatrixType ReturnType;
+ };
+ template <typename SPQRType> struct traits<SPQRMatrixQTransposeReturnType<SPQRType> >
+ {
+ typedef typename SPQRType::MatrixType ReturnType;
+ };
+ template <typename SPQRType, typename Derived> struct traits<SPQR_QProduct<SPQRType, Derived> >
+ {
+ typedef typename Derived::PlainObject ReturnType;
+ };
+ } // End namespace internal
+
+/**
+ * \ingroup SPQRSupport_Module
+ * \class SPQR
+ * \brief Sparse QR factorization based on SuiteSparseQR library
+ *
+ * This class is used to perform a multithreaded and multifrontal rank-revealing QR decomposition
+ * of sparse matrices. The result is then used to solve linear least-squares systems.
+ * Concretely, a QR factorization is computed such that A*P = Q*R, where:
+ *
+ * P is the column permutation. Use colsPermutation() to get it.
+ *
+ * Q is the orthogonal matrix represented as Householder reflectors.
+ * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
+ * You can then apply it to a vector.
+ *
+ * R is the sparse triangular factor. Use matrixR() to get it as a SparseMatrix.
+ * NOTE: the Index type of R is always UF_long. You can get it with SPQR::Index.
+ *
+ * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
+ *
+ */
+template<typename _MatrixType>
+class SPQR
+{
+ public:
+ typedef typename _MatrixType::Scalar Scalar;
+ typedef typename _MatrixType::RealScalar RealScalar;
+ typedef UF_long Index;
+ typedef SparseMatrix<Scalar, ColMajor, Index> MatrixType;
+ typedef PermutationMatrix<Dynamic, Dynamic> PermutationType;
+ public:
+ SPQR()
+ : m_isInitialized(false),
+ m_ordering(SPQR_ORDERING_DEFAULT),
+ m_allow_tol(SPQR_DEFAULT_TOL),
+ m_tolerance (NumTraits<Scalar>::epsilon())
+ {
+ cholmod_l_start(&m_cc);
+ }
+
+ SPQR(const _MatrixType& matrix)
+ : m_isInitialized(false),
+ m_ordering(SPQR_ORDERING_DEFAULT),
+ m_allow_tol(SPQR_DEFAULT_TOL),
+ m_tolerance (NumTraits<Scalar>::epsilon())
+ {
+ cholmod_l_start(&m_cc);
+ compute(matrix);
+ }
+
+ ~SPQR()
+ {
+ SPQR_free();
+ cholmod_l_finish(&m_cc);
+ }
+ void SPQR_free()
+ {
+ cholmod_l_free_sparse(&m_H, &m_cc);
+ cholmod_l_free_sparse(&m_cR, &m_cc);
+ cholmod_l_free_dense(&m_HTau, &m_cc);
+ std::free(m_E);
+ std::free(m_HPinv);
+ }
+
+ void compute(const _MatrixType& matrix)
+ {
+ if(m_isInitialized) SPQR_free();
+
+ MatrixType mat(matrix);
+ cholmod_sparse A;
+ A = viewAsCholmod(mat);
+ Index col = matrix.cols();
+ m_rank = SuiteSparseQR<Scalar>(m_ordering, m_tolerance, col, &A,
+ &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);
+
+ if (!m_cR)
+ {
+ m_info = NumericalIssue;
+ m_isInitialized = false;
+ return;
+ }
+ m_info = Success;
+ m_isInitialized = true;
+ m_isRUpToDate = false;
+ }
+ /**
+ * Get the number of rows of the input matrix and the Q matrix
+ */
+ inline Index rows() const {return m_H->nrow; }
+
+ /**
+ * Get the number of columns of the input matrix.
+ */
+ inline Index cols() const { return m_cR->ncol; }
+
+ /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<SPQR, Rhs> solve(const MatrixBase<Rhs>& B) const
+ {
+ eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
+ eigen_assert(this->rows()==B.rows()
+ && "SPQR::solve(): invalid number of rows of the right hand side matrix B");
+ return internal::solve_retval<SPQR, Rhs>(*this, B.derived());
+ }
+
+ template<typename Rhs, typename Dest>
+ void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+ {
+ eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
+ eigen_assert(b.cols()==1 && "This method is for vectors only");
+
+ //Compute Q^T * b
+ typename Dest::PlainObject y;
+ y = matrixQ().transpose() * b;
+ // Solves with the triangular matrix R
+ Index rk = this->rank();
+ y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y.topRows(rk));
+ y.bottomRows(cols()-rk).setZero();
+ // Apply the column permutation
+ dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
+
+ m_info = Success;
+ }
+
+ /** \returns the sparse triangular factor R. It is a sparse matrix
+ */
+ const MatrixType matrixR() const
+ {
+ eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
+ if(!m_isRUpToDate) {
+ m_R = viewAsEigen<Scalar,ColMajor, typename MatrixType::Index>(*m_cR);
+ m_isRUpToDate = true;
+ }
+ return m_R;
+ }
+ /// Get an expression of the matrix Q
+ SPQRMatrixQReturnType<SPQR> matrixQ() const
+ {
+ return SPQRMatrixQReturnType<SPQR>(*this);
+ }
+ /// Get the permutation that was applied to columns of A
+ PermutationType colsPermutation() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ Index n = m_cR->ncol;
+ PermutationType colsPerm(n);
+ for(Index j = 0; j <n; j++) colsPerm.indices()(j) = m_E[j];
+ return colsPerm;
+
+ }
+ /**
+ * Gets the rank of the matrix.
+ * It should be equal to matrixR().cols() if the matrix is full-rank.
+ */
+ Index rank() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_cc.SPQR_istat[4];
+ }
+ /// Set the fill-reducing ordering method to be used
+ void setSPQROrdering(int ord) { m_ordering = ord;}
+ /// Set the tolerance tol: columns with 2-norm <= tol are treated as zero
+ void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; }
+
+ /** \returns a pointer to the SPQR workspace */
+ cholmod_common *cholmodCommon() const { return &m_cc; }
+
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if the computation was successful,
+ * \c NumericalIssue if the sparse QR factorization could not be computed
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+ protected:
+ bool m_isInitialized;
+ bool m_analysisIsOk;
+ bool m_factorizationIsOk;
+ mutable bool m_isRUpToDate;
+ mutable ComputationInfo m_info;
+ int m_ordering; // Ordering method to use, see SPQR's manual
+ int m_allow_tol; // Whether to use a tolerance during numerical factorization
+ RealScalar m_tolerance; // treat columns with 2-norm below this tolerance as zero
+ mutable cholmod_sparse *m_cR; // The sparse R factor in cholmod format
+ mutable MatrixType m_R; // The sparse matrix R in Eigen format
+ mutable Index *m_E; // The permutation applied to columns
+ mutable cholmod_sparse *m_H; //The householder vectors
+ mutable Index *m_HPinv; // The row permutation of H
+ mutable cholmod_dense *m_HTau; // The Householder coefficients
+ mutable Index m_rank; // The rank of the matrix
+ mutable cholmod_common m_cc; // Workspace and parameters
+ template<typename ,typename > friend struct SPQR_QProduct;
+};
+
+template <typename SPQRType, typename Derived>
+struct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType,Derived> >
+{
+ typedef typename SPQRType::Scalar Scalar;
+ typedef typename SPQRType::Index Index;
+ // Constructor: keeps references to the argument expressions
+ SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {}
+
+ inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); }
+ inline Index cols() const { return m_other.cols(); }
+ // Assign to a vector
+ template<typename ResType>
+ void evalTo(ResType& res) const
+ {
+ cholmod_dense y_cd;
+ cholmod_dense *x_cd;
+ int method = m_transpose ? SPQR_QTX : SPQR_QX;
+ cholmod_common *cc = m_spqr.cholmodCommon();
+ y_cd = viewAsCholmod(m_other.const_cast_derived());
+ x_cd = SuiteSparseQR_qmult<Scalar>(method, m_spqr.m_H, m_spqr.m_HTau, m_spqr.m_HPinv, &y_cd, cc);
+ res = Matrix<Scalar,ResType::RowsAtCompileTime,ResType::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x), x_cd->nrow, x_cd->ncol);
+ cholmod_l_free_dense(&x_cd, cc);
+ }
+ const SPQRType& m_spqr;
+ const Derived& m_other;
+ bool m_transpose;
+
+};
+template<typename SPQRType>
+struct SPQRMatrixQReturnType{
+
+ SPQRMatrixQReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
+ template<typename Derived>
+ SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other)
+ {
+ return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(),false);
+ }
+ SPQRMatrixQTransposeReturnType<SPQRType> adjoint() const
+ {
+ return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
+ }
+ // To use for operations with the transpose of Q
+ SPQRMatrixQTransposeReturnType<SPQRType> transpose() const
+ {
+ return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
+ }
+ const SPQRType& m_spqr;
+};
+
+template<typename SPQRType>
+struct SPQRMatrixQTransposeReturnType{
+ SPQRMatrixQTransposeReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
+ template<typename Derived>
+ SPQR_QProduct<SPQRType,Derived> operator*(const MatrixBase<Derived>& other)
+ {
+ return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(), true);
+ }
+ const SPQRType& m_spqr;
+};
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<SPQR<_MatrixType>, Rhs>
+ : solve_retval_base<SPQR<_MatrixType>, Rhs>
+{
+ typedef SPQR<_MatrixType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+}// End namespace Eigen
+#endif
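A minimal usage sketch for the SPQR wrapper introduced by this file, assuming SuiteSparseQR and CHOLMOD are available at build time (the matrix entries below are hypothetical placeholders):

    #include <Eigen/Sparse>
    #include <Eigen/SPQRSupport>  // needs -lspqr -lcholmod when linking
    #include <vector>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor> SpMat;

      SpMat A(100, 40);
      std::vector<Eigen::Triplet<double> > triplets;
      for (int i = 0; i < 40; ++i)
        triplets.push_back(Eigen::Triplet<double>(i, i, 1.0 + i));  // placeholder fill
      A.setFromTriplets(triplets.begin(), triplets.end());

      Eigen::VectorXd b = Eigen::VectorXd::Random(100);

      Eigen::SPQR<SpMat> spqr(A);                  // factorizes A*P = Q*R
      if (spqr.info() != Eigen::Success) return 1;

      Eigen::VectorXd x   = spqr.solve(b);                   // least-squares solve
      Eigen::VectorXd Qtb = spqr.matrixQ().transpose() * b;  // apply Q^T to a vector
      (void)x; (void)Qtb;
      return 0;
    }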
diff --git a/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h b/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
index a7dbf073766..f44995cd39c 100644
--- a/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
+++ b/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
@@ -78,7 +78,8 @@ public:
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
- m_qr = FullPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
}
@@ -96,7 +97,8 @@ public:
return false;
}
private:
- FullPivHouseholderQR<MatrixType> m_qr;
+ typedef FullPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
WorkspaceType m_workspace;
};
@@ -121,7 +123,8 @@ public:
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
- m_qr = FullPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
}
m_adjoint.resize(svd.cols(), svd.rows());
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
@@ -141,7 +144,8 @@ public:
else return false;
}
private:
- FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+ typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
@@ -158,7 +162,8 @@ public:
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
- m_qr = ColPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
@@ -183,7 +188,8 @@ public:
}
private:
- ColPivHouseholderQR<MatrixType> m_qr;
+ typedef ColPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
@@ -209,7 +215,8 @@ public:
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
- m_qr = ColPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
@@ -237,7 +244,8 @@ public:
}
private:
- ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+ typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
@@ -254,7 +262,8 @@ public:
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
- m_qr = HouseholderQR<MatrixType>(svd.rows(), svd.cols());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
@@ -278,7 +287,8 @@ public:
return false;
}
private:
- HouseholderQR<MatrixType> m_qr;
+ typedef HouseholderQR<MatrixType> QRType;
+ QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
@@ -304,7 +314,8 @@ public:
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
- m_qr = HouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
@@ -332,7 +343,8 @@ public:
}
private:
- HouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+ typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
@@ -359,15 +371,19 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
typedef typename SVD::Index Index;
static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
{
+ using std::sqrt;
Scalar z;
JacobiRotation<Scalar> rot;
- RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p)));
+ RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
if(n==0)
{
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
- z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ if(work_matrix.coeff(q,q)!=Scalar(0))
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ else
+ z = Scalar(0);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
@@ -398,9 +414,10 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> *j_left,
JacobiRotation<RealScalar> *j_right)
{
+ using std::sqrt;
Matrix<RealScalar,2,2> m;
- m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)),
- real(matrix.coeff(q,p)), real(matrix.coeff(q,q));
+ m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+ numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
JacobiRotation<RealScalar> rot1;
RealScalar t = m.coeff(0,0) + m.coeff(1,1);
RealScalar d = m.coeff(1,0) - m.coeff(0,1);
@@ -412,7 +429,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
else
{
RealScalar u = d / t;
- rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + abs2(u));
+ rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
rot1.s() = rot1.c() * u;
}
m.applyOnTheLeft(0,1,rot1);
@@ -709,12 +726,14 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
}
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
- m_matrixU.resize(m_rows, m_computeFullU ? m_rows
- : m_computeThinU ? m_diagSize
- : 0);
- m_matrixV.resize(m_cols, m_computeFullV ? m_cols
- : m_computeThinV ? m_diagSize
- : 0);
+ if(RowsAtCompileTime==Dynamic)
+ m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+ : m_computeThinU ? m_diagSize
+ : 0);
+ if(ColsAtCompileTime==Dynamic)
+ m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+ : m_computeThinV ? m_diagSize
+ : 0);
m_workMatrix.resize(m_diagSize, m_diagSize);
if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
@@ -725,6 +744,7 @@ template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
+ using std::abs;
allocate(matrix.rows(), matrix.cols(), computationOptions);
// currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
@@ -762,9 +782,9 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever. Similarly, small denormal numbers are considered zero.
using std::max;
- RealScalar threshold = (max)(considerAsZero, precision * (max)(internal::abs(m_workMatrix.coeff(p,p)),
- internal::abs(m_workMatrix.coeff(q,q))));
- if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) > threshold)
+ RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+ abs(m_workMatrix.coeff(q,q))));
+ if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
{
finished = false;
@@ -788,7 +808,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
for(Index i = 0; i < m_diagSize; ++i)
{
- RealScalar a = internal::abs(m_workMatrix.coeff(i,i));
+ RealScalar a = abs(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = a;
if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
}
@@ -833,17 +853,12 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
// A = U S V^*
// So A^{-1} = V S^{-1} U^*
- Index diagSize = (std::min)(dec().rows(), dec().cols());
- typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
-
+ Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp;
Index nonzeroSingVals = dec().nonzeroSingularValues();
- invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
- invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
-
- dst = dec().matrixV().leftCols(diagSize)
- * invertedSingVals.asDiagonal()
- * dec().matrixU().leftCols(diagSize).adjoint()
- * rhs();
+
+ tmp.noalias() = dec().matrixU().leftCols(nonzeroSingVals).adjoint() * rhs();
+ tmp = dec().singularValues().head(nonzeroSingVals).asDiagonal().inverse() * tmp;
+ dst = dec().matrixV().leftCols(nonzeroSingVals) * tmp;
}
};
} // end namespace internal
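The reworked solve path above applies U^* to the right-hand side, rescales by the inverse of the nonzero singular values, and maps back through V. The user-facing call is unchanged; a minimal least-squares sketch, assuming MatrixXd:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(8, 3);
      Eigen::VectorXd b = Eigen::VectorXd::Random(8);

      // Thin U and V suffice for solving; internally this follows the tmp-based
      // path: tmp = U^* b, tmp = S^{-1} tmp (nonzero part only), x = V tmp.
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      Eigen::VectorXd x = svd.solve(b);

      std::cout << "least-squares residual: " << (A * x - b).norm() << "\n";
    }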
diff --git a/extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h b/extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h
index 4d479f6b26e..decda754050 100644
--- a/extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h
+++ b/extern/Eigen3/Eigen/src/SVD/JacobiSVD_MKL.h
@@ -40,7 +40,7 @@ namespace Eigen {
/** \internal Specialization for the data types supported by MKL */
#define EIGEN_MKL_SVD(EIGTYPE, MKLTYPE, MKLRTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \
-template<> inline\
+template<> inline \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
{ \
diff --git a/extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h b/extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h
index 213b3100df5..587de37a5ad 100644
--- a/extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h
+++ b/extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h
@@ -39,7 +39,7 @@ template<typename _MatrixType> class UpperBidiagonalization
CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Diagonal<const MatrixType,0> >
> HouseholderUSequenceType;
typedef HouseholderSequence<
- const MatrixType,
+ const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
Diagonal<const MatrixType,1>,
OnTheRight
> HouseholderVSequenceType;
@@ -74,7 +74,7 @@ template<typename _MatrixType> class UpperBidiagonalization
const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
- return HouseholderVSequenceType(m_householder, m_householder.const_derived().template diagonal<1>())
+ return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
.setLength(m_householder.cols()-1)
.setShift(1);
}
diff --git a/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h b/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
index 9bf38ab2d91..f41d7e010f7 100644
--- a/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
+++ b/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -1,52 +1,12 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-/*
-
-NOTE: the _symbolic, and _numeric functions has been adapted from
- the LDL library:
-
-LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
-
-LDL License:
-
- Your use or distribution of LDL or any modified version of
- LDL implies that you agree to this License.
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
- USA
-
- Permission is hereby granted to use or copy this program under the
- terms of the GNU LGPL, provided that the Copyright, this License,
- and the Availability of the original version is retained on all copies.
- User documentation of any code that uses this code or any modified
- version of this code must cite the Copyright, this License, the
- Availability note, and "Used by permission." Permission to modify
- the code and to distribute modified code is granted, provided the
- Copyright, this License, and the Availability note are retained,
- and a notice that the code was modified is included.
- */
-
-#include "../Core/util/NonMPL2.h"
-
#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H
#define EIGEN_SIMPLICIAL_CHOLESKY_H
@@ -215,27 +175,6 @@ class SimplicialCholeskyBase : internal::noncopyable
dest = m_Pinv * dest;
}
- /** \internal */
- template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
- void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
- {
- eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
- eigen_assert(m_matrix.rows()==b.rows());
-
- // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
- static const int NbColsAtOnce = 4;
- int rhsCols = b.cols();
- int size = b.rows();
- Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
- for(int k=0; k<rhsCols; k+=NbColsAtOnce)
- {
- int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
- tmp.leftCols(actualCols) = b.middleCols(k,actualCols);
- tmp.leftCols(actualCols) = derived().solve(tmp.leftCols(actualCols));
- dest.middleCols(k,actualCols) = tmp.leftCols(actualCols).sparseView();
- }
- }
-
#endif // EIGEN_PARSED_BY_DOXYGEN
protected:
@@ -425,7 +364,7 @@ public:
Scalar determinant() const
{
Scalar detL = Base::m_matrix.diagonal().prod();
- return internal::abs2(detL);
+ return numext::abs2(detL);
}
};
@@ -660,7 +599,7 @@ public:
else
{
Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
- return internal::abs2(detL);
+ return numext::abs2(detL);
}
}
@@ -693,151 +632,6 @@ void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, CholMatrixTy
ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);
}
-template<typename Derived>
-void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
-{
- const Index size = ap.rows();
- m_matrix.resize(size, size);
- m_parent.resize(size);
- m_nonZerosPerCol.resize(size);
-
- ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
-
- for(Index k = 0; k < size; ++k)
- {
- /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
- m_parent[k] = -1; /* parent of k is not yet known */
- tags[k] = k; /* mark node k as visited */
- m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
- for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
- {
- Index i = it.index();
- if(i < k)
- {
- /* follow path from i to root of etree, stop at flagged node */
- for(; tags[i] != k; i = m_parent[i])
- {
- /* find parent of i if not yet determined */
- if (m_parent[i] == -1)
- m_parent[i] = k;
- m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */
- tags[i] = k; /* mark i as visited */
- }
- }
- }
- }
-
- /* construct Lp index array from m_nonZerosPerCol column counts */
- Index* Lp = m_matrix.outerIndexPtr();
- Lp[0] = 0;
- for(Index k = 0; k < size; ++k)
- Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
-
- m_matrix.resizeNonZeros(Lp[size]);
-
- m_isInitialized = true;
- m_info = Success;
- m_analysisIsOk = true;
- m_factorizationIsOk = false;
-}
-
-
-template<typename Derived>
-template<bool DoLDLT>
-void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)
-{
- eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
- eigen_assert(ap.rows()==ap.cols());
- const Index size = ap.rows();
- eigen_assert(m_parent.size()==size);
- eigen_assert(m_nonZerosPerCol.size()==size);
-
- const Index* Lp = m_matrix.outerIndexPtr();
- Index* Li = m_matrix.innerIndexPtr();
- Scalar* Lx = m_matrix.valuePtr();
-
- ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
- ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0);
- ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
-
- bool ok = true;
- m_diag.resize(DoLDLT ? size : 0);
-
- for(Index k = 0; k < size; ++k)
- {
- // compute nonzero pattern of kth row of L, in topological order
- y[k] = 0.0; // Y(0:k) is now all zero
- Index top = size; // stack for pattern is empty
- tags[k] = k; // mark node k as visited
- m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
- for(typename MatrixType::InnerIterator it(ap,k); it; ++it)
- {
- Index i = it.index();
- if(i <= k)
- {
- y[i] += internal::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
- Index len;
- for(len = 0; tags[i] != k; i = m_parent[i])
- {
- pattern[len++] = i; /* L(k,i) is nonzero */
- tags[i] = k; /* mark i as visited */
- }
- while(len > 0)
- pattern[--top] = pattern[--len];
- }
- }
-
- /* compute numerical values kth row of L (a sparse triangular solve) */
-
- RealScalar d = internal::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k)
- y[k] = 0.0;
- for(; top < size; ++top)
- {
- Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
- Scalar yi = y[i]; /* get and clear Y(i) */
- y[i] = 0.0;
-
- /* the nonzero entry L(k,i) */
- Scalar l_ki;
- if(DoLDLT)
- l_ki = yi / m_diag[i];
- else
- yi = l_ki = yi / Lx[Lp[i]];
-
- Index p2 = Lp[i] + m_nonZerosPerCol[i];
- Index p;
- for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
- y[Li[p]] -= internal::conj(Lx[p]) * yi;
- d -= internal::real(l_ki * internal::conj(yi));
- Li[p] = k; /* store L(k,i) in column form of L */
- Lx[p] = l_ki;
- ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
- }
- if(DoLDLT)
- {
- m_diag[k] = d;
- if(d == RealScalar(0))
- {
- ok = false; /* failure, D(k,k) is zero */
- break;
- }
- }
- else
- {
- Index p = Lp[k] + m_nonZerosPerCol[k]++;
- Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */
- if(d <= RealScalar(0)) {
- ok = false; /* failure, matrix is not positive definite */
- break;
- }
- Lx[p] = internal::sqrt(d) ;
- }
- }
-
- m_info = ok ? Success : NumericalIssue;
- m_factorizationIsOk = true;
-}
-
namespace internal {
template<typename Derived, typename Rhs>
@@ -862,7 +656,7 @@ struct sparse_solve_retval<SimplicialCholeskyBase<Derived>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
- dec().derived()._solve_sparse(rhs(),dst);
+ this->defaultEvalTo(dst);
}
};
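End-user code is unaffected by the refactoring above (the hand-rolled sparse-rhs loop now goes through defaultEvalTo, and determinant() uses numext::abs2); a minimal SimplicialLDLT sketch on a small SPD matrix, assuming double scalars:

    #include <Eigen/Sparse>
    #include <Eigen/SparseCholesky>
    #include <vector>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;

      // Small SPD tridiagonal matrix.
      const int n = 5;
      std::vector<Eigen::Triplet<double> > t;
      for (int i = 0; i < n; ++i) {
        t.push_back(Eigen::Triplet<double>(i, i, 2.0));
        if (i + 1 < n) {
          t.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
          t.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
        }
      }
      SpMat A(n, n);
      A.setFromTriplets(t.begin(), t.end());

      Eigen::SimplicialLDLT<SpMat> ldlt(A);
      Eigen::VectorXd x = ldlt.solve(Eigen::VectorXd::Ones(n));
      double det = ldlt.determinant();  // for LDLT: product of the D entries
      (void)x; (void)det;
      return ldlt.info() == Eigen::Success ? 0 : 1;
    }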
diff --git a/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
new file mode 100644
index 00000000000..7aaf702beed
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
@@ -0,0 +1,199 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+/*
+
+NOTE: these functions have been adapted from the LDL library:
+
+LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
+
+LDL License:
+
+ Your use or distribution of LDL or any modified version of
+ LDL implies that you agree to this License.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+ Permission is hereby granted to use or copy this program under the
+ terms of the GNU LGPL, provided that the Copyright, this License,
+ and the Availability of the original version is retained on all copies.
+ User documentation of any code that uses this code or any modified
+ version of this code must cite the Copyright, this License, the
+ Availability note, and "Used by permission." Permission to modify
+ the code and to distribute modified code is granted, provided the
+ Copyright, this License, and the Availability note are retained,
+ and a notice that the code was modified is included.
+ */
+
+#include "../Core/util/NonMPL2.h"
+
+#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
+#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
+
+namespace Eigen {
+
+template<typename Derived>
+void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
+{
+ const Index size = ap.rows();
+ m_matrix.resize(size, size);
+ m_parent.resize(size);
+ m_nonZerosPerCol.resize(size);
+
+ ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
+
+ for(Index k = 0; k < size; ++k)
+ {
+ /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
+ m_parent[k] = -1; /* parent of k is not yet known */
+ tags[k] = k; /* mark node k as visited */
+ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
+ for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
+ {
+ Index i = it.index();
+ if(i < k)
+ {
+ /* follow path from i to root of etree, stop at flagged node */
+ for(; tags[i] != k; i = m_parent[i])
+ {
+ /* find parent of i if not yet determined */
+ if (m_parent[i] == -1)
+ m_parent[i] = k;
+ m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */
+ tags[i] = k; /* mark i as visited */
+ }
+ }
+ }
+ }
+
+ /* construct Lp index array from m_nonZerosPerCol column counts */
+ Index* Lp = m_matrix.outerIndexPtr();
+ Lp[0] = 0;
+ for(Index k = 0; k < size; ++k)
+ Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
+
+ m_matrix.resizeNonZeros(Lp[size]);
+
+ m_isInitialized = true;
+ m_info = Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+}
+
+
+template<typename Derived>
+template<bool DoLDLT>
+void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)
+{
+ using std::sqrt;
+
+ eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+ eigen_assert(ap.rows()==ap.cols());
+ const Index size = ap.rows();
+ eigen_assert(m_parent.size()==size);
+ eigen_assert(m_nonZerosPerCol.size()==size);
+
+ const Index* Lp = m_matrix.outerIndexPtr();
+ Index* Li = m_matrix.innerIndexPtr();
+ Scalar* Lx = m_matrix.valuePtr();
+
+ ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, pattern, size, 0);
+ ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
+
+ bool ok = true;
+ m_diag.resize(DoLDLT ? size : 0);
+
+ for(Index k = 0; k < size; ++k)
+ {
+ // compute nonzero pattern of kth row of L, in topological order
+ y[k] = 0.0; // Y(0:k) is now all zero
+ Index top = size; // stack for pattern is empty
+ tags[k] = k; // mark node k as visited
+ m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
+ for(typename MatrixType::InnerIterator it(ap,k); it; ++it)
+ {
+ Index i = it.index();
+ if(i <= k)
+ {
+ y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
+ Index len;
+ for(len = 0; tags[i] != k; i = m_parent[i])
+ {
+ pattern[len++] = i; /* L(k,i) is nonzero */
+ tags[i] = k; /* mark i as visited */
+ }
+ while(len > 0)
+ pattern[--top] = pattern[--len];
+ }
+ }
+
+ /* compute numerical values kth row of L (a sparse triangular solve) */
+
+ RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k)
+ y[k] = 0.0;
+ for(; top < size; ++top)
+ {
+ Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
+ Scalar yi = y[i]; /* get and clear Y(i) */
+ y[i] = 0.0;
+
+ /* the nonzero entry L(k,i) */
+ Scalar l_ki;
+ if(DoLDLT)
+ l_ki = yi / m_diag[i];
+ else
+ yi = l_ki = yi / Lx[Lp[i]];
+
+ Index p2 = Lp[i] + m_nonZerosPerCol[i];
+ Index p;
+ for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
+ y[Li[p]] -= numext::conj(Lx[p]) * yi;
+ d -= numext::real(l_ki * numext::conj(yi));
+ Li[p] = k; /* store L(k,i) in column form of L */
+ Lx[p] = l_ki;
+ ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
+ }
+ if(DoLDLT)
+ {
+ m_diag[k] = d;
+ if(d == RealScalar(0))
+ {
+ ok = false; /* failure, D(k,k) is zero */
+ break;
+ }
+ }
+ else
+ {
+ Index p = Lp[k] + m_nonZerosPerCol[k]++;
+ Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */
+ if(d <= RealScalar(0)) {
+ ok = false; /* failure, matrix is not positive definite */
+ break;
+ }
+ Lx[p] = sqrt(d) ;
+ }
+ }
+
+ m_info = ok ? Success : NumericalIssue;
+ m_factorizationIsOk = true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
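Splitting the symbolic analysis (analyzePattern_preordered) from the numeric factorization (factorize_preordered) is what lets one elimination-tree analysis be reused across several matrices sharing a sparsity pattern. A sketch of the corresponding public entry points, assuming double scalars:

    #include <Eigen/Sparse>
    #include <Eigen/SparseCholesky>

    // Sketch: reuse the symbolic analysis when only numerical values change.
    // A0 and A1 are assumed SPD with identical sparsity patterns.
    void refactorize(const Eigen::SparseMatrix<double>& A0,
                     const Eigen::SparseMatrix<double>& A1)
    {
      Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > ldlt;
      ldlt.analyzePattern(A0);  // elimination tree + column counts (once)
      ldlt.factorize(A0);       // numeric up-looking factorization
      ldlt.factorize(A1);       // reuses the pattern analysis of A0
    }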
diff --git a/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
index 6cfaadbaa9a..17fff96a78b 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
@@ -288,9 +288,10 @@ class AmbiVector<_Scalar,_Index>::Iterator
* In practice, all coefficients having a magnitude smaller than \a epsilon
* are skipped.
*/
- Iterator(const AmbiVector& vec, RealScalar epsilon = 0)
+ Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
: m_vector(vec)
{
+ using std::abs;
m_epsilon = epsilon;
m_isDense = m_vector.m_mode==IsDense;
if (m_isDense)
@@ -304,7 +305,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
m_currentEl = m_vector.m_llStart;
- while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<=m_epsilon)
+ while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
m_currentEl = llElements[m_currentEl].next;
if (m_currentEl<0)
{
@@ -326,11 +327,12 @@ class AmbiVector<_Scalar,_Index>::Iterator
Iterator& operator++()
{
+ using std::abs;
if (m_isDense)
{
do {
++m_cachedIndex;
- } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+ } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
if (m_cachedIndex<m_vector.m_end)
m_cachedValue = m_vector.m_buffer[m_cachedIndex];
else
@@ -341,7 +343,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
do {
m_currentEl = llElements[m_currentEl].next;
- } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon);
+ } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<m_epsilon);
if (m_currentEl<0)
{
m_cachedIndex = -1;
diff --git a/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
index 85a998aff10..3321fab4a8a 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
@@ -139,7 +139,7 @@ class CompressedStorage
/** \returns the stored value at index \a key
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
- inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
+ inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
{
if (m_size==0)
return defaultValue;
@@ -152,7 +152,7 @@ class CompressedStorage
}
/** Like at(), but the search is performed in the range [start,end) */
- inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
+ inline Scalar atInRange(size_t start, size_t end, Index key, const Scalar& defaultValue = Scalar(0)) const
{
if (start>=end)
return Scalar(0);
@@ -167,7 +167,7 @@ class CompressedStorage
/** \returns a reference to the value at index \a key
* If the value does not exist, then the value \a defaultValue is inserted
* such that the keys are sorted. */
- inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
+ inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
{
size_t id = searchLowerIndex(0,m_size,key);
if (id>=m_size || m_indices[id]!=key)
@@ -184,7 +184,7 @@ class CompressedStorage
return m_values[id];
}
- void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
size_t k = 0;
size_t n = size();
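The const-reference signatures above are the low-level side of SparseMatrix::prune(); a minimal sketch of dropping negligible entries, assuming double scalars:

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> A(3, 3);
      A.insert(0, 0) = 1.0;
      A.insert(1, 1) = 1e-15;  // numerically negligible entry
      A.insert(2, 2) = 2.0;
      A.makeCompressed();

      // Keeps a(i,j) roughly when abs(a(i,j)) > reference * epsilon,
      // so the 1e-15 entry is removed from the storage.
      A.prune(1.0, 1e-12);
      return A.nonZeros() == 2 ? 0 : 1;
    }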
diff --git a/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 16b5e1dba6c..5c320e2d2db 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -66,9 +66,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
}
// unordered insertion
- for(int k=0; k<nnz; ++k)
+ for(Index k=0; k<nnz; ++k)
{
- int i = indices[k];
+ Index i = indices[k];
res.insertBackByOuterInnerUnordered(j,i) = values[i];
mask[i] = false;
}
@@ -76,8 +76,8 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
#if 0
// alternative ordered insertion code:
- int t200 = rows/(log2(200)*1.39);
- int t = (rows*100)/139;
+ Index t200 = rows/(log2(200)*1.39);
+ Index t = (rows*100)/139;
// FIXME reserve nnz non zeros
// FIXME implement fast sort algorithms for very small nnz
@@ -90,9 +90,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
if(true)
{
if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
- for(int k=0; k<nnz; ++k)
+ for(Index k=0; k<nnz; ++k)
{
- int i = indices[k];
+ Index i = indices[k];
res.insertBackByOuterInner(j,i) = values[i];
mask[i] = false;
}
@@ -100,7 +100,7 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
else
{
// dense path
- for(int i=0; i<rows; ++i)
+ for(Index i=0; i<rows; ++i)
{
if(mask[i])
{
@@ -121,9 +121,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType,
- int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
- int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
- int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+ int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+ int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+ int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct conservative_sparse_sparse_product_selector;
template<typename Lhs, typename Rhs, typename ResultType>
@@ -134,8 +134,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
// sort the non zeros:
@@ -149,7 +149,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix rhsRow = rhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
@@ -162,7 +162,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix lhsRow = lhs;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
@@ -175,7 +175,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
RowMajorMatrix resRow(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
res = resRow;
@@ -190,7 +190,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
res = resCol;
@@ -202,7 +202,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix lhsCol = lhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
@@ -215,7 +215,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
ColMajorMatrix rhsCol = rhs;
ColMajorMatrix resCol(lhs.rows(), rhs.cols());
internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
@@ -228,8 +228,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
RowMajorMatrix resRow(lhs.rows(),rhs.cols());
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
// sort the non zeros:
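The typedef changes in this file thread the result's Index type through the temporary matrices, which matters as soon as the sparse operands use a non-default index type. A minimal sketch that exercises the conservative product path, with a hypothetical `long` index type:

    #include <Eigen/Sparse>

    int main()
    {
      // Hypothetical custom index type; the product temporaries now use it too.
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor, long> SpMat;

      SpMat A(4, 4), B(4, 4);
      A.insert(0, 1) = 1.0;
      B.insert(1, 2) = 2.0;
      A.makeCompressed();
      B.makeCompressed();

      // operator* dispatches through conservative_sparse_sparse_product_selector.
      SpMat C = A * B;
      return C.coeff(0, 2) == 2.0 ? 0 : 1;
    }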
diff --git a/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
index 93cd4832dea..ab1a266a905 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -50,6 +50,8 @@ class MappedSparseMatrix
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
inline Index innerSize() const { return m_innerSize; }
inline Index outerSize() const { return m_outerSize; }
+
+ bool isCompressed() const { return true; }
//----------------------------------------
// direct access interface
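MappedSparseMatrix views pre-existing compressed (CSC/CSR) buffers without copying, which is why the isCompressed() added above can unconditionally return true. A minimal sketch over hand-built CSC arrays, assuming double values and int indices:

    #include <Eigen/Sparse>

    int main()
    {
      // CSC data of a 3x3 matrix with entries (0,0)=1, (2,1)=2, (1,2)=3.
      int outer[4]     = {0, 1, 2, 3};  // column start offsets
      int inner[3]     = {0, 2, 1};     // row indices
      double values[3] = {1.0, 2.0, 3.0};

      // Wraps the buffers in place; no copy is made.
      Eigen::MappedSparseMatrix<double, Eigen::ColMajor, int>
          m(3, 3, 3, outer, inner, values);

      return m.sum() == 6.0 ? 0 : 1;
    }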
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
index eefd8070251..16a20a5744e 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
@@ -12,51 +12,37 @@
namespace Eigen {
-namespace internal {
-template<typename MatrixType, int Size>
-struct traits<SparseInnerVectorSet<MatrixType, Size> >
+template<typename XprType, int BlockRows, int BlockCols>
+class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
+ : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
{
- typedef typename traits<MatrixType>::Scalar Scalar;
- typedef typename traits<MatrixType>::Index Index;
- typedef typename traits<MatrixType>::StorageKind StorageKind;
- typedef MatrixXpr XprKind;
- enum {
- IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
- Flags = MatrixType::Flags,
- RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
- MaxRowsAtCompileTime = RowsAtCompileTime,
- MaxColsAtCompileTime = ColsAtCompileTime,
- CoeffReadCost = MatrixType::CoeffReadCost
- };
-};
-} // end namespace internal
-
-template<typename MatrixType, int Size>
-class SparseInnerVectorSet : internal::no_assignment_operator,
- public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
-{
- public:
-
- enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
-
- EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
- class InnerIterator: public MatrixType::InnerIterator
+ typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+ typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
+public:
+ enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+protected:
+ enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+
+ class InnerIterator: public XprType::InnerIterator
{
+ typedef typename BlockImpl::Index Index;
public:
- inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
- : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ inline InnerIterator(const BlockType& xpr, Index outer)
+ : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
protected:
Index m_outer;
};
- class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+ class ReverseInnerIterator: public XprType::ReverseInnerIterator
{
+ typedef typename BlockImpl::Index Index;
public:
- inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
- : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+ : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
@@ -64,39 +50,24 @@ class SparseInnerVectorSet : internal::no_assignment_operator,
Index m_outer;
};
- inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
- : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
- {
- eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
- }
-
- inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
- : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
- {
- eigen_assert(Size!=Dynamic);
- eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
- }
-
-// template<typename OtherDerived>
-// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-// {
-// return *this;
-// }
+ inline BlockImpl(const XprType& xpr, int i)
+ : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+ {}
-// template<typename Sparse>
-// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-// {
-// return *this;
-// }
+ inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+ : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+ {}
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
- const typename MatrixType::Nested m_matrix;
+ typename XprType::Nested m_matrix;
Index m_outerStart;
- const internal::variable_if_dynamic<Index, Size> m_outerSize;
+ const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
};
@@ -104,32 +75,36 @@ class SparseInnerVectorSet : internal::no_assignment_operator,
* specialisation for SparseMatrix
***************************************************************************/
-template<typename _Scalar, int _Options, typename _Index, int Size>
-class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
- : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size> >
+template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
+class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
+ : public SparseMatrixBase<Block<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
{
- typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
- public:
-
- enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
-
- EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
- class InnerIterator: public MatrixType::InnerIterator
+ typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
+ typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
+ typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+public:
+ enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+ EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+protected:
+ enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+
+ class InnerIterator: public SparseMatrixType::InnerIterator
{
public:
- inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
- : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ inline InnerIterator(const BlockType& xpr, Index outer)
+ : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
protected:
Index m_outer;
};
- class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+ class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
{
public:
- inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
- : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+ : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
@@ -137,23 +112,18 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
Index m_outer;
};
- inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
- : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
- {
- eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
- }
+ inline BlockImpl(const SparseMatrixType& xpr, int i)
+ : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+ {}
- inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
- : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
- {
- eigen_assert(Size==1);
- eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
- }
+ inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+ : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+ {}
template<typename OtherDerived>
- inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
{
- typedef typename internal::remove_all<typename MatrixType::Nested>::type _NestedMatrixType;
+ typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
      _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);
      // This assignment is slow if this vector set is not empty
      // and/or it is not at the end of the nonzeros of the underlying matrix.
@@ -163,70 +133,69 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
// 2 - let's check whether there is enough allocated memory
Index nnz = tmp.nonZeros();
- Index nnz_previous = nonZeros();
- Index free_size = Index(matrix.data().allocatedSize()) + nnz_previous;
- Index nnz_head = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart];
- Index tail = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];
- Index nnz_tail = matrix.nonZeros() - tail;
-
- if(nnz>free_size)
+ Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
+      Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
+ Index block_size = end - start; // available room in the current block
+ Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
+
+ Index free_size = m_matrix.isCompressed()
+ ? Index(matrix.data().allocatedSize()) + block_size
+ : block_size;
+
+ if(nnz>free_size)
{
// realloc manually to reduce copies
- typename MatrixType::Storage newdata(m_matrix.nonZeros() - nnz_previous + nnz);
+ typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
- std::memcpy(&newdata.value(0), &m_matrix.data().value(0), nnz_head*sizeof(Scalar));
- std::memcpy(&newdata.index(0), &m_matrix.data().index(0), nnz_head*sizeof(Index));
+ std::memcpy(&newdata.value(0), &m_matrix.data().value(0), start*sizeof(Scalar));
+ std::memcpy(&newdata.index(0), &m_matrix.data().index(0), start*sizeof(Index));
- std::memcpy(&newdata.value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
- std::memcpy(&newdata.index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+ std::memcpy(&newdata.value(start), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&newdata.index(start), &tmp.data().index(0), nnz*sizeof(Index));
- std::memcpy(&newdata.value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
- std::memcpy(&newdata.index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+ std::memcpy(&newdata.value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar));
+ std::memcpy(&newdata.index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index));
+
+ newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
matrix.data().swap(newdata);
}
else
{
// no need to realloc, simply copy the tail at its respective position and insert tmp
- matrix.data().resize(nnz_head + nnz + nnz_tail);
-
- if(nnz<nnz_previous)
- {
- std::memcpy(&matrix.data().value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
- std::memcpy(&matrix.data().index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
- }
- else
- {
- for(Index i=nnz_tail-1; i>=0; --i)
- {
- matrix.data().value(nnz_head+nnz+i) = matrix.data().value(tail+i);
- matrix.data().index(nnz_head+nnz+i) = matrix.data().index(tail+i);
- }
- }
-
- std::memcpy(&matrix.data().value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
- std::memcpy(&matrix.data().index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+ matrix.data().resize(start + nnz + tail_size);
+
+ std::memmove(&matrix.data().value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar));
+ std::memmove(&matrix.data().index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index));
+
+ std::memcpy(&matrix.data().value(start), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(start), &tmp.data().index(0), nnz*sizeof(Index));
}
+
+ // update innerNonZeros
+ if(!m_matrix.isCompressed())
+ for(Index j=0; j<m_outerSize.value(); ++j)
+ matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros();
// update outer index pointers
- Index p = nnz_head;
+ Index p = start;
for(Index k=0; k<m_outerSize.value(); ++k)
{
matrix.outerIndexPtr()[m_outerStart+k] = p;
p += tmp.innerVector(k).nonZeros();
}
- std::ptrdiff_t offset = nnz - nnz_previous;
+ std::ptrdiff_t offset = nnz - block_size;
for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
{
matrix.outerIndexPtr()[k] += offset;
}
- return *this;
+ return derived();
}
- inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ inline BlockType& operator=(const BlockType& other)
{
- return operator=<SparseInnerVectorSet>(other);
+ return operator=<BlockType>(other);
}
inline const Scalar* valuePtr() const
@@ -252,12 +221,12 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
else if(m_outerSize.value()==0)
return 0;
else
- return Map<const Matrix<Index,Size,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+ return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
}
const Scalar& lastCoeff() const
{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
eigen_assert(nonZeros()>0);
if(m_matrix.isCompressed())
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
@@ -265,122 +234,175 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
}
-// template<typename Sparse>
-// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-// {
-// return *this;
-// }
-
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
- typename MatrixType::Nested m_matrix;
+ typename SparseMatrixType::Nested m_matrix;
Index m_outerStart;
- const internal::variable_if_dynamic<Index, Size> m_outerSize;
+ const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
};
//----------
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
-{
- EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
- return innerVector(i);
-}
-
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
- * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
-{
- EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
- return innerVector(i);
-}
-
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
-{
- EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
- return innerVector(i);
-}
-
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
- * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
-{
- EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
- return innerVector(i);
-}
-
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major).
*/
template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
-{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
+{ return InnerVectorReturnType(derived(), outer); }
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
-{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
+{ return ConstInnerVectorReturnType(derived(), outer); }
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major).
+ */
template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size)
+Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{
- EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
- return innerVectors(start, size);
+ return Block<Derived,Dynamic,Dynamic,true>(derived(),
+ IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+ IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+
}
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
- * (read-only version) */
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major). Read-only.
+ */
template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size) const
+const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{
- EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
- return innerVectors(start, size);
+ return Block<const Derived,Dynamic,Dynamic,true>(derived(),
+ IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+ IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+
}
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size)
+/** Generic implementation of sparse Block expression.
+ * Read-only.
+ */
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
+ : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
- EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
- return innerVectors(start, size);
-}
+ typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+ typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+public:
+ enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+ EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+
+ /** Column or Row constructor
+ */
+ inline BlockImpl(const XprType& xpr, int i)
+ : m_matrix(xpr),
+ m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
+ m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+ m_blockRows(xpr.rows()),
+ m_blockCols(xpr.cols())
+ {}
+
+ /** Dynamic-size constructor
+ */
+ inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+ : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols)
+ {}
+
+ inline int rows() const { return m_blockRows.value(); }
+ inline int cols() const { return m_blockCols.value(); }
+
+ inline Scalar& coeffRef(int row, int col)
+ {
+ return m_matrix.const_cast_derived()
+ .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+ }
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
- * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size) const
-{
- EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
- return innerVectors(start, size);
-}
+ inline const Scalar coeff(int row, int col) const
+ {
+ return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
+ }
+ inline Scalar& coeffRef(int index)
+ {
+ return m_matrix.const_cast_derived()
+ .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ }
+ inline const Scalar coeff(int index) const
+ {
+ return m_matrix
+ .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ }
+
+ inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+
+ class InnerIterator : public _MatrixTypeNested::InnerIterator
+ {
+ typedef typename _MatrixTypeNested::InnerIterator Base;
+ const BlockType& m_block;
+ Index m_end;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer)
+ : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+ m_block(block),
+ m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
+ {
+ while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
+ Base::operator++();
+ }
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major).
- */
-template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
-{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+ inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+ inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+ inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
+ inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
+
+ inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
+ };
+ class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator
+ {
+ typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+ const BlockType& m_block;
+ Index m_begin;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer)
+ : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+ m_block(block),
+ m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value())
+ {
+ while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) )
+ Base::operator--();
+ }
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major). Read-only.
- */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
-{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+ inline Index index() const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+ inline Index outer() const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+ inline Index row() const { return Base::row() - m_block.m_startRow.value(); }
+ inline Index col() const { return Base::col() - m_block.m_startCol.value(); }
+
+ inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; }
+ };
+ protected:
+ friend class InnerIterator;
+ friend class ReverseInnerIterator;
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+
+ typename XprType::Nested m_matrix;
+ const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+ const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+ const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+ const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+
+};
} // end namespace Eigen
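
The net effect of this rewrite is that the ad-hoc SparseInnerVectorSet expression disappears and sparse blocks go through the regular Block/BlockImpl machinery, while inner-panel blocks of a SparseMatrix keep a real assignment operator that also handles uncompressed storage and shifts the tail with memmove. A small sketch of the in-place assignment that the specialized operator= above performs (illustrative values):

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(4, 4), repl(4, 2);
      m.insert(0, 0) = 1.0;
      m.insert(1, 1) = 2.0;
      m.insert(2, 2) = 3.0;
      m.makeCompressed();
      repl.insert(0, 0) = 5.0;
      repl.insert(3, 1) = 6.0;
      repl.makeCompressed();
      m.innerVectors(1, 2) = repl;  // overwrite columns 1..2 in place;
                                    // the tail of the storage is shifted
      return m.coeff(3, 2) == 6.0 ? 0 : 1;
    }
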
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseColEtree.h b/extern/Eigen3/Eigen/src/SparseCore/SparseColEtree.h
new file mode 100644
index 00000000000..f8745f46100
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseColEtree.h
@@ -0,0 +1,206 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/*
+
+ * NOTE: This file is a modified version of the sp_coletree.c file in SuperLU
+
+ * -- SuperLU routine (version 3.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * August 1, 2008
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSE_COLETREE_H
+#define SPARSE_COLETREE_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** Find the root of the tree/set containing the vertex i: uses path halving */
+template<typename Index, typename IndexVector>
+Index etree_find (Index i, IndexVector& pp)
+{
+ Index p = pp(i); // Parent
+ Index gp = pp(p); // Grand parent
+ while (gp != p)
+ {
+ pp(i) = gp; // Parent pointer on find path is changed to former grand parent
+ i = gp;
+ p = pp(i);
+ gp = pp(p);
+ }
+ return p;
+}
+
+/** Compute the column elimination tree of a sparse matrix
+ * \param mat The matrix in column-major format.
+ * \param parent The elimination tree
+ * \param firstRowElt The column index of the first element in each row
+ * \param perm The permutation to apply to the column of \b mat
+ */
+template <typename MatrixType, typename IndexVector>
+int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0)
+{
+ typedef typename MatrixType::Index Index;
+ Index nc = mat.cols(); // Number of columns
+ Index m = mat.rows();
+ Index diagSize = (std::min)(nc,m);
+ IndexVector root(nc); // root of subtree of etree
+ root.setZero();
+ IndexVector pp(nc); // disjoint sets
+ pp.setZero(); // Initialize disjoint sets
+ parent.resize(mat.cols());
+ //Compute first nonzero column in each row
+ Index row,col;
+ firstRowElt.resize(m);
+ firstRowElt.setConstant(nc);
+ firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
+ bool found_diag;
+ for (col = 0; col < nc; col++)
+ {
+ Index pcol = col;
+ if(perm) pcol = perm[col];
+ for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
+ {
+ row = it.row();
+ firstRowElt(row) = (std::min)(firstRowElt(row), col);
+ }
+ }
+ /* Compute etree by Liu's algorithm for symmetric matrices,
+ except use (firstRowElt[r],c) in place of an edge (r,c) of A.
+ Thus each row clique in A'*A is replaced by a star
+ centered at its first vertex, which has the same fill. */
+ Index rset, cset, rroot;
+ for (col = 0; col < nc; col++)
+ {
+ found_diag = col>=m;
+ pp(col) = col;
+ cset = col;
+ root(cset) = col;
+ parent(col) = nc;
+    /* The diagonal element is treated here even if it does not exist in the matrix,
+ * hence the loop is executed once more */
+ Index pcol = col;
+ if(perm) pcol = perm[col];
+ for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
+ { // A sequence of interleaved find and union is performed
+ Index i = col;
+ if(it) i = it.index();
+ if (i == col) found_diag = true;
+
+ row = firstRowElt(i);
+ if (row >= col) continue;
+ rset = internal::etree_find(row, pp); // Find the name of the set containing row
+ rroot = root(rset);
+ if (rroot != col)
+ {
+ parent(rroot) = col;
+ pp(cset) = rset;
+ cset = rset;
+ root(cset) = col;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Depth-first search from vertex n. No recursion.
+ * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
+*/
+template <typename Index, typename IndexVector>
+void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum)
+{
+ Index current = n, first, next;
+ while (postnum != n)
+ {
+    // Get the first kid of the current node
+    first = first_kid(current);
+
+    // No kid for the current node
+ if (first == -1)
+ {
+ // Numbering this node because it has no kid
+ post(current) = postnum++;
+
+ // looking for the next kid
+ next = next_kid(current);
+ while (next == -1)
+ {
+ // No more kids : back to the parent node
+ current = parent(current);
+ // numbering the parent node
+ post(current) = postnum++;
+
+ // Get the next kid
+ next = next_kid(current);
+ }
+ // stopping criterion
+ if (postnum == n+1) return;
+
+ // Updating current node
+ current = next;
+ }
+ else
+ {
+ current = first;
+ }
+ }
+}
+
+
+/**
+ * \brief Post order a tree
+ * \param n the number of nodes
+ * \param parent Input tree
+ * \param post postordered tree
+ */
+template <typename Index, typename IndexVector>
+void treePostorder(Index n, IndexVector& parent, IndexVector& post)
+{
+ IndexVector first_kid, next_kid; // Linked list of children
+ Index postnum;
+ // Allocate storage for working arrays and results
+ first_kid.resize(n+1);
+ next_kid.setZero(n+1);
+ post.setZero(n+1);
+
+ // Set up structure describing children
+ Index v, dad;
+ first_kid.setConstant(-1);
+ for (v = n-1; v >= 0; v--)
+ {
+ dad = parent(v);
+ next_kid(v) = first_kid(dad);
+ first_kid(dad) = v;
+ }
+
+ // Depth-first search from dummy root vertex #n
+ postnum = 0;
+ internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // SPARSE_COLETREE_H
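
etree_find above is a textbook union-find lookup with path halving: every node visited on the find path is re-pointed to its grandparent, flattening the tree as a side effect of the lookup so later finds get cheaper without a second pass. A standalone sketch of the same idea on plain int arrays (illustrative, not part of the file):

    #include <cassert>

    int find_halving(int i, int* parent)
    {
      int p  = parent[i];
      int gp = parent[p];
      while (gp != p)
      {
        parent[i] = gp;  // shortcut: re-point i to its grandparent
        i  = gp;
        p  = parent[i];
        gp = parent[p];
      }
      return p;
    }

    int main()
    {
      int parent[5] = {0, 0, 1, 2, 3};   // the chain 4 -> 3 -> 2 -> 1 -> 0
      assert(find_halving(4, parent) == 0);
      assert(parent[4] == 2);            // the path was halved on the way up
      return 0;
    }
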
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index d5f97f78fc9..ec86ca933c2 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -73,7 +73,7 @@ class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
BinaryOp,Lhs,Rhs, InnerIterator> Base;
- EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename CwiseBinaryOpImpl::Index outer)
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
: Base(binOp.derived(),outer)
{}
};
@@ -300,7 +300,7 @@ template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
{
- return *this = derived() - other.derived();
+ return derived() = derived() - other.derived();
}
template<typename Derived>
@@ -308,7 +308,7 @@ template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
{
- return *this = derived() + other.derived();
+ return derived() = derived() + other.derived();
}
template<typename Derived>
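
The operator-=/operator+= fix matters because SparseMatrixBase is a CRTP base: `*this` has the static type of the base class, which has no usable operator=, so the result of the addition or subtraction has to be routed through derived() to reach the derived type's assignment operator. A minimal CRTP sketch of that dispatch (illustrative, not Eigen code; Base and Vec are hypothetical names):

    #include <iostream>

    template<typename Derived>
    struct Base
    {
      Derived& derived() { return static_cast<Derived&>(*this); }
      template<typename Other>
      Derived& assign(const Other& o) { return derived() = o; } // dispatch down
    };

    struct Vec : Base<Vec>
    {
      double x;
      Vec() : x(0) {}
      Vec& operator=(double v) { x = v; return *this; }
    };

    int main()
    {
      Vec v;
      v.assign(3.0);            // reaches Vec::operator=(double) via derived()
      std::cout << v.x << "\n"; // prints 3
      return 0;
    }
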
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
index 6f32940d6c1..54fd633a10c 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -39,7 +39,7 @@ struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
{
typedef Sparse StorageKind;
typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
- typename traits<Rhs>::Scalar>::ReturnType Scalar;
+ typename traits<Rhs>::Scalar>::ReturnType Scalar;
typedef typename Lhs::Index Index;
typedef typename Lhs::Nested LhsNested;
typedef typename Rhs::Nested RhsNested;
@@ -111,6 +111,7 @@ template<typename Lhs, typename Rhs, bool Transpose>
class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
{
typedef typename _LhsNested::InnerIterator Base;
+ typedef typename SparseDenseOuterProduct::Index Index;
public:
EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
: Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
@@ -124,7 +125,7 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
inline Scalar value() const { return Base::value() * m_factor; }
protected:
- int m_outer;
+ Index m_outer;
Scalar m_factor;
};
@@ -150,11 +151,11 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename Lhs::Index Index;
typedef typename Lhs::InnerIterator LhsInnerIterator;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
for(Index c=0; c<rhs.cols(); ++c)
{
- int n = lhs.outerSize();
+ Index n = lhs.outerSize();
for(Index j=0; j<n; ++j)
{
typename Res::Scalar tmp(0);
@@ -174,7 +175,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename Lhs::InnerIterator LhsInnerIterator;
typedef typename Lhs::Index Index;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
for(Index c=0; c<rhs.cols(); ++c)
{
@@ -196,7 +197,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename Lhs::InnerIterator LhsInnerIterator;
typedef typename Lhs::Index Index;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
@@ -215,7 +216,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename Lhs::InnerIterator LhsInnerIterator;
typedef typename Lhs::Index Index;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
@@ -244,7 +245,7 @@ class SparseTimeDenseProduct
SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{}
- template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
}
@@ -274,7 +275,7 @@ class DenseTimeSparseProduct
DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{}
- template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
Transpose<const _LhsNested> lhs_t(m_lhs);
Transpose<const _RhsNested> rhs_t(m_rhs);
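
Two recurring changes in this file: loop variables and sizes move from int to the matrix's Index type, and alpha is passed by const reference so that costly scalar types are not copied on every call. A hand-rolled sketch of what the row-major kernel above computes, namely res.col(c) += alpha * (lhs * rhs.col(c)) with one dot product per sparse row (the function name and double scalar are assumptions for illustration):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    void sparseTimeDense(const Eigen::SparseMatrix<double, Eigen::RowMajor>& lhs,
                         const Eigen::MatrixXd& rhs,
                         Eigen::MatrixXd& res,
                         const double& alpha)
    {
      typedef Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator It;
      for (int c = 0; c < rhs.cols(); ++c)
        for (int j = 0; j < lhs.outerSize(); ++j)   // each outer index is a row
        {
          double tmp = 0;
          for (It it(lhs, j); it; ++it)             // nonzeros of row j
            tmp += it.value() * rhs.coeff(it.index(), c);
          res.coeffRef(j, c) += alpha * tmp;
        }
    }
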
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
index 095bf6863fc..1bb590e64d4 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -78,7 +78,11 @@ class SparseDiagonalProduct
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
typedef internal::sparse_diagonal_product_inner_iterator_selector
- <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+ <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+ // We do not want ReverseInnerIterator for diagonal-sparse products,
+ // but this dummy declaration is needed to make diag * sparse * diag compile.
+ class ReverseInnerIterator;
EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
: m_lhs(lhs), m_rhs(rhs)
@@ -118,19 +122,23 @@ class sparse_diagonal_product_inner_iterator_selector
<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
: public CwiseBinaryOp<
scalar_product_op<typename Lhs::Scalar>,
- SparseInnerVectorSet<Rhs,1>,
- typename Lhs::DiagonalVectorType>::InnerIterator
+ const typename Rhs::ConstInnerVectorReturnType,
+ const typename Lhs::DiagonalVectorType>::InnerIterator
{
typedef typename CwiseBinaryOp<
scalar_product_op<typename Lhs::Scalar>,
- SparseInnerVectorSet<Rhs,1>,
- typename Lhs::DiagonalVectorType>::InnerIterator Base;
+ const typename Rhs::ConstInnerVectorReturnType,
+ const typename Lhs::DiagonalVectorType>::InnerIterator Base;
typedef typename Lhs::Index Index;
+ Index m_outer;
public:
inline sparse_diagonal_product_inner_iterator_selector(
const SparseDiagonalProductType& expr, Index outer)
- : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0)
+ : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer)
{}
+
+ inline Index outer() const { return m_outer; }
+ inline Index col() const { return m_outer; }
};
template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
@@ -152,19 +160,23 @@ class sparse_diagonal_product_inner_iterator_selector
<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
: public CwiseBinaryOp<
scalar_product_op<typename Rhs::Scalar>,
- SparseInnerVectorSet<Lhs,1>,
- Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+ const typename Lhs::ConstInnerVectorReturnType,
+ const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
{
typedef typename CwiseBinaryOp<
scalar_product_op<typename Rhs::Scalar>,
- SparseInnerVectorSet<Lhs,1>,
- Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+ const typename Lhs::ConstInnerVectorReturnType,
+ const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
typedef typename Lhs::Index Index;
+ Index m_outer;
public:
inline sparse_diagonal_product_inner_iterator_selector(
const SparseDiagonalProductType& expr, Index outer)
- : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0)
+ : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer)
{}
+
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return m_outer; }
};
} // end namespace internal
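
Besides retargeting the iterator bases at ConstInnerVectorReturnType, the patch adds outer()/row()/col() accessors to the selectors and declares a dummy ReverseInnerIterator so that nested products keep compiling. A quick usage sketch with illustrative values:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> s(3, 3);
      s.insert(0, 0) = 1.0;
      s.insert(1, 2) = 4.0;
      s.makeCompressed();
      Eigen::VectorXd d(3);
      d << 2.0, 3.0, 4.0;
      // diag * sparse * diag: the case the dummy ReverseInnerIterator enables
      Eigen::SparseMatrix<double> r = d.asDiagonal() * s * d.asDiagonal();
      return r.coeff(1, 2) == 3.0 * 4.0 * 4.0 ? 0 : 1;  // 48
    }
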
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
index 5c4a593dc01..db39c9aecc7 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
@@ -30,7 +30,7 @@ SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
Scalar res(0);
while (i)
{
- res += internal::conj(i.value()) * other.coeff(i.index());
+ res += numext::conj(i.value()) * other.coeff(i.index());
++i;
}
return res;
@@ -54,8 +54,8 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
typedef typename internal::remove_all<Nested>::type NestedCleaned;
typedef typename internal::remove_all<OtherNested>::type OtherNestedCleaned;
- const Nested nthis(derived());
- const OtherNested nother(other.derived());
+ Nested nthis(derived());
+ OtherNested nother(other.derived());
typename NestedCleaned::InnerIterator i(nthis,0);
typename OtherNestedCleaned::InnerIterator j(nother,0);
@@ -64,7 +64,7 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
{
if (i.index()==j.index())
{
- res += internal::conj(i.value()) * j.value();
+ res += numext::conj(i.value()) * j.value();
++i; ++j;
}
else if (i.index()<j.index())
@@ -79,16 +79,23 @@ template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::squaredNorm() const
{
- return internal::real((*this).cwiseAbs2().sum());
+ return numext::real((*this).cwiseAbs2().sum());
}
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::norm() const
{
- return internal::sqrt(squaredNorm());
+ using std::sqrt;
+ return sqrt(squaredNorm());
}
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::blueNorm() const
+{
+ return internal::blueNorm_impl(*this);
+}
} // end namespace Eigen
#endif // EIGEN_SPARSE_DOT_H
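
The scalar helpers move from internal:: to the numext:: namespace, sqrt is picked up via `using std::sqrt`, and blueNorm() becomes available on sparse expressions. A short usage sketch, with values chosen so the norms are exact:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::SparseVector<double> a(3), b(3);
      a.insert(0) = 3.0;
      a.insert(2) = 4.0;
      b.insert(0) = 1.0;
      std::cout << a.dot(b)     << "\n";  // 3
      std::cout << a.norm()     << "\n";  // 5
      std::cout << a.blueNorm() << "\n";  // 5, overflow/underflow-safe variant
      return 0;
    }
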
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
index efb774f031b..01ce0dcfee3 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
@@ -31,7 +31,7 @@ namespace Eigen {
*
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
* \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
- * is RowMajor. The default is 0 which means column-major.
+ * is ColMajor or RowMajor. The default is 0 which means column-major.
* \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
*
* This class can be extended with the help of the plugin mechanism described on the page
@@ -170,6 +170,8 @@ class SparseMatrix
* This function returns Scalar(0) if the element is an explicit \em zero */
inline Scalar coeff(Index row, Index col) const
{
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
@@ -186,6 +188,8 @@ class SparseMatrix
*/
inline Scalar& coeffRef(Index row, Index col)
{
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
@@ -213,11 +217,13 @@ class SparseMatrix
* inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
*
*/
- EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+ Scalar& insert(Index row, Index col)
{
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
if(isCompressed())
{
- reserve(VectorXi::Constant(outerSize(), 2));
+ reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
}
return insertUncompressed(row,col);
}
@@ -281,12 +287,12 @@ class SparseMatrix
template<class SizesType>
inline void reserveInnerVectors(const SizesType& reserveSizes)
{
-
if(isCompressed())
{
std::size_t totalReserveSize = 0;
// turn the matrix into non-compressed mode
- m_innerNonZeros = new Index[m_outerSize];
+ m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+ if (!m_innerNonZeros) internal::throw_std_bad_alloc();
// temporarily use m_innerSizes to hold the new starting points.
Index* newOuterIndex = m_innerNonZeros;
@@ -299,11 +305,11 @@ class SparseMatrix
totalReserveSize += reserveSizes[j];
}
m_data.reserve(totalReserveSize);
- std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
- for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+ Index previousOuterIndex = m_outerIndex[m_outerSize];
+ for(Index j=m_outerSize-1; j>=0; --j)
{
- ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
- for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+ Index innerNNZ = previousOuterIndex - m_outerIndex[j];
+ for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
@@ -318,25 +324,27 @@ class SparseMatrix
}
else
{
- Index* newOuterIndex = new Index[m_outerSize+1];
+ Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
+ if (!newOuterIndex) internal::throw_std_bad_alloc();
+
Index count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
- Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
+ Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
count += toReserve + m_innerNonZeros[j];
}
newOuterIndex[m_outerSize] = count;
m_data.resize(count);
- for(ptrdiff_t j=m_outerSize-1; j>=0; --j)
+ for(Index j=m_outerSize-1; j>=0; --j)
{
- std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
+ Index offset = newOuterIndex[j] - m_outerIndex[j];
if(offset>0)
{
- std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
- for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+ Index innerNNZ = m_innerNonZeros[j];
+ for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
@@ -345,7 +353,7 @@ class SparseMatrix
}
std::swap(m_outerIndex, newOuterIndex);
- delete[] newOuterIndex;
+ std::free(newOuterIndex);
}
}
@@ -394,7 +402,7 @@ class SparseMatrix
* \sa insertBack, insertBackByOuterInner */
inline void startVec(Index outer)
{
- eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
m_outerIndex[outer+1] = m_outerIndex[outer];
}
@@ -431,7 +439,7 @@ class SparseMatrix
/** \internal
* same as insert(Index,Index) except that the indices are given relative to the storage order */
- EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
+ Scalar& insertByOuterInner(Index j, Index i)
{
return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
}
@@ -448,7 +456,7 @@ class SparseMatrix
for(Index j=1; j<m_outerSize; ++j)
{
Index nextOldStart = m_outerIndex[j+1];
- std::ptrdiff_t offset = oldStart - m_outerIndex[j];
+ Index offset = oldStart - m_outerIndex[j];
if(offset>0)
{
for(Index k=0; k<m_innerNonZeros[j]; ++k)
@@ -460,14 +468,26 @@ class SparseMatrix
m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
oldStart = nextOldStart;
}
- delete[] m_innerNonZeros;
+ std::free(m_innerNonZeros);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
m_data.squeeze();
}
+ /** Turns the matrix into the uncompressed mode */
+ void uncompress()
+ {
+ if(m_innerNonZeros != 0)
+ return;
+ m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+ for (Index i = 0; i < m_outerSize; i++)
+ {
+ m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ }
+ }
+
+    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
- void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
}
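
uncompress() is the inverse of makeCompressed(): it allocates the innerNonZeros array so each inner vector can keep free room for cheap insertions, while prune() re-compresses and drops coefficients that are negligible relative to the reference. A round-trip sketch with illustrative values:

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(3, 3);
      m.insert(0, 0) = 1.0;
      m.insert(1, 1) = 1e-16;
      m.makeCompressed();
      m.uncompress();          // back to non-compressed storage
      m.prune(1.0);            // drop coefficients negligible w.r.t. 1.0
      return m.nonZeros() == 1 ? 0 : 1;
    }
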
@@ -506,6 +526,70 @@ class SparseMatrix
m_data.resize(k,0);
}
+ /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
+ * \sa resizeNonZeros(Index), reserve(), setZero()
+ */
+ void conservativeResize(Index rows, Index cols)
+ {
+ // No change
+ if (this->rows() == rows && this->cols() == cols) return;
+
+ // If one dimension is null, then there is nothing to be preserved
+ if(rows==0 || cols==0) return resize(rows,cols);
+
+ Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
+ Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
+ Index newInnerSize = IsRowMajor ? cols : rows;
+
+ // Deals with inner non zeros
+ if (m_innerNonZeros)
+ {
+ // Resize m_innerNonZeros
+ Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
+ if (!newInnerNonZeros) internal::throw_std_bad_alloc();
+ m_innerNonZeros = newInnerNonZeros;
+
+ for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
+ m_innerNonZeros[i] = 0;
+ }
+ else if (innerChange < 0)
+ {
+ // Inner size decreased: allocate a new m_innerNonZeros
+ m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
+ if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+ for(Index i = 0; i < m_outerSize; i++)
+ m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ }
+
+ // Change the m_innerNonZeros in case of a decrease of inner size
+ if (m_innerNonZeros && innerChange < 0)
+ {
+ for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
+ {
+ Index &n = m_innerNonZeros[i];
+ Index start = m_outerIndex[i];
+ while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
+ }
+ }
+
+ m_innerSize = newInnerSize;
+
+ // Re-allocate outer index structure if necessary
+ if (outerChange == 0)
+ return;
+
+ Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
+ if (!newOuterIndex) internal::throw_std_bad_alloc();
+ m_outerIndex = newOuterIndex;
+ if (outerChange > 0)
+ {
+ Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
+ m_outerIndex[i] = last;
+ }
+ m_outerSize += outerChange;
+ }
+
/** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
* \sa resizeNonZeros(Index), reserve(), setZero()
*/
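
conservativeResize() keeps every coefficient that still fits in the new bounds, trims entries belonging to removed rows or columns, and leaves newly added outer vectors empty. A small sketch with illustrative values:

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(2, 2);   // col-major by default
      m.insert(0, 0) = 1.0;
      m.insert(1, 1) = 2.0;
      m.conservativeResize(3, 1);            // grow rows, shrink columns
      // (0,0) survives; (1,1) fell outside the new bounds
      return (m.coeff(0, 0) == 1.0 && m.nonZeros() == 1) ? 0 : 1;
    }
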
@@ -516,13 +600,15 @@ class SparseMatrix
m_data.clear();
if (m_outerSize != outerSize || m_outerSize==0)
{
- delete[] m_outerIndex;
- m_outerIndex = new Index [outerSize+1];
+ std::free(m_outerIndex);
+ m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
+ if (!m_outerIndex) internal::throw_std_bad_alloc();
+
m_outerSize = outerSize;
}
if(m_innerNonZeros)
{
- delete[] m_innerNonZeros;
+ std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
@@ -560,9 +646,20 @@ class SparseMatrix
inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
check_template_parameters();
*this = other.derived();
}
+
+ /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
+ template<typename OtherDerived, unsigned int UpLo>
+ inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ *this = other;
+ }
/** Copy constructor (it performs a deep copy) */
inline SparseMatrix(const SparseMatrix& other)
@@ -572,6 +669,16 @@ class SparseMatrix
*this = other.derived();
}
+ /** \brief Copy constructor with in-place evaluation */
+ template<typename OtherDerived>
+ SparseMatrix(const ReturnByValue<OtherDerived>& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ initAssignment(other);
+ other.evalTo(*this);
+ }
+
/** Swaps the content of two sparse matrices of the same type.
* This is a fast operation that simply swaps the underlying pointers and parameters. */
inline void swap(SparseMatrix& other)
@@ -584,13 +691,22 @@ class SparseMatrix
m_data.swap(other.m_data);
}
+ /** Sets *this to the identity matrix */
+ inline void setIdentity()
+ {
+ eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
+ this->m_data.resize(rows());
+ Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
+ Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
+ Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
+ }
inline SparseMatrix& operator=(const SparseMatrix& other)
{
if (other.isRValue())
{
swap(other.const_cast_derived());
}
- else
+ else if(this!=&other)
{
initAssignment(other);
if(other.isCompressed())
@@ -613,7 +729,10 @@ class SparseMatrix
template<typename OtherDerived>
inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
- { return Base::operator=(other.derived()); }
+ {
+ initAssignment(other);
+ return Base::operator=(other.derived());
+ }
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
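
The same hunk also guards operator= against self-assignment and adds setIdentity(), which writes the identity pattern straight into the compressed arrays (linearly spaced indices and outer offsets, unit values), so it runs in O(n) with no per-coefficient insert. Usage sketch:

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(4, 4);
      m.setIdentity();
      return (m.nonZeros() == 4 && m.coeff(2, 2) == 1.0) ? 0 : 1;
    }
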
@@ -621,58 +740,7 @@ class SparseMatrix
#endif
template<typename OtherDerived>
- EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
- {
- initAssignment(other.derived());
- const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- if (needToTranspose)
- {
- // two passes algorithm:
- // 1 - compute the number of coeffs per dest inner vector
- // 2 - do the actual copy/eval
- // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
- typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
- typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
- OtherCopy otherCopy(other.derived());
-
- Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
- // pass 1
- // FIXME the above copy could be merged with that pass
- for (Index j=0; j<otherCopy.outerSize(); ++j)
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
- ++m_outerIndex[it.index()];
-
- // prefix sum
- Index count = 0;
- VectorXi positions(outerSize());
- for (Index j=0; j<outerSize(); ++j)
- {
- Index tmp = m_outerIndex[j];
- m_outerIndex[j] = count;
- positions[j] = count;
- count += tmp;
- }
- m_outerIndex[outerSize()] = count;
- // alloc
- m_data.resize(count);
- // pass 2
- for (Index j=0; j<otherCopy.outerSize(); ++j)
- {
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
- {
- Index pos = positions[it.index()]++;
- m_data.index(pos) = j;
- m_data.value(pos) = it.value();
- }
- }
- return *this;
- }
- else
- {
- // there is no special optimization
- return Base::operator=(other.derived());
- }
- }
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
{
@@ -684,8 +752,8 @@ class SparseMatrix
else
for (Index i=0; i<m.outerSize(); ++i)
{
- int p = m.m_outerIndex[i];
- int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+ Index p = m.m_outerIndex[i];
+ Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
for (; k<pe; ++k)
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
@@ -714,8 +782,8 @@ class SparseMatrix
/** Destructor */
inline ~SparseMatrix()
{
- delete[] m_outerIndex;
- delete[] m_innerNonZeros;
+ std::free(m_outerIndex);
+ std::free(m_innerNonZeros);
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -735,118 +803,14 @@ protected:
resize(other.rows(), other.cols());
if(m_innerNonZeros)
{
- delete[] m_innerNonZeros;
+ std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
}
/** \internal
* \sa insert(Index,Index) */
- EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
- {
- eigen_assert(isCompressed());
-
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- Index previousOuter = outer;
- if (m_outerIndex[outer+1]==0)
- {
- // we start a new inner vector
- while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
- {
- m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
- --previousOuter;
- }
- m_outerIndex[outer+1] = m_outerIndex[outer];
- }
-
- // here we have to handle the tricky case where the outerIndex array
- // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
- // the 2nd inner vector...
- bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
- && (size_t(m_outerIndex[outer+1]) == m_data.size());
-
- size_t startId = m_outerIndex[outer];
- // FIXME let's make sure sizeof(long int) == sizeof(size_t)
- size_t p = m_outerIndex[outer+1];
- ++m_outerIndex[outer+1];
-
- float reallocRatio = 1;
- if (m_data.allocatedSize()<=m_data.size())
- {
- // if there is no preallocated memory, let's reserve a minimum of 32 elements
- if (m_data.size()==0)
- {
- m_data.reserve(32);
- }
- else
- {
- // we need to reallocate the data, to reduce multiple reallocations
- // we use a smart resize algorithm based on the current filling ratio
- // in addition, we use float to avoid integers overflows
- float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
- reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
- // furthermore we bound the realloc ratio to:
- // 1) reduce multiple minor realloc when the matrix is almost filled
- // 2) avoid to allocate too much memory when the matrix is almost empty
- reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
- }
- }
- m_data.resize(m_data.size()+1,reallocRatio);
-
- if (!isLastVec)
- {
- if (previousOuter==-1)
- {
- // oops wrong guess.
- // let's correct the outer offsets
- for (Index k=0; k<=(outer+1); ++k)
- m_outerIndex[k] = 0;
- Index k=outer+1;
- while(m_outerIndex[k]==0)
- m_outerIndex[k++] = 1;
- while (k<=m_outerSize && m_outerIndex[k]!=0)
- m_outerIndex[k++]++;
- p = 0;
- --k;
- k = m_outerIndex[k]-1;
- while (k>0)
- {
- m_data.index(k) = m_data.index(k-1);
- m_data.value(k) = m_data.value(k-1);
- k--;
- }
- }
- else
- {
- // we are not inserting into the last inner vec
- // update outer indices:
- Index j = outer+2;
- while (j<=m_outerSize && m_outerIndex[j]!=0)
- m_outerIndex[j++]++;
- --j;
- // shift data of last vecs:
- Index k = m_outerIndex[j]-1;
- while (k>=Index(p))
- {
- m_data.index(k) = m_data.index(k-1);
- m_data.value(k) = m_data.value(k-1);
- k--;
- }
- }
- }
-
- while ( (p > startId) && (m_data.index(p-1) > inner) )
- {
- m_data.index(p) = m_data.index(p-1);
- m_data.value(p) = m_data.value(p-1);
- --p;
- }
-
- m_data.index(p) = inner;
- return (m_data.value(p) = 0);
- }
+ EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
/** \internal
* A vector object that is equal to 0 everywhere but v at the position i */
@@ -865,40 +829,12 @@ protected:
/** \internal
* \sa insert(Index,Index) */
- EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
- {
- eigen_assert(!isCompressed());
-
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
- std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
- if(innerNNZ>=room)
- {
- // this inner vector is full, we need to reallocate the whole buffer :(
- reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
- }
-
- Index startId = m_outerIndex[outer];
- Index p = startId + m_innerNonZeros[outer];
- while ( (p > startId) && (m_data.index(p-1) > inner) )
- {
- m_data.index(p) = m_data.index(p-1);
- m_data.value(p) = m_data.value(p-1);
- --p;
- }
-
- m_innerNonZeros[outer]++;
-
- m_data.index(p) = inner;
- return (m_data.value(p) = 0);
- }
+ EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
public:
/** \internal
* \sa insert(Index,Index) */
- inline Scalar& insertBackUncompressed(Index row, Index col)
+ EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
@@ -906,8 +842,7 @@ public:
eigen_assert(!isCompressed());
eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
- Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
- m_innerNonZeros[outer]++;
+ Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
m_data.index(p) = inner;
return (m_data.value(p) = 0);
}
@@ -916,10 +851,11 @@ private:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
struct default_prunning_func {
- default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+ default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
inline bool operator() (const Index&, const Index&, const Scalar& value) const
{
return !internal::isMuchSmallerThan(value, reference, epsilon);
@@ -1006,19 +942,25 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
typedef typename SparseMatrixType::Index Index;
SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
- // pass 1: count the nnz per inner-vector
- VectorXi wi(trMat.outerSize());
- wi.setZero();
- for(InputIterator it(begin); it!=end; ++it)
- wi(IsRowMajor ? it->col() : it->row())++;
+ if(begin!=end)
+ {
+ // pass 1: count the nnz per inner-vector
+ Matrix<Index,Dynamic,1> wi(trMat.outerSize());
+ wi.setZero();
+ for(InputIterator it(begin); it!=end; ++it)
+ {
+ eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
+ wi(IsRowMajor ? it->col() : it->row())++;
+ }
- // pass 2: insert all the elements into trMat
- trMat.reserve(wi);
- for(InputIterator it(begin); it!=end; ++it)
- trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+ // pass 2: insert all the elements into trMat
+ trMat.reserve(wi);
+ for(InputIterator it(begin); it!=end; ++it)
+ trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
- // pass 3:
- trMat.sumupDuplicates();
+ // pass 3:
+ trMat.sumupDuplicates();
+ }
// pass 4: transposed copy -> implicit sorting
mat = trMat;
@@ -1027,7 +969,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
}
-/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \b.
+/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
*
* A \em triplet is a tuple (i,j,value) defining a non-zero element.
 * The input list of triplets does not have to be sorted, and may contain duplicate elements.
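A minimal usage sketch of the setFromTriplets() behavior documented above, showing that duplicate (i,j) entries are summed:

  #include <Eigen/Sparse>
  #include <iostream>
  #include <vector>

  int main()
  {
    typedef Eigen::Triplet<double> T;
    std::vector<T> triplets;
    triplets.push_back(T(0, 0, 1.0));
    triplets.push_back(T(1, 2, 2.0));
    triplets.push_back(T(1, 2, 3.0)); // duplicate of (1,2): will be summed
    triplets.push_back(T(2, 1, 4.0));

    Eigen::SparseMatrix<double> A(3, 3);
    A.setFromTriplets(triplets.begin(), triplets.end());
    std::cout << A.coeff(1, 2) << "\n"; // prints 5, the sum of the duplicates
  }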
@@ -1077,11 +1019,11 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
eigen_assert(!isCompressed());
// TODO, in practice we should be able to use m_innerNonZeros for that task
- VectorXi wi(innerSize());
+ Matrix<Index,Dynamic,1> wi(innerSize());
wi.fill(-1);
Index count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
- for(int j=0; j<outerSize(); ++j)
+ for(Index j=0; j<outerSize(); ++j)
{
Index start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
@@ -1106,11 +1048,212 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
m_outerIndex[m_outerSize] = count;
// turn the matrix into compressed form
- delete[] m_innerNonZeros;
+ std::free(m_innerNonZeros);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
}
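The compaction loop above relies on a marker array wi initialized to -1: once an inner index has been seen, wi stores the position where its value accumulates. The same technique in isolation, on a single inner vector held as plain index/value arrays (a sketch, not Eigen's internals):

  #include <vector>

  // Sum duplicates of one inner vector in place. wi[k] == -1 means inner
  // index k has not been seen yet; otherwise it holds the position where
  // its value accumulates.
  void sumup_duplicates(std::vector<int>& idx, std::vector<double>& val, int innerSize)
  {
    std::vector<int> wi(innerSize, -1);
    std::size_t count = 0;
    for (std::size_t p = 0; p < idx.size(); ++p)
    {
      int i = idx[p];
      if (wi[i] >= 0)
        val[wi[i]] += val[p];          // duplicate: accumulate in place
      else
      {
        wi[i] = static_cast<int>(count);
        idx[count] = i;                // first occurrence: compact it forward
        val[count] = val[p];
        ++count;
      }
    }
    idx.resize(count);
    val.resize(count);
  }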
+template<typename Scalar, int _Options, typename _Index>
+template<typename OtherDerived>
+EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
+{
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+    // two-pass algorithm:
+    //  1 - compute the number of coeffs per dest inner vector
+    //  2 - do the actual copy/eval
+    // Since each coeff of the rhs has to be evaluated twice, evaluate it into a temporary if needed
+ typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ SparseMatrix dest(other.rows(),other.cols());
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
+
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ ++dest.m_outerIndex[it.index()];
+
+ // prefix sum
+ Index count = 0;
+ Matrix<Index,Dynamic,1> positions(dest.outerSize());
+ for (Index j=0; j<dest.outerSize(); ++j)
+ {
+ Index tmp = dest.m_outerIndex[j];
+ dest.m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ dest.m_outerIndex[dest.outerSize()] = count;
+ // alloc
+ dest.m_data.resize(count);
+ // pass 2
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ dest.m_data.index(pos) = j;
+ dest.m_data.value(pos) = it.value();
+ }
+ }
+ this->swap(dest);
+ return *this;
+ }
+ else
+ {
+ if(other.isRValue())
+ initAssignment(other.derived());
+ // there is no special optimization
+ return Base::operator=(other.derived());
+ }
+}
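The needToTranspose branch above is a counting-sort transposition: count entries per destination inner vector, prefix-sum the counts into start offsets, then scatter each entry. The same idea on plain CSR/CSC arrays (illustrative names, not Eigen's internals):

  #include <vector>

  // Convert CSR arrays to CSC arrays with the two-pass scheme used above.
  void csr_to_csc(int rows, int cols,
                  const std::vector<int>& rowPtr, const std::vector<int>& colIdx,
                  const std::vector<double>& vals,
                  std::vector<int>& colPtr, std::vector<int>& rowIdx,
                  std::vector<double>& outVals)
  {
    colPtr.assign(cols + 1, 0);
    rowIdx.resize(vals.size());
    outVals.resize(vals.size());

    // pass 1: count entries per destination column
    for (std::size_t k = 0; k < colIdx.size(); ++k)
      ++colPtr[colIdx[k] + 1];

    // prefix sum turns counts into column start offsets
    for (int j = 0; j < cols; ++j)
      colPtr[j + 1] += colPtr[j];

    // pass 2: scatter each entry, advancing a per-column cursor
    std::vector<int> next(colPtr.begin(), colPtr.end() - 1);
    for (int i = 0; i < rows; ++i)
      for (int k = rowPtr[i]; k < rowPtr[i + 1]; ++k)
      {
        int pos = next[colIdx[k]]++;
        rowIdx[pos] = i;
        outVals[pos] = vals[k];
      }
  }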
+
+template<typename _Scalar, int _Options, typename _Index>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
+{
+ eigen_assert(!isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
+ Index innerNNZ = m_innerNonZeros[outer];
+ if(innerNNZ>=room)
+ {
+ // this inner vector is full, we need to reallocate the whole buffer :(
+ reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
+ }
+
+ Index startId = m_outerIndex[outer];
+ Index p = startId + m_innerNonZeros[outer];
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
+
+ m_innerNonZeros[outer]++;
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+}
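insertUncompressed() stays cheap as long as each inner vector has spare room, which is why the canonical random-insertion pattern reserves space per inner vector first:

  #include <Eigen/Sparse>
  #include <algorithm>

  int main()
  {
    const int n = 1000;
    Eigen::SparseMatrix<double> A(n, n);
    // reserve room per column so insert() rarely hits the reallocation path above
    A.reserve(Eigen::VectorXi::Constant(n, 6));
    for (int j = 0; j < n; ++j)
      for (int i = std::max(0, j - 2); i <= std::min(n - 1, j + 2); ++i)
        A.insert(i, j) = 1.0; // shifts at most the entries of one inner vector
    A.makeCompressed();       // drop the remaining slack, back to compressed mode
  }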
+
+template<typename _Scalar, int _Options, typename _Index>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
+{
+ eigen_assert(isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index previousOuter = outer;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+ {
+ m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ --previousOuter;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ // here we have to handle the tricky case where the outerIndex array
+  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
+ // the 2nd inner vector...
+ bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+ && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+ size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+ size_t p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ float reallocRatio = 1;
+ if (m_data.allocatedSize()<=m_data.size())
+ {
+ // if there is no preallocated memory, let's reserve a minimum of 32 elements
+ if (m_data.size()==0)
+ {
+ m_data.reserve(32);
+ }
+ else
+ {
+        // we need to reallocate the data; to reduce repeated reallocations
+        // we use a smart resize algorithm based on the current filling ratio.
+        // In addition, we use float to avoid integer overflows
+ float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
+ reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+        // furthermore we bound the realloc ratio to:
+        //   1) reduce multiple minor reallocs when the matrix is almost filled
+        //   2) avoid allocating too much memory when the matrix is almost empty
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+ }
+ }
+ m_data.resize(m_data.size()+1,reallocRatio);
+
+ if (!isLastVec)
+ {
+ if (previousOuter==-1)
+ {
+ // oops wrong guess.
+ // let's correct the outer offsets
+ for (Index k=0; k<=(outer+1); ++k)
+ m_outerIndex[k] = 0;
+ Index k=outer+1;
+ while(m_outerIndex[k]==0)
+ m_outerIndex[k++] = 1;
+ while (k<=m_outerSize && m_outerIndex[k]!=0)
+ m_outerIndex[k++]++;
+ p = 0;
+ --k;
+ k = m_outerIndex[k]-1;
+ while (k>0)
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ else
+ {
+ // we are not inserting into the last inner vec
+ // update outer indices:
+ Index j = outer+2;
+ while (j<=m_outerSize && m_outerIndex[j]!=0)
+ m_outerIndex[j++]++;
+ --j;
+ // shift data of last vecs:
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(p))
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ }
+
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+}
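The growth heuristic in insertCompressed() extrapolates the final number of nonzeros from the filling ratio observed so far, then clamps the growth factor to [1.5, 8]. Reproduced in isolation with a worked value (a sketch of the formula above, not Eigen code):

  #include <algorithm>
  #include <cstdio>

  // Estimate the final nnz from the fill observed over the outer vectors
  // processed so far, then clamp the resulting growth factor.
  float realloc_ratio(int nnzSoFar, int currentSize, int outerDone, int outerTotal)
  {
    float nnzEstimate = float(nnzSoFar) * float(outerTotal) / float(outerDone);
    float ratio = (nnzEstimate - float(currentSize)) / float(currentSize);
    return std::min(std::max(ratio, 1.5f), 8.f);
  }

  int main()
  {
    // after filling 10 of 100 columns we hold 50 entries in a full buffer of
    // size 50: the estimate is 500 entries total, so grow aggressively
    std::printf("%g\n", realloc_ratio(50, 50, 10, 100)); // prints 8 (clamped)
  }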
+
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
index 9a1258097fe..bbcf7fb1c62 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -89,6 +89,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
*/
IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+
+ InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
+ : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
#ifndef EIGEN_PARSED_BY_DOXYGEN
_HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
@@ -102,7 +105,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
>::type AdjointReturnType;
- typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
+ typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, Index> PlainObject;
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -136,13 +139,13 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
# include "../plugins/CommonCwiseBinaryOps.h"
# include "../plugins/MatrixCwiseUnaryOps.h"
# include "../plugins/MatrixCwiseBinaryOps.h"
+# include "../plugins/BlockMethods.h"
# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
# include EIGEN_SPARSEMATRIXBASE_PLUGIN
# endif
# undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
-
/** \returns the number of rows. \sa cols() */
inline Index rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows() */
@@ -299,8 +302,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
}
else
{
- SparseMatrix<Scalar, RowMajorBit> trans = m;
- s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit> >&>(trans);
+ SparseMatrix<Scalar, RowMajorBit, Index> trans = m;
+ s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, Index> >&>(trans);
}
}
return s;
@@ -322,8 +325,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
typename internal::traits<OtherDerived>::Scalar \
>::ReturnType \
>, \
- Derived, \
- OtherDerived \
+ const Derived, \
+ const OtherDerived \
>
template<typename OtherDerived>
@@ -387,55 +390,45 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
RealScalar squaredNorm() const;
RealScalar norm() const;
+ RealScalar blueNorm() const;
Transpose<Derived> transpose() { return derived(); }
const Transpose<const Derived> transpose() const { return derived(); }
const AdjointReturnType adjoint() const { return transpose(); }
- // sub-vector
- SparseInnerVectorSet<Derived,1> row(Index i);
- const SparseInnerVectorSet<Derived,1> row(Index i) const;
- SparseInnerVectorSet<Derived,1> col(Index j);
- const SparseInnerVectorSet<Derived,1> col(Index j) const;
- SparseInnerVectorSet<Derived,1> innerVector(Index outer);
- const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
-
- // set of sub-vectors
- SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
- const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
- SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
- const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
-
- SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size);
- const SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size) const;
- SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size);
- const SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size) const;
- SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
- const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
-
- /** \internal use operator= */
- template<typename DenseDerived>
- void evalTo(MatrixBase<DenseDerived>& dst) const
- {
- dst.setZero();
- for (Index j=0; j<outerSize(); ++j)
- for (typename Derived::InnerIterator i(derived(),j); i; ++i)
- dst.coeffRef(i.row(),i.col()) = i.value();
- }
+ // inner-vector
+ typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
+ typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
+ InnerVectorReturnType innerVector(Index outer);
+ const ConstInnerVectorReturnType innerVector(Index outer) const;
- Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
- {
- return derived();
- }
+ // set of inner-vectors
+ Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize);
+ const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const;
+
+ /** \internal use operator= */
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& dst) const
+ {
+ dst.setZero();
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ dst.coeffRef(i.row(),i.col()) = i.value();
+ }
+
+ Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ {
+ return derived();
+ }
template<typename OtherDerived>
bool isApprox(const SparseMatrixBase<OtherDerived>& other,
- RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
{ return toDense().isApprox(other.toDense(),prec); }
template<typename OtherDerived>
bool isApprox(const MatrixBase<OtherDerived>& other,
- RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
{ return toDense().isApprox(other,prec); }
/** \returns the matrix or vector obtained by evaluating this expression.
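innerVector() and innerVectors() now return Block expressions, and the newly included BlockMethods plugin provides row()/col() on sparse expressions as well. A short usage sketch:

  #include <Eigen/Sparse>
  #include <iostream>

  int main()
  {
    Eigen::SparseMatrix<double> A(4, 4); // column-major: inner vectors are columns
    A.insert(0, 1) = 1.0;
    A.insert(2, 1) = 2.0;
    A.makeCompressed();

    // innerVector(1) is column 1 here; for a row-major matrix it would be row 1
    std::cout << A.innerVector(1).sum() << "\n"; // prints 3
    std::cout << A.col(1).sum() << "\n";         // same column through the block API
  }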
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
index b897b7595b5..b85be93f6f9 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
@@ -57,7 +57,7 @@ struct permut_sparsematrix_product_retval
if(MoveOuter)
{
SparseMatrix<Scalar,SrcStorageOrder,Index> tmp(m_matrix.rows(), m_matrix.cols());
- VectorXi sizes(m_matrix.outerSize());
+ Matrix<Index,Dynamic,1> sizes(m_matrix.outerSize());
for(Index j=0; j<m_matrix.outerSize(); ++j)
{
Index jp = m_permutation.indices().coeff(j);
@@ -77,7 +77,7 @@ struct permut_sparsematrix_product_retval
else
{
SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> tmp(m_matrix.rows(), m_matrix.cols());
- VectorXi sizes(tmp.outerSize());
+ Matrix<Index,Dynamic,1> sizes(tmp.outerSize());
sizes.setZero();
PermutationMatrix<Dynamic,Dynamic,Index> perm;
if((Side==OnTheLeft) ^ Transposed)
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h
index 6a555b83434..cf766307008 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h
@@ -16,6 +16,7 @@ template<typename Lhs, typename Rhs>
struct SparseSparseProductReturnType
{
typedef typename internal::traits<Lhs>::Scalar Scalar;
+ typedef typename internal::traits<Lhs>::Index Index;
enum {
LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
@@ -24,11 +25,11 @@ struct SparseSparseProductReturnType
};
typedef typename internal::conditional<TransposeLhs,
- SparseMatrix<Scalar,0>,
+ SparseMatrix<Scalar,0,Index>,
typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
typedef typename internal::conditional<TransposeRhs,
- SparseMatrix<Scalar,0>,
+ SparseMatrix<Scalar,0,Index>,
typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
typedef SparseSparseProduct<LhsNested, RhsNested> Type;
@@ -99,15 +100,16 @@ class SparseSparseProduct : internal::no_assignment_operator,
}
template<typename Lhs, typename Rhs>
- EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, RealScalar tolerance)
+ EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance)
: m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
{
init();
}
- SparseSparseProduct pruned(Scalar reference = 0, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision()) const
+ SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) const
{
- return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon);
+ using std::abs;
+ return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon);
}
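pruned() is typically used on sparse products to avoid storing numerically negligible entries; a result entry is kept only when it is not much smaller than reference * epsilon:

  #include <Eigen/Sparse>

  int main()
  {
    Eigen::SparseMatrix<double> A(100, 100), B(100, 100), C;
    // ... fill A and B ...

    C = (A * B).pruned();            // default reference 0: drops only exact zeros
    C = (A * B).pruned(1.0, 1e-12);  // drops entries much smaller than 1.0 * 1e-12
  }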
template<typename Dest>
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
index 86ec0a6c5e2..0eda96bc471 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -69,6 +69,30 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
const _MatrixTypeNested& matrix() const { return m_matrix; }
_MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+ /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
+ *
+      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
+      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
+ */
+ template<typename OtherDerived>
+ SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>
+ operator*(const SparseMatrixBase<OtherDerived>& rhs) const
+ {
+ return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived());
+ }
+
+ /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
+ *
+      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
+      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
+ */
+ template<typename OtherDerived> friend
+ SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject >
+ operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+ {
+ return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs);
+ }
+
/** Efficient sparse self-adjoint matrix times dense vector/matrix product */
template<typename OtherDerived>
SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
@@ -94,7 +118,7 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
* call this function with u.adjoint().
*/
template<typename DerivedU>
- SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+ SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
/** \internal triggered by sparse_matrix = SparseSelfadjointView; */
template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
@@ -173,7 +197,7 @@ SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView(
template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,UpLo>&
-SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
if(alpha==Scalar(0))
@@ -207,12 +231,12 @@ class SparseSelfAdjointTimeDenseProduct
SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{}
- template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
+ EIGEN_ONLY_USED_FOR_DEBUG(alpha);
// TODO use alpha
eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
typedef typename internal::remove_all<Lhs>::type _Lhs;
- typedef typename internal::remove_all<Rhs>::type _Rhs;
typedef typename _Lhs::InnerIterator LhsInnerIterator;
enum {
LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
@@ -240,7 +264,7 @@ class SparseSelfAdjointTimeDenseProduct
Index b = LhsIsRowMajor ? i.index() : j;
typename Lhs::Scalar v = i.value();
dest.row(a) += (v) * m_rhs.row(b);
- dest.row(b) += internal::conj(v) * m_rhs.row(a);
+ dest.row(b) += numext::conj(v) * m_rhs.row(a);
}
if (ProcessFirstHalf && i && (i.index()==j))
dest.row(j) += i.value() * m_rhs.row(j);
@@ -268,7 +292,7 @@ class DenseTimeSparseSelfAdjointProduct
DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{}
- template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+ template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const
{
// TODO
}
@@ -367,7 +391,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
dest.valuePtr()[k] = it.value();
k = count[ip]++;
dest.innerIndexPtr()[k] = jp;
- dest.valuePtr()[k] = internal::conj(it.value());
+ dest.valuePtr()[k] = numext::conj(it.value());
}
}
}
@@ -428,7 +452,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
if(!StorageOrderMatch) std::swap(ip,jp);
if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
- dest.valuePtr()[k] = conj(it.value());
+ dest.valuePtr()[k] = numext::conj(it.value());
else
dest.valuePtr()[k] = it.value();
}
@@ -461,7 +485,10 @@ class SparseSymmetricPermutationProduct
template<typename DestScalar, int Options, typename DstIndex>
void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
{
- internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+// internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+ SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
+ _dest = tmp;
}
template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
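A usage sketch of the two new sparse products and of rankUpdate() with its now const-reference alpha (illustrative fills omitted):

  #include <Eigen/Sparse>

  int main()
  {
    typedef Eigen::SparseMatrix<double> SpMat;
    SpMat A(10, 10), B(10, 10), C;
    // ... fill A (only its lower triangle is referenced below) and B ...

    // the self-adjoint operand is first copied into a temporary SparseMatrix,
    // as the documentation above points out
    C = A.selfadjointView<Eigen::Lower>() * B;
    C = B * A.selfadjointView<Eigen::Lower>();

    SpMat u(10, 2);
    A.selfadjointView<Eigen::Lower>().rankUpdate(u, 1.0); // A += 1.0 * u * u^*
  }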
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 2438ac573d0..fcc18f5c9c8 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -17,7 +17,7 @@ namespace internal {
// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
template<typename Lhs, typename Rhs, typename ResultType>
-static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, typename ResultType::RealScalar tolerance)
+static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
{
// return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
@@ -27,7 +27,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
- //int size = lhs.outerSize();
+ //Index size = lhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
@@ -85,7 +85,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C
typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
typedef typename ResultType::RealScalar RealScalar;
- static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
typename remove_all<ResultType>::type _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
@@ -97,10 +97,10 @@ template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
- static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// we need a col-major matrix to hold the result
- typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> SparseTemporaryType;
SparseTemporaryType _res(res.rows(), res.cols());
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
res = _res;
@@ -111,7 +111,7 @@ template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
typedef typename ResultType::RealScalar RealScalar;
- static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
// let's transpose the product to get a column x column product
typename remove_all<ResultType>::type _res(res.rows(), res.cols());
@@ -124,12 +124,13 @@ template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
typedef typename ResultType::RealScalar RealScalar;
- static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
- ColMajorMatrix colLhs(lhs);
- ColMajorMatrix colRhs(rhs);
- internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
+ ColMajorMatrixLhs colLhs(lhs);
+ ColMajorMatrixRhs colRhs(rhs);
+ internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
// let's transpose the product to get a column x column product
// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
index 273f9de688f..7c300ee8dbc 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
@@ -18,7 +18,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
public:
- EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType> )
class InnerIterator;
class ReverseInnerIterator;
@@ -34,26 +34,28 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerItera
: public _MatrixTypeNested::InnerIterator
{
typedef typename _MatrixTypeNested::InnerIterator Base;
+ typedef typename TransposeImpl::Index Index;
public:
EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
: Base(trans.derived().nestedExpression(), outer)
{}
- inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
- inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+ Index row() const { return Base::col(); }
+ Index col() const { return Base::row(); }
};
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
: public _MatrixTypeNested::ReverseInnerIterator
{
typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+ typedef typename TransposeImpl::Index Index;
public:
EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
: Base(xpr.derived().nestedExpression(), outer)
{}
- inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
- inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+ Index row() const { return Base::col(); }
+ Index col() const { return Base::row(); }
};
} // end namespace Eigen
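The iterator change above makes row()/col() of a transposed sparse expression report swapped coordinates, consistent with the expression's meaning. A small sketch iterating such an expression:

  #include <Eigen/Sparse>
  #include <iostream>

  int main()
  {
    typedef Eigen::SparseMatrix<double> SpMat;
    SpMat A(3, 3);
    A.insert(0, 2) = 5.0;
    A.makeCompressed();

    Eigen::Transpose<SpMat> At(A); // an expression, no data is copied
    for (int j = 0; j < At.outerSize(); ++j)
      for (Eigen::Transpose<SpMat>::InnerIterator it(At, j); it; ++it)
        std::cout << it.row() << " " << it.col() << " " << it.value() << "\n";
    // the entry stored at (0,2) in A is reported as "2 0 5"
  }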
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h
index 477e4bd94b0..333127b78e8 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -27,6 +28,7 @@ template<typename MatrixType, int Mode> class SparseTriangularView
enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
|| ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)),
SkipLast = !SkipFirst,
+ SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
@@ -64,6 +66,7 @@ template<typename MatrixType, int Mode>
class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
{
typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
+ typedef typename SparseTriangularView::Index Index;
public:
EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
@@ -71,7 +74,7 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
{
if(SkipFirst)
{
- while((*this) && (HasUnitDiag ? this->index()<=outer : this->index()<outer))
+ while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
Base::operator++();
if(HasUnitDiag)
m_returnOne = true;
@@ -101,8 +104,8 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
return *this;
}
- inline Index row() const { return Base::row(); }
- inline Index col() const { return Base::col(); }
+ inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
+ inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
inline Index index() const
{
if(HasUnitDiag && m_returnOne) return Base::outer();
@@ -118,7 +121,12 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
{
if(HasUnitDiag && m_returnOne)
return true;
- return (SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer()));
+ if(SkipFirst) return Base::operator bool();
+ else
+ {
+ if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
+ else return (Base::operator bool() && this->index() <= this->outer());
+ }
}
protected:
bool m_returnOne;
@@ -128,18 +136,20 @@ template<typename MatrixType, int Mode>
class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
{
typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
+ typedef typename SparseTriangularView::Index Index;
public:
EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer)
: Base(view.nestedExpression(), outer)
{
eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal");
- if(SkipLast)
- while((*this) && this->index()>outer)
+ if(SkipLast) {
+ while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer))
--(*this);
+ }
}
- EIGEN_STRONG_INLINE InnerIterator& operator--()
+ EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
{ Base::operator--(); return *this; }
inline Index row() const { return Base::row(); }
@@ -147,7 +157,12 @@ class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public Matri
EIGEN_STRONG_INLINE operator bool() const
{
- return SkipLast ? Base::operator bool() : (Base::operator bool() && this->index() >= this->outer());
+ if (SkipLast) return Base::operator bool() ;
+ else
+ {
+ if(SkipDiag) return (Base::operator bool() && this->index() > this->outer());
+ else return (Base::operator bool() && this->index() >= this->outer());
+ }
}
};
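The new SkipDiag flag makes strictly triangular views work on sparse matrices: with StrictlyLower or StrictlyUpper the iterators above skip the diagonal entries. For example:

  #include <Eigen/Sparse>
  #include <iostream>

  int main()
  {
    Eigen::SparseMatrix<double> A(3, 3);
    A.insert(0, 0) = 1.0;
    A.insert(1, 0) = 2.0;
    A.insert(1, 1) = 3.0;
    A.makeCompressed();

    Eigen::SparseMatrix<double> L;
    L = A.triangularView<Eigen::Lower>();         // keeps the diagonal: 3 entries
    L = A.triangularView<Eigen::StrictlyLower>(); // SkipDiag: diagonal dropped
    std::cout << L.nonZeros() << "\n";            // prints 1, only the (1,0) entry
  }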
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
index 6062a086ff7..05023858b16 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
@@ -73,7 +73,6 @@ template<typename _Scalar, int _Flags = 0, typename _Index = int> class Dynamic
template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
-template<typename MatrixType, int Size> class SparseInnerVectorSet;
template<typename MatrixType, int Mode> class SparseTriangularView;
template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
@@ -99,23 +98,24 @@ template<typename T> struct eval<T,Sparse>
template<typename T,int Cols> struct sparse_eval<T,1,Cols> {
typedef typename traits<T>::Scalar _Scalar;
- enum { _Flags = traits<T>::Flags| RowMajorBit };
+ typedef typename traits<T>::Index _Index;
public:
- typedef SparseVector<_Scalar, _Flags> type;
+ typedef SparseVector<_Scalar, RowMajor, _Index> type;
};
template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
typedef typename traits<T>::Scalar _Scalar;
- enum { _Flags = traits<T>::Flags & (~RowMajorBit) };
+ typedef typename traits<T>::Index _Index;
public:
- typedef SparseVector<_Scalar, _Flags> type;
+ typedef SparseVector<_Scalar, ColMajor, _Index> type;
};
template<typename T,int Rows,int Cols> struct sparse_eval {
typedef typename traits<T>::Scalar _Scalar;
- enum { _Flags = traits<T>::Flags };
+ typedef typename traits<T>::Index _Index;
+ enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
- typedef SparseMatrix<_Scalar, _Flags> type;
+ typedef SparseMatrix<_Scalar, _Options, _Index> type;
};
template<typename T> struct sparse_eval<T,1,1> {
@@ -127,12 +127,10 @@ template<typename T> struct sparse_eval<T,1,1> {
template<typename T> struct plain_matrix_type<T,Sparse>
{
typedef typename traits<T>::Scalar _Scalar;
- enum {
- _Flags = traits<T>::Flags
- };
-
+ typedef typename traits<T>::Index _Index;
+ enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
- typedef SparseMatrix<_Scalar, _Flags> type;
+ typedef SparseMatrix<_Scalar, _Options, _Index> type;
};
} // end namespace internal
@@ -145,7 +143,7 @@ template<typename T> struct plain_matrix_type<T,Sparse>
*
* \sa SparseMatrix::setFromTriplets()
*/
-template<typename Scalar, typename Index=unsigned int>
+template<typename Scalar, typename Index=typename SparseMatrix<Scalar>::Index >
class Triplet
{
public:
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
index c952f654038..7e15c814b6f 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
@@ -45,35 +45,40 @@ struct traits<SparseVector<_Scalar, _Options, _Index> >
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
+
+// Sparse-Vector-Assignment kinds:
+enum {
+ SVA_RuntimeSwitch,
+ SVA_Inner,
+ SVA_Outer
+};
+
+template< typename Dest, typename Src,
+ int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
+ : Src::InnerSizeAtCompileTime==1 ? SVA_Outer
+ : SVA_Inner>
+struct sparse_vector_assign_selector;
+
}
template<typename _Scalar, int _Options, typename _Index>
class SparseVector
: public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
{
+ typedef SparseMatrixBase<SparseVector> SparseBase;
+
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
-
- protected:
- public:
-
- typedef SparseMatrixBase<SparseVector> SparseBase;
+
+ typedef internal::CompressedStorage<Scalar,Index> Storage;
enum { IsColVector = internal::traits<SparseVector>::IsColVector };
enum {
Options = _Options
};
-
- internal::CompressedStorage<Scalar,Index> m_data;
- Index m_size;
-
- internal::CompressedStorage<Scalar,Index>& _data() { return m_data; }
- internal::CompressedStorage<Scalar,Index>& _data() const { return m_data; }
-
- public:
-
+
EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
@@ -84,17 +89,26 @@ class SparseVector
EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
+
+ /** \internal */
+ inline Storage& data() { return m_data; }
+ /** \internal */
+ inline const Storage& data() const { return m_data; }
inline Scalar coeff(Index row, Index col) const
{
- eigen_assert((IsColVector ? col : row)==0);
+ eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
return coeff(IsColVector ? row : col);
}
- inline Scalar coeff(Index i) const { return m_data.at(i); }
+ inline Scalar coeff(Index i) const
+ {
+ eigen_assert(i>=0 && i<m_size);
+ return m_data.at(i);
+ }
inline Scalar& coeffRef(Index row, Index col)
{
- eigen_assert((IsColVector ? col : row)==0);
+ eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeffRef(IsColVector ? row : col);
}
@@ -106,6 +120,7 @@ class SparseVector
*/
inline Scalar& coeffRef(Index i)
{
+ eigen_assert(i>=0 && i<m_size);
return m_data.atWithInsertion(i);
}
@@ -139,6 +154,8 @@ class SparseVector
inline Scalar& insert(Index row, Index col)
{
+ eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
+
Index inner = IsColVector ? row : col;
Index outer = IsColVector ? col : row;
eigen_assert(outer==0);
@@ -146,6 +163,8 @@ class SparseVector
}
Scalar& insert(Index i)
{
+ eigen_assert(i>=0 && i<m_size);
+
Index startId = 0;
Index p = Index(m_data.size()) - 1;
// TODO smart realloc
@@ -169,7 +188,7 @@ class SparseVector
inline void finalize() {}
- void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
m_data.prune(reference,epsilon);
}
@@ -188,25 +207,31 @@ class SparseVector
void resizeNonZeros(Index size) { m_data.resize(size); }
- inline SparseVector() : m_size(0) { resize(0); }
+ inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
- inline SparseVector(Index size) : m_size(0) { resize(size); }
+ inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
- inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
+ inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
template<typename OtherDerived>
inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
: m_size(0)
{
+ check_template_parameters();
*this = other.derived();
}
inline SparseVector(const SparseVector& other)
- : m_size(0)
+ : SparseBase(other), m_size(0)
{
+ check_template_parameters();
*this = other.derived();
}
+ /** Swaps the values of \c *this and \a other.
+    * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
+ * \sa SparseMatrixBase::swap()
+ */
inline void swap(SparseVector& other)
{
std::swap(m_size, other.m_size);
@@ -230,10 +255,10 @@ class SparseVector
template<typename OtherDerived>
inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
{
- if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
- return assign(other.transpose());
- else
- return assign(other);
+ SparseVector tmp(other.size());
+ internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
+ this->swap(tmp);
+ return *this;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -260,73 +285,63 @@ class SparseVector
public:
- /** \deprecated use setZero() and reserve() */
+ /** \internal \deprecated use setZero() and reserve() */
EIGEN_DEPRECATED void startFill(Index reserve)
{
setZero();
m_data.reserve(reserve);
}
- /** \deprecated use insertBack(Index,Index) */
+ /** \internal \deprecated use insertBack(Index,Index) */
EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
{
eigen_assert(r==0 || c==0);
return fill(IsColVector ? r : c);
}
- /** \deprecated use insertBack(Index) */
+ /** \internal \deprecated use insertBack(Index) */
EIGEN_DEPRECATED Scalar& fill(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
- /** \deprecated use insert(Index,Index) */
+ /** \internal \deprecated use insert(Index,Index) */
EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
{
eigen_assert(r==0 || c==0);
return fillrand(IsColVector ? r : c);
}
- /** \deprecated use insert(Index) */
+ /** \internal \deprecated use insert(Index) */
EIGEN_DEPRECATED Scalar& fillrand(Index i)
{
return insert(i);
}
- /** \deprecated use finalize() */
+ /** \internal \deprecated use finalize() */
EIGEN_DEPRECATED void endFill() {}
+    // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
+ /** \internal \deprecated use data() */
+ EIGEN_DEPRECATED Storage& _data() { return m_data; }
+ /** \internal \deprecated use data() */
+ EIGEN_DEPRECATED const Storage& _data() const { return m_data; }
+
# ifdef EIGEN_SPARSEVECTOR_PLUGIN
# include EIGEN_SPARSEVECTOR_PLUGIN
# endif
protected:
- template<typename OtherDerived>
- EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase<OtherDerived>& _other)
+
+ static void check_template_parameters()
{
- const OtherDerived& other(_other.derived());
- const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- if(needToTranspose)
- {
- Index size = other.size();
- Index nnz = other.nonZeros();
- resize(size);
- reserve(nnz);
- for(Index i=0; i<size; ++i)
- {
- typename OtherDerived::InnerIterator it(other, i);
- if(it)
- insert(i) = it.value();
- }
- return *this;
- }
- else
- {
- // there is no special optimization
- return Base::operator=(other);
- }
+ EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
+
+ Storage m_data;
+ Index m_size;
};
template<typename Scalar, int _Options, typename _Index>
@@ -393,6 +408,40 @@ class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
const Index m_start;
};
+namespace internal {
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
+ static void run(Dest& dst, const Src& src) {
+ eigen_internal_assert(src.innerSize()==src.size());
+ for(typename Src::InnerIterator it(src, 0); it; ++it)
+ dst.insert(it.index()) = it.value();
+ }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
+ static void run(Dest& dst, const Src& src) {
+ eigen_internal_assert(src.outerSize()==src.size());
+ for(typename Dest::Index i=0; i<src.size(); ++i)
+ {
+ typename Src::InnerIterator it(src, i);
+ if(it)
+ dst.insert(i) = it.value();
+ }
+ }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
+ static void run(Dest& dst, const Src& src) {
+ if(src.outerSize()==1) sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
+ else sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
+ }
+};
+
+}
+
} // end namespace Eigen
#endif // EIGEN_SPARSEVECTOR_H
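The selector above dispatches at compile time on whether the source expression is a single inner vector (SVA_Inner), has one entry per outer vector (SVA_Outer), or must be decided at runtime. In practice this is what makes assignments like the following work, assuming the generic sparse block expressions introduced in this release:

  #include <Eigen/Sparse>
  #include <iostream>

  int main()
  {
    Eigen::SparseMatrix<double> A(3, 3); // column-major
    A.insert(1, 2) = 4.0;
    A.makeCompressed();

    Eigen::SparseVector<double> v = A.col(2); // source is one inner vector: SVA_Inner
    Eigen::SparseVector<double> w = A.row(1); // source spans all outer vectors: SVA_Outer

    std::cout << v.nonZeros() << " " << w.nonZeros() << "\n"; // prints 1 1
  }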
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
index 8b0b9ea0304..fd8450463fe 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
@@ -18,7 +18,7 @@ namespace internal {
template<typename MatrixType>
struct traits<SparseView<MatrixType> > : traits<MatrixType>
{
- typedef int Index;
+ typedef typename MatrixType::Index Index;
typedef Sparse StorageKind;
enum {
Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
@@ -56,6 +56,7 @@ protected:
template<typename MatrixType>
class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
{
+ typedef typename SparseView::Index Index;
public:
typedef typename _MatrixTypeNested::InnerIterator IterBase;
InnerIterator(const SparseView& view, Index outer) :
@@ -88,7 +89,7 @@ private:
template<typename Derived>
const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
- typename NumTraits<Scalar>::Real m_epsilon) const
+ const typename NumTraits<Scalar>::Real& m_epsilon) const
{
return SparseView<Derived>(derived(), m_reference, m_epsilon);
}
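sparseView() builds a sparse expression from a dense one, skipping entries that are much smaller than reference * epsilon (with the default reference of 0, only exact zeros are skipped). A short sketch:

  #include <Eigen/Dense>
  #include <Eigen/Sparse>
  #include <iostream>

  int main()
  {
    Eigen::MatrixXd D(2, 2);
    D << 1.0, 1e-15,
         0.0, 2.0;

    Eigen::SparseMatrix<double> S0 = D.sparseView();           // drops only the exact zero
    Eigen::SparseMatrix<double> S1 = D.sparseView(1.0, 1e-12); // also drops the 1e-15 entry
    std::cout << S0.nonZeros() << " " << S1.nonZeros() << "\n"; // prints 3 2
  }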
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU.h
new file mode 100644
index 00000000000..1d592f2c8c7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU.h
@@ -0,0 +1,758 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_SPARSE_LU_H
+#define EIGEN_SPARSE_LU_H
+
+namespace Eigen {
+
+template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::Index> > class SparseLU;
+template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
+template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;
+
+/** \ingroup SparseLU_Module
+ * \class SparseLU
+ *
+ * \brief Sparse supernodal LU factorization for general matrices
+ *
+ * This class implements the supernodal LU factorization for general matrices.
+  * It uses the main techniques from the sequential SuperLU package
+  * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It transparently handles real
+  * and complex arithmetic, in single and double precision, depending on the
+  * scalar type of your input matrix.
+  * The code has been optimized to provide BLAS-3 operations during supernode-panel updates.
+  * It benefits directly from the built-in high-performance Eigen BLAS routines.
+  * Moreover, when the size of a supernode is very small, the BLAS calls are avoided to
+  * enable better optimization by the compiler. For best performance,
+  * you should compile with the NDEBUG flag to avoid the numerous bounds checks on vectors.
+ *
+  * An important parameter of this class is the ordering method. It is used to reorder the columns
+  * (and possibly the rows) of the matrix to reduce the number of new elements (fill-in) created during
+  * numerical factorization. The cheapest method available is COLAMD.
+ * See \link OrderingMethods_Module the OrderingMethods module \endlink for the list of
+ * built-in and external ordering methods.
+ *
+ * Simple example with key steps
+ * \code
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double, ColMajor> A;
+  * SparseLU<SparseMatrix<double, ColMajor>, COLAMDOrdering<int> > solver;
+ * // fill A and b;
+ * // Compute the ordering permutation vector from the structural pattern of A
+ * solver.analyzePattern(A);
+ * // Compute the numerical factorization
+ * solver.factorize(A);
+ * //Use the factors to solve the linear system
+ * x = solver.solve(b);
+ * \endcode
+ *
+ * \warning The input matrix A should be in a \b compressed and \b column-major form.
+ * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
+ *
+ * \note Unlike the initial SuperLU implementation, there is no step to equilibrate the matrix.
+ * For badly scaled matrices, this step can be useful to reduce the pivoting during factorization.
+ * If this is the case for your matrices, you can try the basic scaling method at
+ * "unsupported/Eigen/src/IterativeSolvers/Scaling.h"
+ *
+  * \tparam _MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<>
+  * \tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS. Default is COLAMD
+ *
+ *
+ * \sa \ref TutorialSparseDirectSolvers
+ * \sa \ref OrderingMethods_Module
+ */
+template <typename _MatrixType, typename _OrderingType>
+class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef _OrderingType OrderingType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> NCMatrix;
+ typedef internal::MappedSuperNodalMatrix<Scalar, Index> SCMatrix;
+ typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+ typedef Matrix<Index,Dynamic,1> IndexVector;
+ typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+ typedef internal::SparseLUImpl<Scalar, Index> Base;
+
+ public:
+ SparseLU():m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
+ {
+ initperfvalues();
+ }
+ SparseLU(const MatrixType& matrix):m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
+ {
+ initperfvalues();
+ compute(matrix);
+ }
+
+ ~SparseLU()
+ {
+ // Free all explicit dynamic pointers
+ }
+
+ void analyzePattern (const MatrixType& matrix);
+ void factorize (const MatrixType& matrix);
+ void simplicialfactorize(const MatrixType& matrix);
+
+ /**
+ * Compute the symbolic and numeric factorization of the input sparse matrix.
+ * The input matrix should be in column-major storage.
+ */
+ void compute (const MatrixType& matrix)
+ {
+ // Analyze
+ analyzePattern(matrix);
+ //Factorize
+ factorize(matrix);
+ }
+
+ inline Index rows() const { return m_mat.rows(); }
+ inline Index cols() const { return m_mat.cols(); }
+ /** Indicate that the pattern of the input matrix is symmetric */
+ void isSymmetric(bool sym)
+ {
+ m_symmetricmode = sym;
+ }
+
+ /** \returns an expression of the matrix L, internally stored as supernodes
+ * The only operation available with this expression is the triangular solve
+ * \code
+ * y = b; matrixL().solveInPlace(y);
+ * \endcode
+ */
+ SparseLUMatrixLReturnType<SCMatrix> matrixL() const
+ {
+ return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
+ }
+ /** \returns an expression of the matrix U.
+ * The only operation available with this expression is the triangular solve
+ * \code
+ * y = b; matrixU().solveInPlace(y);
+ * \endcode
+ */
+ SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,Index> > matrixU() const
+ {
+ return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,Index> >(m_Lstore, m_Ustore);
+ }
+
+ /**
+ * \returns a reference to the row permutation matrix \f$ P_r \f$ such that \f$P_r A P_c^T = L U\f$
+ * \sa colsPermutation()
+ */
+ inline const PermutationType& rowsPermutation() const
+ {
+ return m_perm_r;
+ }
+ /**
+ * \returns a reference to the column permutation matrix \f$ P_c^T \f$ such that \f$P_r A P_c^T = L U\f$
+ * \sa rowsPermutation()
+ */
+ inline const PermutationType& colsPermutation() const
+ {
+ return m_perm_c;
+ }
+ /** Set the threshold used for a diagonal entry to be an acceptable pivot. */
+ void setPivotThreshold(const RealScalar& thresh)
+ {
+ m_diagpivotthresh = thresh;
+ }
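+ /* Usage sketch, following the SuperLU threshold-pivoting convention: 1.0
+ * corresponds to classical partial pivoting, while 0.0 keeps the diagonal
+ * entry as pivot whenever it is nonzero (the value below is illustrative):
+ * \code
+ * solver.setPivotThreshold(0.1);
+ * \endcode
+ */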
+
+ /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+ *
+ * \warning the destination matrix X in X = this->solve(B) must be column-major.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const
+ {
+ eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
+ eigen_assert(rows()==B.rows()
+ && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
+ return internal::solve_retval<SparseLU, Rhs>(*this, B.derived());
+ }
+
+ /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<SparseLU, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
+ {
+ eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
+ eigen_assert(rows()==B.rows()
+ && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
+ return internal::sparse_solve_retval<SparseLU, Rhs>(*this, B.derived());
+ }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if the computation was successful,
+ * \c NumericalIssue if the LU factorization reports a problem, for instance a zero diagonal entry,
+ * \c InvalidInput if the input matrix is invalid
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ /**
+ * \returns A string describing the type of error
+ */
+ std::string lastErrorMessage() const
+ {
+ return m_lastError;
+ }
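+
+ /* Typical error-handling sketch combining info() and lastErrorMessage():
+ * \code
+ * solver.compute(A);
+ * if(solver.info() != Success)
+ *   std::cerr << solver.lastErrorMessage() << std::endl;
+ * \endcode
+ */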
+
+ template<typename Rhs, typename Dest>
+ bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
+ {
+ Dest& X(X_base.derived());
+ eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
+ EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
+ THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+
+ // Permute the right hand side to form X = Pr*B
+ // on return, X is overwritten by the computed solution
+ X.resize(B.rows(),B.cols());
+
+ // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
+ for(Index j = 0; j < B.cols(); ++j)
+ X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);
+
+ // Forward substitution with L, then backward substitution with U
+ this->matrixL().solveInPlace(X);
+ this->matrixU().solveInPlace(X);
+
+ // Permute back the solution
+ for (Index j = 0; j < B.cols(); ++j)
+ X.col(j) = colsPermutation().inverse() * X.col(j);
+
+ return true;
+ }
+
+ /**
+ * \returns the absolute value of the determinant of the matrix of which
+ * *this is the LU decomposition.
+ *
+ * \warning a determinant can be very big or small, so for matrices
+ * of large enough dimension, there is a risk of overflow/underflow.
+ * One way to work around that is to use logAbsDeterminant() instead.
+ *
+ * \sa logAbsDeterminant(), signDeterminant()
+ */
+ Scalar absDeterminant()
+ {
+ eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+ // Initialize the determinant to 1 (the permutation determinants have absolute value 1)
+ Scalar det = Scalar(1.);
+ //Note that the diagonal blocks of U are stored in supernodes,
+ // which are available in the L part :)
+ for (Index j = 0; j < this->cols(); ++j)
+ {
+ for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+ {
+ if(it.row() < j) continue;
+ if(it.row() == j)
+ {
+ det *= (std::abs)(it.value());
+ break;
+ }
+ }
+ }
+ return det;
+ }
+
+ /** \returns the natural log of the absolute value of the determinant of the matrix
+ * of which *this is the LU decomposition
+ *
+ * \note This method is useful to work around the risk of overflow/underflow that's
+ * inherent to the determinant computation.
+ *
+ * \sa absDeterminant(), signDeterminant()
+ */
+ Scalar logAbsDeterminant() const
+ {
+ eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+ Scalar det = Scalar(0.);
+ for (Index j = 0; j < this->cols(); ++j)
+ {
+ for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+ {
+ if(it.row() < j) continue;
+ if(it.row() == j)
+ {
+ det += (std::log)((std::abs)(it.value()));
+ break;
+ }
+ }
+ }
+ return det;
+ }
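+
+ /* Sketch of the intended relationship, up to floating-point rounding
+ * (and barring overflow in the exponential):
+ * \code
+ * Scalar a = std::exp(solver.logAbsDeterminant()); // matches solver.absDeterminant()
+ * \endcode
+ */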
+
+ /** \returns A number representing the sign of the determinant
+ *
+ * \sa absDeterminant(), logAbsDeterminant()
+ */
+ Scalar signDeterminant()
+ {
+ eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+ return Scalar(m_detPermR);
+ }
+
+ protected:
+ // Functions
+ void initperfvalues()
+ {
+ m_perfv.panel_size = 1;
+ m_perfv.relax = 1;
+ m_perfv.maxsuper = 128;
+ m_perfv.rowblk = 16;
+ m_perfv.colblk = 8;
+ m_perfv.fillfactor = 20;
+ }
+
+ // Variables
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ bool m_factorizationIsOk;
+ bool m_analysisIsOk;
+ std::string m_lastError;
+ NCMatrix m_mat; // The input (permuted ) matrix
+ SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
+ MappedSparseMatrix<Scalar,ColMajor,Index> m_Ustore; // The upper triangular matrix
+ PermutationType m_perm_c; // Column permutation
+ PermutationType m_perm_r ; // Row permutation
+ IndexVector m_etree; // Column elimination tree
+
+ typename Base::GlobalLU_t m_glu;
+
+ // SparseLU options
+ bool m_symmetricmode;
+ // values for performance
+ internal::perfvalues<Index> m_perfv;
+ RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
+ Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
+ Index m_detPermR; // Determinant of the row permutation matrix
+ private:
+ // Disable copy constructor
+ SparseLU (const SparseLU& );
+
+}; // End class SparseLU
+
+
+
+// Functions needed by the analysis phase
+/**
+ * Compute the column permutation to minimize the fill-in
+ *
+ * - Apply this permutation to the input matrix -
+ *
+ * - Compute the column elimination tree on the permuted matrix
+ *
+ * - Postorder the elimination tree and the column permutation
+ *
+ */
+template <typename MatrixType, typename OrderingType>
+void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
+{
+
+ //TODO It is possible, as in SuperLU, to compute row and column scaling vectors to equilibrate the matrix mat.
+
+ OrderingType ord;
+ ord(mat,m_perm_c);
+
+ // Apply the permutation to the columns of the input matrix
+ // First copy the whole input matrix.
+ m_mat = mat;
+ if (m_perm_c.size()) {
+ m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.
+ //Then, permute only the column pointers
+ const Index * outerIndexPtr;
+ if (mat.isCompressed()) outerIndexPtr = mat.outerIndexPtr();
+ else
+ {
+ Index *outerIndexPtr_t = new Index[mat.cols()+1];
+ for(Index i = 0; i <= mat.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
+ outerIndexPtr = outerIndexPtr_t;
+ }
+ for (Index i = 0; i < mat.cols(); i++)
+ {
+ m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
+ m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
+ }
+ if(!mat.isCompressed()) delete[] outerIndexPtr;
+ }
+ // Compute the column elimination tree of the permuted matrix
+ IndexVector firstRowElt;
+ internal::coletree(m_mat, m_etree,firstRowElt);
+
+ // In symmetric mode, do not do postorder here
+ if (!m_symmetricmode) {
+ IndexVector post, iwork;
+ // Post order etree
+ internal::treePostorder(m_mat.cols(), m_etree, post);
+
+
+ // Renumber etree in postorder
+ Index m = m_mat.cols();
+ iwork.resize(m+1);
+ for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
+ m_etree = iwork;
+
+ // Postmultiply A*Pc by post, i.e., reorder the matrix according to the postorder of the etree
+ PermutationType post_perm(m);
+ for (Index i = 0; i < m; i++)
+ post_perm.indices()(i) = post(i);
+
+ // Combine the two permutations : postorder the permutation for future use
+ if(m_perm_c.size()) {
+ m_perm_c = post_perm * m_perm_c;
+ }
+
+ } // end postordering
+
+ m_analysisIsOk = true;
+}
+
+// Functions needed by the numerical factorization phase
+
+
+/**
+ * - Numerical factorization
+ * - Interleaved with the symbolic factorization
+ * On exit, info is
+ *
+ * = 0: successful factorization
+ *
+ * > 0: if info = i, and i is
+ *
+ * <= A->ncol: U(i,i) is exactly zero. The factorization has
+ * been completed, but the factor U is exactly singular,
+ * and division by zero will occur if it is used to solve a
+ * system of equations.
+ *
+ * > A->ncol: number of bytes allocated when memory allocation
+ * failure occurred, plus A->ncol. If lwork = -1, it is
+ * the estimated amount of space needed, plus A->ncol.
+ */
+template <typename MatrixType, typename OrderingType>
+void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
+{
+ using internal::emptyIdxLU;
+ eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
+ eigen_assert((matrix.rows() == matrix.cols()) && "Only for square matrices");
+
+ typedef typename IndexVector::Scalar Index;
+
+
+ // Apply the column permutation computed in analyzePattern()
+ // m_mat = matrix * m_perm_c.inverse();
+ m_mat = matrix;
+ if (m_perm_c.size())
+ {
+ m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
+ //Then, permute only the column pointers
+ const Index * outerIndexPtr;
+ if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
+ else
+ {
+ Index* outerIndexPtr_t = new Index[matrix.cols()+1];
+ for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
+ outerIndexPtr = outerIndexPtr_t;
+ }
+ for (Index i = 0; i < matrix.cols(); i++)
+ {
+ m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
+ m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
+ }
+ if(!matrix.isCompressed()) delete[] outerIndexPtr;
+ }
+ else
+ { //FIXME This should not be needed if the empty permutation is handled transparently
+ m_perm_c.resize(matrix.cols());
+ for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
+ }
+
+ Index m = m_mat.rows();
+ Index n = m_mat.cols();
+ Index nnz = m_mat.nonZeros();
+ Index maxpanel = m_perfv.panel_size * m;
+ // Allocate working storage common to the factor routines
+ Index lwork = 0;
+ Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
+ if (info)
+ {
+ m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
+ m_factorizationIsOk = false;
+ return ;
+ }
+
+ // Set up pointers for integer working arrays
+ IndexVector segrep(m); segrep.setZero();
+ IndexVector parent(m); parent.setZero();
+ IndexVector xplore(m); xplore.setZero();
+ IndexVector repfnz(maxpanel);
+ IndexVector panel_lsub(maxpanel);
+ IndexVector xprune(n); xprune.setZero();
+ IndexVector marker(m*internal::LUNoMarker); marker.setZero();
+
+ repfnz.setConstant(-1);
+ panel_lsub.setConstant(-1);
+
+ // Set up pointers for scalar working arrays
+ ScalarVector dense;
+ dense.setZero(maxpanel);
+ ScalarVector tempv;
+ tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );
+
+ // Compute the inverse of perm_c
+ PermutationType iperm_c(m_perm_c.inverse());
+
+ // Identify initial relaxed snodes
+ IndexVector relax_end(n);
+ if ( m_symmetricmode == true )
+ Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
+ else
+ Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
+
+
+ m_perm_r.resize(m);
+ m_perm_r.indices().setConstant(-1);
+ marker.setConstant(-1);
+ m_detPermR = 1; // Record the determinant of the row permutation
+
+ m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
+ m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
+
+ // Work on one 'panel' at a time. A panel is one of the following :
+ // (a) a relaxed supernode at the bottom of the etree, or
+ // (b) panel_size contiguous columns, <panel_size> defined by the user
+ Index jcol;
+ IndexVector panel_histo(n);
+ Index pivrow; // Pivotal row number in the original matrix
+ Index nseg1; // Number of segments in U-column above panel row jcol
+ Index nseg; // Number of segments in each U-column
+ Index irep;
+ Index i, k, jj;
+ for (jcol = 0; jcol < n; )
+ {
+ // Adjust panel size so that a panel won't overlap with the next relaxed snode.
+ Index panel_size = m_perfv.panel_size; // upper bound on panel width
+ for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
+ {
+ if (relax_end(k) != emptyIdxLU)
+ {
+ panel_size = k - jcol;
+ break;
+ }
+ }
+ if (k == n)
+ panel_size = n - jcol;
+
+ // Symbolic outer factorization on a panel of columns
+ Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
+
+ // Numeric sup-panel updates in topological order
+ Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);
+
+ // Sparse LU within the panel, and below the panel diagonal
+ for ( jj = jcol; jj< jcol + panel_size; jj++)
+ {
+ k = (jj - jcol) * m; // Column index for w-wide arrays
+
+ nseg = nseg1; // begin after all the panel segments
+ //Depth-first-search for the current column
+ VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
+ VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
+ info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
+ if ( info )
+ {
+ m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
+ m_info = NumericalIssue;
+ m_factorizationIsOk = false;
+ return;
+ }
+ // Numeric updates to this column
+ VectorBlock<ScalarVector> dense_k(dense, k, m);
+ VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1);
+ info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
+ if ( info )
+ {
+ m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
+ m_info = NumericalIssue;
+ m_factorizationIsOk = false;
+ return;
+ }
+
+ // Copy the U-segments to ucol(*)
+ info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu);
+ if ( info )
+ {
+ m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
+ m_info = NumericalIssue;
+ m_factorizationIsOk = false;
+ return;
+ }
+
+ // Form the L-segment
+ info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
+ if ( info )
+ {
+ m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
+ std::ostringstream returnInfo;
+ returnInfo << info;
+ m_lastError += returnInfo.str();
+ m_info = NumericalIssue;
+ m_factorizationIsOk = false;
+ return;
+ }
+
+ // Update the determinant of the row permutation matrix
+ if (pivrow != jj) m_detPermR *= -1;
+
+ // Prune columns (0:jj-1) using column jj
+ Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
+
+ // Reset repfnz for this column
+ for (i = 0; i < nseg; i++)
+ {
+ irep = segrep(i);
+ repfnz_k(irep) = emptyIdxLU;
+ }
+ } // end SparseLU within the panel
+ jcol += panel_size; // Move to the next panel
+ } // end for -- end elimination
+
+ // Count the number of nonzeros in factors
+ Base::countnz(n, m_nnzL, m_nnzU, m_glu);
+ // Apply permutation to the L subscripts
+ Base::fixupL(n, m_perm_r.indices(), m_glu);
+
+ // Create supernode matrix L
+ m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
+ // Create the column major upper sparse matrix U;
+ new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, Index> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
+
+ m_info = Success;
+ m_factorizationIsOk = true;
+}
+
+template<typename MappedSupernodalType>
+struct SparseLUMatrixLReturnType : internal::no_assignment_operator
+{
+ typedef typename MappedSupernodalType::Index Index;
+ typedef typename MappedSupernodalType::Scalar Scalar;
+ SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
+ { }
+ Index rows() { return m_mapL.rows(); }
+ Index cols() { return m_mapL.cols(); }
+ template<typename Dest>
+ void solveInPlace( MatrixBase<Dest> &X) const
+ {
+ m_mapL.solveInPlace(X);
+ }
+ const MappedSupernodalType& m_mapL;
+};
+
+template<typename MatrixLType, typename MatrixUType>
+struct SparseLUMatrixUReturnType : internal::no_assignment_operator
+{
+ typedef typename MatrixLType::Index Index;
+ typedef typename MatrixLType::Scalar Scalar;
+ SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
+ : m_mapL(mapL),m_mapU(mapU)
+ { }
+ Index rows() { return m_mapL.rows(); }
+ Index cols() { return m_mapL.cols(); }
+
+ template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
+ {
+ Index nrhs = X.cols();
+ Index n = X.rows();
+ // Backward solve with U
+ for (Index k = m_mapL.nsuper(); k >= 0; k--)
+ {
+ Index fsupc = m_mapL.supToCol()[k];
+ Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
+ Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
+ Index luptr = m_mapL.colIndexPtr()[fsupc];
+
+ if (nsupc == 1)
+ {
+ for (Index j = 0; j < nrhs; j++)
+ {
+ X(fsupc, j) /= m_mapL.valuePtr()[luptr];
+ }
+ }
+ else
+ {
+ Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ U = A.template triangularView<Upper>().solve(U);
+ }
+
+ for (Index j = 0; j < nrhs; ++j)
+ {
+ for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
+ {
+ typename MatrixUType::InnerIterator it(m_mapU, jcol);
+ for ( ; it; ++it)
+ {
+ Index irow = it.index();
+ X(irow, j) -= X(jcol, j) * it.value();
+ }
+ }
+ }
+ } // End For U-solve
+ }
+ const MatrixLType& m_mapL;
+ const MatrixUType& m_mapU;
+};
+
+namespace internal {
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
+ : solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
+{
+ typedef SparseLU<_MatrixType,Derived> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct sparse_solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
+ : sparse_solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
+{
+ typedef SparseLU<_MatrixType,Derived> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ this->defaultEvalTo(dst);
+ }
+};
+} // end namespace internal
+
+} // End namespace Eigen
+
+#endif
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h
new file mode 100644
index 00000000000..14d70897df7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h
@@ -0,0 +1,64 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#ifndef SPARSELU_IMPL_H
+#define SPARSELU_IMPL_H
+
+namespace Eigen {
+namespace internal {
+
+/** \ingroup SparseLU_Module
+ * \class SparseLUImpl
+ * Base class for sparseLU
+ */
+template <typename Scalar, typename Index>
+class SparseLUImpl
+{
+ public:
+ typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+ typedef Matrix<Index,Dynamic,1> IndexVector;
+ typedef typename ScalarVector::RealScalar RealScalar;
+ typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;
+ typedef Ref<Matrix<Index,Dynamic,1> > BlockIndexVector;
+ typedef LU_GlobalLU_t<IndexVector, ScalarVector> GlobalLU_t;
+ typedef SparseMatrix<Scalar,ColMajor,Index> MatrixType;
+
+ protected:
+ template <typename VectorType>
+ Index expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions);
+ Index memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu);
+ template <typename VectorType>
+ Index memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions);
+ void heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end);
+ void relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end);
+ Index snode_dfs(const Index jcol, const Index kcol,const MatrixType& mat, IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu);
+ Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu);
+ Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);
+ template <typename Traits>
+ void dfs_kernel(const Index jj, IndexVector& perm_r,
+ Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
+ Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
+ IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits);
+ void panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
+
+ void panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu);
+ Index column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
+ Index column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu);
+ Index copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu);
+ void pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu);
+ void countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu);
+ void fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu);
+
+ template<typename , typename >
+ friend struct column_dfs_traits;
+};
+
+} // end namespace internal
+} // namespace Eigen
+
+#endif
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h
new file mode 100644
index 00000000000..1ffa7d54e96
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -0,0 +1,227 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of [s,d,c,z]memory.c files in SuperLU
+
+ * -- SuperLU routine (version 3.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * August 1, 2008
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+
+#ifndef EIGEN_SPARSELU_MEMORY
+#define EIGEN_SPARSELU_MEMORY
+
+namespace Eigen {
+namespace internal {
+
+enum { LUNoMarker = 3 };
+enum {emptyIdxLU = -1};
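+
+// LUnumTempV below sizes the scalar workspace used by the panel and column
+// update kernels: at least m entries, or (t+b) entries per panel column for
+// a panel of width w, whichever is larger.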
+template<typename Index>
+inline Index LUnumTempV(Index& m, Index& w, Index& t, Index& b)
+{
+ return (std::max)(m, (t+b)*w);
+}
+
+template< typename Scalar, typename Index>
+inline Index LUTempSpace(Index&m, Index& w)
+{
+ return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar);
+}
+
+
+
+
+/**
+ * Expand the existing storage to accommodate more fill-ins
+ * \param vec Valid pointer to the vector to allocate or expand
+ * \param[in,out] length On input, the current length of the vector to be increased. On output, the length of the newly allocated vector
+ * \param[in] nbElts Current number of elements in the factors
+ * \param keep_prev 1: use length and do not expand the vector; 0: compute new_len and expand
+ * \param[in,out] num_expansions Number of times the memory has been expanded
+ */
+template <typename Scalar, typename Index>
+template <typename VectorType>
+Index SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions)
+{
+
+ float alpha = 1.5; // Ratio of the memory increase
+ Index new_len; // New size of the allocated memory
+
+ if(num_expansions == 0 || keep_prev)
+ new_len = length ; // First allocation: use the requested length
+ else
+ new_len = (std::max)(length+1,Index(alpha * length));
+
+ VectorType old_vec; // Temporary vector to hold the previous values
+ if (nbElts > 0 )
+ old_vec = vec.segment(0,nbElts);
+
+ //Allocate or expand the current vector
+#ifdef EIGEN_EXCEPTIONS
+ try
+#endif
+ {
+ vec.resize(new_len);
+ }
+#ifdef EIGEN_EXCEPTIONS
+ catch(std::bad_alloc& )
+#else
+ if(!vec.size())
+#endif
+ {
+ if (!num_expansions)
+ {
+ // First time to allocate from LUMemInit()
+ // Let LUMemInit() deal with it.
+ return -1;
+ }
+ if (keep_prev)
+ {
+ // In this case, the memory length should not be reduced
+ return new_len;
+ }
+ else
+ {
+ // Reduce the size and increase again
+ Index tries = 0; // Number of attempts
+ do
+ {
+ alpha = (alpha + 1)/2;
+ new_len = (std::max)(length+1,Index(alpha * length));
+#ifdef EIGEN_EXCEPTIONS
+ try
+#endif
+ {
+ vec.resize(new_len);
+ }
+#ifdef EIGEN_EXCEPTIONS
+ catch(std::bad_alloc& )
+#else
+ if (!vec.size())
+#endif
+ {
+ tries += 1;
+ if ( tries > 10) return new_len;
+ }
+ } while (!vec.size());
+ }
+ }
+ //Copy the previous values to the newly allocated space
+ if (nbElts > 0)
+ vec.segment(0, nbElts) = old_vec;
+
+
+ length = new_len;
+ if(num_expansions) ++num_expansions;
+ return 0;
+}
+
+/**
+ * \brief Allocate various working space for the numerical factorization phase.
+ * \param m number of rows of the input matrix
+ * \param n number of columns
+ * \param annz number of initial nonzeros in the matrix
+ * \param lwork if lwork=-1, this routine returns an estimated size of the required memory
+ * \param glu persistent data to facilitate multiple factors : will be deleted later ??
+ * \param fillratio estimated ratio of fill in the factors
+ * \param panel_size Size of a panel
+ * \return the estimated size of the required memory if lwork = -1; otherwise, the size of the memory actually allocated when allocation failed, and 0 on success
+ * \note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu)
+{
+ Index& num_expansions = glu.num_expansions; //No memory expansions so far
+ num_expansions = 0;
+ glu.nzumax = glu.nzlumax = (std::min)(fillratio * annz / n, m) * n; // estimated number of nonzeros in U
+ glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated nnz in L factor
+ // Return the estimated size to the user if necessary
+ Index tempSpace;
+ tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
+ if (lwork == emptyIdxLU)
+ {
+ Index estimated_size;
+ estimated_size = (5 * n + 5) * sizeof(Index) + tempSpace
+ + (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) * sizeof(Scalar) + n;
+ return estimated_size;
+ }
+
+ // Setup the required space
+
+ // First allocate Integer pointers for L\U factors
+ glu.xsup.resize(n+1);
+ glu.supno.resize(n+1);
+ glu.xlsub.resize(n+1);
+ glu.xlusup.resize(n+1);
+ glu.xusub.resize(n+1);
+
+ // Reserve memory for L/U factors
+ do
+ {
+ if( (expand<ScalarVector>(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0)
+ || (expand<ScalarVector>(glu.ucol, glu.nzumax, 0, 0, num_expansions)<0)
+ || (expand<IndexVector> (glu.lsub, glu.nzlmax, 0, 0, num_expansions)<0)
+ || (expand<IndexVector> (glu.usub, glu.nzumax, 0, 1, num_expansions)<0) )
+ {
+ //Reduce the estimated size and retry
+ glu.nzlumax /= 2;
+ glu.nzumax /= 2;
+ glu.nzlmax /= 2;
+ if (glu.nzlumax < annz ) return glu.nzlumax;
+ }
+ } while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size());
+
+ ++num_expansions;
+ return 0;
+
+} // end LuMemInit
+
+/**
+ * \brief Expand the existing storage
+ * \param vec vector to expand
+ * \param[in,out] maxlen On input, the previous size of vec (number of elements to copy); on output, the new size
+ * \param nbElts current number of elements in the vector.
+ * \param memtype Type of the element to expand
+ * \param num_expansions Number of expansions
+ * \return 0 on success, or the size (> 0) of the memory allocated so far when allocation failed
+ */
+template <typename Scalar, typename Index>
+template <typename VectorType>
+Index SparseLUImpl<Scalar,Index>::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions)
+{
+ Index failed_size;
+ if (memtype == USUB)
+ failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 1, num_expansions);
+ else
+ failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 0, num_expansions);
+
+ if (failed_size)
+ return failed_size;
+
+ return 0 ;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // EIGEN_SPARSELU_MEMORY
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Structs.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Structs.h
new file mode 100644
index 00000000000..24d6bf17946
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Structs.h
@@ -0,0 +1,111 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+ * NOTE: This file comes from a partly modified version of files slu_[s,d,c,z]defs.h
+ * -- SuperLU routine (version 4.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November, 2010
+ *
+ * Global data structures used in LU factorization -
+ *
+ * nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
+ * (xsup,supno): supno[i] is the supernode no to which i belongs;
+ * xsup(s) points to the beginning of the s-th supernode.
+ * e.g. supno 0 1 2 2 3 3 3 4 4 4 4 4 (n=12)
+ * xsup 0 1 2 4 7 12
+ * Note: dfs will be performed on supernode rep. relative to the new
+ * row pivoting ordering
+ *
+ * (xlsub,lsub): lsub[*] contains the compressed subscript of
+ * rectangular supernodes; xlsub[j] points to the starting
+ * location of the j-th column in lsub[*]. Note that xlsub
+ * is indexed by column.
+ * Storage: original row subscripts
+ *
+ * During the course of sparse LU factorization, we also use
+ * (xlsub,lsub) for the purpose of symmetric pruning. For each
+ * supernode {s,s+1,...,t=s+r} with first column s and last
+ * column t, the subscript set
+ * lsub[j], j=xlsub[s], .., xlsub[s+1]-1
+ * is the structure of column s (i.e. structure of this supernode).
+ * It is used for the storage of numerical values.
+ * Furthermore,
+ * lsub[j], j=xlsub[t], .., xlsub[t+1]-1
+ * is the structure of the last column t of this supernode.
+ * It is for the purpose of symmetric pruning. Therefore, the
+ * structural subscripts can be rearranged without making physical
+ * interchanges among the numerical values.
+ *
+ * However, if the supernode has only one column, then we
+ * only keep one set of subscripts. For any subscript interchange
+ * performed, similar interchange must be done on the numerical
+ * values.
+ *
+ * The last column structures (for pruning) will be removed
+ * after the numerical LU factorization phase.
+ *
+ * (xlusup,lusup): lusup[*] contains the numerical values of the
+ * rectangular supernodes; xlusup[j] points to the starting
+ * location of the j-th column in storage vector lusup[*]
+ * Note: xlusup is indexed by column.
+ * Each rectangular supernode is stored by column-major
+ * scheme, consistent with Fortran 2-dim array storage.
+ *
+ * (xusub,ucol,usub): ucol[*] stores the numerical values of
+ * U-columns outside the rectangular supernodes. The row
+ * subscript of nonzero ucol[k] is stored in usub[k].
+ * xusub[i] points to the starting location of column i in ucol.
+ * Storage: new row subscripts; that is subscripts of PA.
+ */
+
+#ifndef EIGEN_LU_STRUCTS
+#define EIGEN_LU_STRUCTS
+namespace Eigen {
+namespace internal {
+
+typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;
+
+template <typename IndexVector, typename ScalarVector>
+struct LU_GlobalLU_t {
+ typedef typename IndexVector::Scalar Index;
+ IndexVector xsup; //First supernode column ... xsup(s) points to the beginning of the s-th supernode
+ IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping)
+ ScalarVector lusup; // nonzero values of L ordered by columns
+ IndexVector lsub; // Compressed row indices of L rectangular supernodes.
+ IndexVector xlusup; // pointers to the beginning of each column in lusup
+ IndexVector xlsub; // pointers to the beginning of each column in lsub
+ Index nzlmax; // Current max size of lsub
+ Index nzlumax; // Current max size of lusup
+ ScalarVector ucol; // nonzero values of U ordered by columns
+ IndexVector usub; // row indices of U columns in ucol
+ IndexVector xusub; // Pointers to the beginning of each column of U in ucol
+ Index nzumax; // Current max size of ucol
+ Index n; // Number of columns in the matrix
+ Index num_expansions;
+};
+
+// Values to set for performance
+template <typename Index>
+struct perfvalues {
+ Index panel_size; // a panel consists of at most <panel_size> consecutive columns
+ Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns)
+ // in a subtree of the elimination tree is less than relax, this subtree is considered
+ // as one supernode regardless of the row structures of those columns
+ Index maxsuper; // The maximum size for a supernode in complete LU
+ Index rowblk; // The minimum row dimension for 2-D blocking to be used;
+ Index colblk; // The minimum column dimension for 2-D blocking to be used;
+ Index fillfactor; // The estimated fills factors for L and U, compared with A
+};
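+
+// For reference, SparseLU::initperfvalues() currently initializes these to
+// panel_size=1, relax=1, maxsuper=128, rowblk=16, colblk=8 and fillfactor=20.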
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // EIGEN_LU_STRUCTS
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
new file mode 100644
index 00000000000..ad6f2183fed
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
@@ -0,0 +1,298 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
+#define EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
+
+namespace Eigen {
+namespace internal {
+
+/** \ingroup SparseLU_Module
+ * \brief a class to manipulate the L supernodal factor from the SparseLU factorization
+ *
+ * This class contains the data needed to store
+ * and manipulate the supernodes during the factorization and solution phases of sparse LU.
+ * Only the lower triangular matrix has supernodes.
+ *
+ * NOTE : This class corresponds to the SCformat structure in SuperLU
+ *
+ */
+/* TODO
+ * InnerIterator as for sparsematrix
+ * SuperInnerIterator to iterate through all supernodes
+ * Function for triangular solve
+ */
+template <typename _Scalar, typename _Index>
+class MappedSuperNodalMatrix
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Matrix<Index,Dynamic,1> IndexVector;
+ typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+ public:
+ MappedSuperNodalMatrix()
+ {
+
+ }
+ MappedSuperNodalMatrix(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
+ IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
+ {
+ setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col);
+ }
+
+ ~MappedSuperNodalMatrix()
+ {
+
+ }
+ /**
+ * Set appropriate pointers for the lower triangular supernodal matrix
+ * These infos are available at the end of the numerical factorization
+ * FIXME This class will be modified such that it can be used in the course
+ * of the factorization.
+ */
+ void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
+ IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
+ {
+ m_row = m;
+ m_col = n;
+ m_nzval = nzval.data();
+ m_nzval_colptr = nzval_colptr.data();
+ m_rowind = rowind.data();
+ m_rowind_colptr = rowind_colptr.data();
+ m_nsuper = col_to_sup(n);
+ m_col_to_sup = col_to_sup.data();
+ m_sup_to_col = sup_to_col.data();
+ }
+
+ /**
+ * Number of rows
+ */
+ Index rows() { return m_row; }
+
+ /**
+ * Number of columns
+ */
+ Index cols() { return m_col; }
+
+ /**
+ * Return the array of nonzero values packed by column
+ *
+ * The size is nnz
+ */
+ Scalar* valuePtr() { return m_nzval; }
+
+ const Scalar* valuePtr() const
+ {
+ return m_nzval;
+ }
+ /**
+ * Return the pointers to the beginning of each column in \ref valuePtr()
+ */
+ Index* colIndexPtr()
+ {
+ return m_nzval_colptr;
+ }
+
+ const Index* colIndexPtr() const
+ {
+ return m_nzval_colptr;
+ }
+
+ /**
+ * Return the array of compressed row indices of all supernodes
+ */
+ Index* rowIndex() { return m_rowind; }
+
+ const Index* rowIndex() const
+ {
+ return m_rowind;
+ }
+
+ /**
+ * Return the location in \em rowIndex() which starts each column
+ */
+ Index* rowIndexPtr() { return m_rowind_colptr; }
+
+ const Index* rowIndexPtr() const
+ {
+ return m_rowind_colptr;
+ }
+
+ /**
+ * Return the array of column-to-supernode mapping
+ */
+ Index* colToSup() { return m_col_to_sup; }
+
+ const Index* colToSup() const
+ {
+ return m_col_to_sup;
+ }
+ /**
+ * Return the array of supernode-to-column mapping
+ */
+ Index* supToCol() { return m_sup_to_col; }
+
+ const Index* supToCol() const
+ {
+ return m_sup_to_col;
+ }
+
+ /**
+ * Return the number of supernodes
+ */
+ Index nsuper() const
+ {
+ return m_nsuper;
+ }
+
+ class InnerIterator;
+ template<typename Dest>
+ void solveInPlace( MatrixBase<Dest>&X) const;
+
+
+
+
+ protected:
+ Index m_row; // Number of rows
+ Index m_col; // Number of columns
+ Index m_nsuper; // Number of supernodes
+ Scalar* m_nzval; //array of nonzero values packed by column
+ Index* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j
+ Index* m_rowind; // Array of compressed row indices of rectangular supernodes
+ Index* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j
+ Index* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs
+ Index* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode
+
+ private :
+};
+
+/**
+ * \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L
+ *
+ */
+template<typename Scalar, typename Index>
+class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator
+{
+ public:
+ InnerIterator(const MappedSuperNodalMatrix& mat, Index outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_supno(mat.colToSup()[outer]),
+ m_idval(mat.colIndexPtr()[outer]),
+ m_startidval(m_idval),
+ m_endidval(mat.colIndexPtr()[outer+1]),
+ m_idrow(mat.rowIndexPtr()[outer]),
+ m_endidrow(mat.rowIndexPtr()[outer+1])
+ {}
+ inline InnerIterator& operator++()
+ {
+ m_idval++;
+ m_idrow++;
+ return *this;
+ }
+ inline Scalar value() const { return m_matrix.valuePtr()[m_idval]; }
+
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_idval]); }
+
+ inline Index index() const { return m_matrix.rowIndex()[m_idrow]; }
+ inline Index row() const { return index(); }
+ inline Index col() const { return m_outer; }
+
+ inline Index supIndex() const { return m_supno; }
+
+ inline operator bool() const
+ {
+ return ( (m_idval < m_endidval) && (m_idval >= m_startidval)
+ && (m_idrow < m_endidrow) );
+ }
+
+ protected:
+ const MappedSuperNodalMatrix& m_matrix; // Supernodal lower triangular matrix
+ const Index m_outer; // Current column
+ const Index m_supno; // Current SuperNode number
+ Index m_idval; // Index to browse the values in the current column
+ const Index m_startidval; // Start of the column value
+ const Index m_endidval; // End of the column value
+ Index m_idrow; // Index to browse the row indices
+ Index m_endidrow; // End index of row indices of the current column
+};
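+
+/* Usage sketch (mirroring SparseLU::absDeterminant() above), assuming L is a
+ * MappedSuperNodalMatrix and j a valid column index:
+ * \code
+ * for (typename MappedSuperNodalMatrix<Scalar,Index>::InnerIterator it(L, j); it; ++it)
+ *   visit(it.row(), it.value()); // visit() is a hypothetical callback
+ * \endcode
+ */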
+
+/**
+ * \brief Solve with the supernode triangular matrix
+ *
+ */
+template<typename Scalar, typename Index>
+template<typename Dest>
+void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) const
+{
+ Index n = X.rows();
+ Index nrhs = X.cols();
+ const Scalar * Lval = valuePtr(); // Nonzero values
+ Matrix<Scalar,Dynamic,Dynamic> work(n, nrhs); // working vector
+ work.setZero();
+ for (Index k = 0; k <= nsuper(); k ++)
+ {
+ Index fsupc = supToCol()[k]; // First column of the current supernode
+ Index istart = rowIndexPtr()[fsupc]; // Pointer index to the subscript of the current column
+ Index nsupr = rowIndexPtr()[fsupc+1] - istart; // Number of rows in the current supernode
+ Index nsupc = supToCol()[k+1] - fsupc; // Number of columns in the current supernode
+ Index nrow = nsupr - nsupc; // Number of rows in the non-diagonal part of the supernode
+ Index irow; //Current index row
+
+ if (nsupc == 1 )
+ {
+ for (Index j = 0; j < nrhs; j++)
+ {
+ InnerIterator it(*this, fsupc);
+ ++it; // Skip the diagonal element
+ for (; it; ++it)
+ {
+ irow = it.row();
+ X(irow, j) -= X(fsupc, j) * it.value();
+ }
+ }
+ }
+ else
+ {
+ // The supernode has more than one column
+ Index luptr = colIndexPtr()[fsupc];
+ Index lda = colIndexPtr()[fsupc+1] - luptr;
+
+ // Triangular solve
+ Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ U = A.template triangularView<UnitLower>().solve(U);
+
+ // Matrix-vector product
+ new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
+ work.block(0, 0, nrow, nrhs) = A * U;
+
+ //Begin Scatter
+ for (Index j = 0; j < nrhs; j++)
+ {
+ Index iptr = istart + nsupc;
+ for (Index i = 0; i < nrow; i++)
+ {
+ irow = rowIndex()[iptr];
+ X(irow, j) -= work(i, j); // Scatter operation
+ work(i, j) = Scalar(0);
+ iptr++;
+ }
+ }
+ }
+ }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSELU_MATRIX_H
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Utils.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Utils.h
new file mode 100644
index 00000000000..15352ac33ae
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Utils.h
@@ -0,0 +1,80 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_SPARSELU_UTILS_H
+#define EIGEN_SPARSELU_UTILS_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Count Nonzero elements in the factors
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu)
+{
+ nnzL = 0;
+ nnzU = (glu.xusub)(n);
+ Index nsuper = (glu.supno)(n);
+ Index jlen;
+ Index i, j, fsupc;
+ if (n <= 0 ) return;
+ // For each supernode
+ for (i = 0; i <= nsuper; i++)
+ {
+ fsupc = glu.xsup(i);
+ jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
+
+ for (j = fsupc; j < glu.xsup(i+1); j++)
+ {
+ nnzL += jlen;
+ nnzU += j - fsupc + 1;
+ jlen--;
+ }
+ }
+}
+
+/**
+ * \brief Fix up the data storage lsub for L-subscripts.
+ *
+ * It removes the subscripts sets for structural pruning,
+ * and applies permutation to the remaining subscripts
+ *
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu)
+{
+ Index fsupc, i, j, k, jstart;
+
+ Index nextl = 0;
+ Index nsuper = (glu.supno)(n);
+
+ // For each supernode
+ for (i = 0; i <= nsuper; i++)
+ {
+ fsupc = glu.xsup(i);
+ jstart = glu.xlsub(fsupc);
+ glu.xlsub(fsupc) = nextl;
+ for (j = jstart; j < glu.xlsub(fsupc + 1); j++)
+ {
+ glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A
+ nextl++;
+ }
+ for (k = fsupc+1; k < glu.xsup(i+1); k++)
+ glu.xlsub(k) = nextl; // other columns in supernode i
+ }
+
+ glu.xlsub(n) = nextl;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // EIGEN_SPARSELU_UTILS_H
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h
new file mode 100644
index 00000000000..f24bd87d3e9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h
@@ -0,0 +1,180 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of xcolumn_bmod.c file in SuperLU
+
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_COLUMN_BMOD_H
+#define SPARSELU_COLUMN_BMOD_H
+
+namespace Eigen {
+
+namespace internal {
+/**
+ * \brief Performs numeric block updates (sup-col) in topological order
+ *
+ * \param jcol current column to update
+ * \param nseg Number of segments in the U part
+ * \param dense Store the full representation of the column
+ * \param tempv working array
+ * \param segrep segment representative ...
+ * \param repfnz ??? First nonzero column in each row ??? ...
+ * \param fpanelc First column in the current panel
+ * \param glu Global LU data.
+ * \return 0 - successful return
+ * > 0 - number of bytes allocated when run out of space
+ *
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu)
+{
+ Index jsupno, k, ksub, krep, ksupno;
+ Index lptr, nrow, isub, irow, nextlu, new_next, ufirst;
+ Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros;
+ /* krep = representative of current k-th supernode
+ * fsupc = first supernodal column
+ * nsupc = number of columns in a supernode
+ * nsupr = number of rows in a supernode
+ * luptr = location of supernodal LU-block in storage
+ * kfnz = first nonz in the k-th supernodal segment
+ * no_zeros = number of leading zeros in a supernodal U-segment
+ */
+
+ jsupno = glu.supno(jcol);
+ // For each nonzero supernode segment of U[*,j] in topological order
+ k = nseg - 1;
+ Index d_fsupc; // distance between the first column of the current panel and the
+ // first column of the current snode
+ Index fst_col; // First column within small LU update
+ Index segsize;
+ for (ksub = 0; ksub < nseg; ksub++)
+ {
+ krep = segrep(k); k--;
+ ksupno = glu.supno(krep);
+ if (jsupno != ksupno )
+ {
+ // outside the rectangular supernode
+ fsupc = glu.xsup(ksupno);
+ fst_col = (std::max)(fsupc, fpanelc);
+
+ // Distance from the current supernode to the current panel;
+ // d_fsupc = 0 if fsupc > fpanelc
+ d_fsupc = fst_col - fsupc;
+
+ luptr = glu.xlusup(fst_col) + d_fsupc;
+ lptr = glu.xlsub(fsupc) + d_fsupc;
+
+ kfnz = repfnz(krep);
+ kfnz = (std::max)(kfnz, fpanelc);
+
+ segsize = krep - kfnz + 1;
+ nsupc = krep - fst_col + 1;
+ nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
+ nrow = nsupr - d_fsupc - nsupc;
+ Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col);
+
+
+ // Perform a triangular solver and block update,
+ // then scatter the result of sup-col update to dense
+ no_zeros = kfnz - fst_col;
+ if(segsize==1)
+ LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+ else
+ LU_kernel_bmod<Dynamic>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+ } // end if jsupno
+ } // end for each segment
+
+ // Process the supernodal portion of L\U[*,j]
+ nextlu = glu.xlusup(jcol);
+ fsupc = glu.xsup(jsupno);
+
+ // copy the SPA dense into L\U[*,j]
+ Index mem;
+ new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc);
+ Index offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next;
+ if(offset)
+ new_next += offset;
+ while (new_next > glu.nzlumax )
+ {
+ mem = memXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions);
+ if (mem) return mem;
+ }
+
+ for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++)
+ {
+ irow = glu.lsub(isub);
+ glu.lusup(nextlu) = dense(irow);
+ dense(irow) = Scalar(0.0);
+ ++nextlu;
+ }
+
+ if(offset)
+ {
+ glu.lusup.segment(nextlu,offset).setZero();
+ nextlu += offset;
+ }
+ glu.xlusup(jcol + 1) = nextlu; // close L\U(*,jcol);
+
+ /* For more updates within the panel (also within the current supernode),
+ * should start from the first column of the panel, or the first column
+ * of the supernode, whichever is bigger. There are two cases:
+ * 1) fsupc < fpanelc, then fst_col <-- fpanelc
+ * 2) fsupc >= fpanelc, then fst_col <-- fsupc
+ */
+ fst_col = (std::max)(fsupc, fpanelc);
+
+ if (fst_col < jcol)
+ {
+ // Distance between the current supernode and the current panel
+ // d_fsupc = 0 if fsupc >= fpanelc
+ d_fsupc = fst_col - fsupc;
+
+ lptr = glu.xlsub(fsupc) + d_fsupc;
+ luptr = glu.xlusup(fst_col) + d_fsupc;
+ nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension
+ nsupc = jcol - fst_col; // excluding jcol
+ nrow = nsupr - d_fsupc - nsupc;
+
+ // points to the beginning of jcol in snode L\U(jsupno)
+ ufirst = glu.xlusup(jcol) + d_fsupc;
+ Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
+ Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc);
+ u = A.template triangularView<UnitLower>().solve(u);
+
+ new (&A) Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
+ VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow);
+ l.noalias() -= A * u;
+
+ } // End if fst_col
+ return 0;
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_COLUMN_BMOD_H
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_dfs.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_dfs.h
new file mode 100644
index 00000000000..4c04b0e44e9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_dfs.h
@@ -0,0 +1,177 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of [s,d,c,z]column_dfs.c file in SuperLU
+
+ * -- SuperLU routine (version 2.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November 15, 1997
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_COLUMN_DFS_H
+#define SPARSELU_COLUMN_DFS_H
+
+template <typename Scalar, typename Index> class SparseLUImpl;
+namespace Eigen {
+
+namespace internal {
+
+template<typename IndexVector, typename ScalarVector>
+struct column_dfs_traits : no_assignment_operator
+{
+ typedef typename ScalarVector::Scalar Scalar;
+ typedef typename IndexVector::Scalar Index;
+ column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl<Scalar, Index>::GlobalLU_t& glu, SparseLUImpl<Scalar, Index>& luImpl)
+ : m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu), m_luImpl(luImpl)
+ {}
+ bool update_segrep(Index /*krep*/, Index /*jj*/)
+ {
+ return true;
+ }
+ void mem_expand(IndexVector& lsub, Index& nextl, Index chmark)
+ {
+ if (nextl >= m_glu.nzlmax)
+ m_luImpl.memXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions);
+ if (chmark != (m_jcol-1)) m_jsuper_ref = emptyIdxLU;
+ }
+ enum { ExpandMem = true };
+
+ Index m_jcol;
+ Index& m_jsuper_ref;
+ typename SparseLUImpl<Scalar, Index>::GlobalLU_t& m_glu;
+ SparseLUImpl<Scalar, Index>& m_luImpl;
+};
+
+
+/**
+ * \brief Performs a symbolic factorization on column jcol and decides the supernode boundary
+ *
+ * A supernode representative is the last column of a supernode.
+ * The nonzeros in U[*,j] are segments that end at supernode representatives.
+ * The routine returns a list of the supernodal representatives
+ * in topological order of the dfs that generates them.
+ * The location of the first nonzero in each supernodal segment
+ * (supernodal entry location) is also returned.
+ *
+ * \param m number of rows in the matrix
+ * \param jcol Current column
+ * \param perm_r Row permutation
+ * \param maxsuper Maximum number of columns allowed in a supernode
+ * \param [in,out] nseg Number of segments in current U[*,j] - new segments appended
+ * \param lsub_col defines the rhs vector to start the dfs
+ * \param [in,out] segrep Segment representatives - new segments appended
+ * \param repfnz First nonzero location in each row
+ * \param xprune
+ * \param marker marker[i] == jj, if i was visited during dfs of current column jj;
+ * \param parent
+ * \param xplore working array
+ * \param glu global LU data
+ * \return 0 on success
+ * > 0 number of bytes allocated when the routine ran out of space
+ *
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg, BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
+{
+
+ Index jsuper = glu.supno(jcol);
+ Index nextl = glu.xlsub(jcol);
+ VectorBlock<IndexVector> marker2(marker, 2*m, m);
+
+
+ column_dfs_traits<IndexVector, ScalarVector> traits(jcol, jsuper, glu, *this);
+
+ // For each nonzero in A(*,jcol) do dfs
+ for (Index k = 0; k < m && lsub_col[k] != emptyIdxLU; k++)
+ {
+ Index krow = lsub_col(k);
+ lsub_col(k) = emptyIdxLU;
+ Index kmark = marker2(krow);
+
+ // krow was visited before, go to the next nonz;
+ if (kmark == jcol) continue;
+
+ dfs_kernel(jcol, perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
+ xplore, glu, nextl, krow, traits);
+ } // for each nonzero ...
+
+ Index fsupc, jptr, jm1ptr, ito, ifrom, istop;
+ Index nsuper = glu.supno(jcol);
+ Index jcolp1 = jcol + 1;
+ Index jcolm1 = jcol - 1;
+
+ // check to see if j belongs in the same supernode as j-1
+ if ( jcol == 0 )
+ { // Do nothing for column 0
+ nsuper = glu.supno(0) = 0 ;
+ }
+ else
+ {
+ fsupc = glu.xsup(nsuper);
+ jptr = glu.xlsub(jcol); // Not yet compressed
+ jm1ptr = glu.xlsub(jcolm1);
+
+ // Use supernodes of type T2 : see SuperLU paper
+ if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU;
+
+ // Make sure the number of columns in a supernode doesn't
+ // exceed threshold
+ if ( (jcol - fsupc) >= maxsuper) jsuper = emptyIdxLU;
+
+ /* If jcol starts a new supernode, reclaim storage space in
+ * glu.lsub from previous supernode. Note we only store
+ * the subscript set of the first and last columns of
+ * a supernode. (first for num values, last for pruning)
+ */
+ if (jsuper == emptyIdxLU)
+ { // starts a new supernode
+ if ( (fsupc < jcolm1-1) )
+ { // >= 3 columns in nsuper
+ ito = glu.xlsub(fsupc+1);
+ glu.xlsub(jcolm1) = ito;
+ istop = ito + jptr - jm1ptr;
+ xprune(jcolm1) = istop; // initialize xprune(jcol-1)
+ glu.xlsub(jcol) = istop;
+
+ for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
+ glu.lsub(ito) = glu.lsub(ifrom);
+ nextl = ito; // = istop + length(jcol)
+ }
+ nsuper++;
+ glu.supno(jcol) = nsuper;
+ } // if a new supernode
+ } // end else: jcol > 0
+
+ // Tidy up the pointers before exit
+ glu.xsup(nsuper+1) = jcolp1;
+ glu.supno(jcolp1) = nsuper;
+ xprune(jcol) = nextl; // Initialize upper bound for pruning
+ glu.xlsub(jcolp1) = nextl;
+
+ return 0;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // SPARSELU_COLUMN_DFS_H
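
Conceptually, column_dfs computes the reach of the nonzeros of A(:,jcol) in the directed graph of L; the supernodal machinery above only compresses that traversal. A toy sketch of the underlying idea, not Eigen code, with the graph invented for illustration:

#include <iostream>
#include <vector>

// Rows reachable from the nonzeros of one column, via an explicit DFS stack.
std::vector<int> reach(const std::vector<std::vector<int> >& G,
                       const std::vector<int>& col_nnz, int n)
{
  std::vector<char> marked(n, 0);
  std::vector<int> stack(col_nnz), out;
  while (!stack.empty())
  {
    int v = stack.back(); stack.pop_back();
    if (marked[v]) continue;
    marked[v] = 1;
    out.push_back(v);                     // v enters the structure of the new column
    for (std::size_t k = 0; k < G[v].size(); ++k) stack.push_back(G[v][k]);
  }
  return out;
}

int main()
{
  // Edge v -> w means "row w appears below the diagonal in column v of L".
  std::vector<std::vector<int> > G(4);
  G[0].push_back(2); G[1].push_back(3); G[2].push_back(3);
  std::vector<int> nnz; nnz.push_back(0); nnz.push_back(1);
  std::vector<int> r = reach(G, nnz, 4);
  for (std::size_t k = 0; k < r.size(); ++k) std::cout << r[k] << ' ';
  std::cout << std::endl;
}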
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
new file mode 100644
index 00000000000..170610d9f29
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
@@ -0,0 +1,106 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+/*
+
+ * NOTE: This file is the modified version of [s,d,c,z]copy_to_ucol.c file in SuperLU
+
+ * -- SuperLU routine (version 2.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November 15, 1997
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_COPY_TO_UCOL_H
+#define SPARSELU_COPY_TO_UCOL_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Performs numeric block updates (sup-col) in topological order
+ *
+ * \param jcol current column to update
+ * \param nseg Number of segments in the U part
+ * \param segrep segment representative ...
+ * \param repfnz First nonzero column in each row ...
+ * \param perm_r Row permutation
+ * \param dense Store the full representation of the column
+ * \param glu Global LU data.
+ * \return 0 - successful return
+ * > 0 - number of bytes allocated when the routine ran out of space
+ *
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu)
+{
+ Index ksub, krep, ksupno;
+
+ Index jsupno = glu.supno(jcol);
+
+ // For each nonzero supernode segment of U[*,j] in topological order
+ Index k = nseg - 1, i;
+ Index nextu = glu.xusub(jcol);
+ Index kfnz, isub, segsize;
+ Index new_next,irow;
+ Index fsupc, mem;
+ for (ksub = 0; ksub < nseg; ksub++)
+ {
+ krep = segrep(k); k--;
+ ksupno = glu.supno(krep);
+ if (jsupno != ksupno ) // should go into ucol();
+ {
+ kfnz = repfnz(krep);
+ if (kfnz != emptyIdxLU)
+ { // Nonzero U-segment
+ fsupc = glu.xsup(ksupno);
+ isub = glu.xlsub(fsupc) + kfnz - fsupc;
+ segsize = krep - kfnz + 1;
+ new_next = nextu + segsize;
+ while (new_next > glu.nzumax)
+ {
+ mem = memXpand<ScalarVector>(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions);
+ if (mem) return mem;
+ mem = memXpand<IndexVector>(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions);
+ if (mem) return mem;
+
+ }
+
+ for (i = 0; i < segsize; i++)
+ {
+ irow = glu.lsub(isub);
+ glu.usub(nextu) = perm_r(irow); // Unlike the L part, the U part is stored in its final order
+ glu.ucol(nextu) = dense(irow);
+ dense(irow) = Scalar(0.0);
+ nextu++;
+ isub++;
+ }
+
+ } // end nonzero U-segment
+
+ } // end if jsupno
+
+ } // end for each segment
+ glu.xusub(jcol + 1) = nextu; // close U(*,jcol)
+ return 0;
+}
+
+} // namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_COPY_TO_UCOL_H
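
The loop above is a plain gather: each finished U-segment is moved from the dense accumulator into compressed storage, with the row permutation applied on the fly. A standalone sketch, not part of the patch, with all values invented:

#include <iostream>
#include <vector>

int main()
{
  std::vector<double> dense  = {0.0, 3.5, 0.0, -1.25};  // SPA for the current column
  std::vector<int>    rows   = {1, 3};                  // rows of the U-segment
  std::vector<int>    perm_r = {2, 0, 3, 1};            // row permutation
  std::vector<int>    usub;                             // compressed row indices
  std::vector<double> ucol;                             // compressed values

  for (int irow : rows)
  {
    usub.push_back(perm_r[irow]); // U is stored in its final (permuted) order
    ucol.push_back(dense[irow]);
    dense[irow] = 0.0;            // reset the accumulator slot for the next column
  }
  for (std::size_t k = 0; k < usub.size(); ++k)
    std::cout << usub[k] << ":" << ucol[k] << ' ';
  std::cout << '\n';
}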
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_gemm_kernel.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
new file mode 100644
index 00000000000..9e4e3e72b70
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
@@ -0,0 +1,279 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSELU_GEMM_KERNEL_H
+#define EIGEN_SPARSELU_GEMM_KERNEL_H
+
+namespace Eigen {
+
+namespace internal {
+
+
+/** \internal
+ * A general matrix-matrix product kernel optimized for the SparseLU factorization.
+ * - A, B, and C must be column major
+ * - lda and ldc must be multiples of the respective packet size
+ * - C must have the same alignment as A
+ */
+template<typename Scalar,typename Index>
+EIGEN_DONT_INLINE
+void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)
+{
+ using namespace Eigen::internal;
+
+ typedef typename packet_traits<Scalar>::type Packet;
+ enum {
+ NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+ PacketSize = packet_traits<Scalar>::size,
+ PM = 8, // peeling in M
+ RN = 2, // register blocking
+ RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
+ BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk
+ SM = PM*PacketSize // step along M
+ };
+ Index d_end = (d/RK)*RK; // number of columns of A (rows of B) suitable for full register blocking
+ Index n_end = (n/RN)*RN; // number of columns of B-C suitable for processing RN columns at once
+ Index i0 = internal::first_aligned(A,m);
+
+ eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_aligned(C,m)));
+
+ // handle the non-aligned rows of A and C without any optimization:
+ for(Index i=0; i<i0; ++i)
+ {
+ for(Index j=0; j<n; ++j)
+ {
+ Scalar c = C[i+j*ldc];
+ for(Index k=0; k<d; ++k)
+ c += B[k+j*ldb] * A[i+k*lda];
+ C[i+j*ldc] = c;
+ }
+ }
+ // process the remaining rows per chunk of BM rows
+ for(Index ib=i0; ib<m; ib+=BM)
+ {
+ Index actual_b = std::min<Index>(BM, m-ib); // actual number of rows
+ Index actual_b_end1 = (actual_b/SM)*SM; // actual number of rows suitable for peeling
+ Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization
+
+ // Let's process two columns of B-C at once
+ for(Index j=0; j<n_end; j+=RN)
+ {
+ const Scalar* Bc0 = B+(j+0)*ldb;
+ const Scalar* Bc1 = B+(j+1)*ldb;
+
+ for(Index k=0; k<d_end; k+=RK)
+ {
+
+ // load and expand a RN x RK block of B
+ Packet b00, b10, b20, b30, b01, b11, b21, b31;
+ b00 = pset1<Packet>(Bc0[0]);
+ b10 = pset1<Packet>(Bc0[1]);
+ if(RK==4) b20 = pset1<Packet>(Bc0[2]);
+ if(RK==4) b30 = pset1<Packet>(Bc0[3]);
+ b01 = pset1<Packet>(Bc1[0]);
+ b11 = pset1<Packet>(Bc1[1]);
+ if(RK==4) b21 = pset1<Packet>(Bc1[2]);
+ if(RK==4) b31 = pset1<Packet>(Bc1[3]);
+
+ Packet a0, a1, a2, a3, c0, c1, t0, t1;
+
+ const Scalar* A0 = A+ib+(k+0)*lda;
+ const Scalar* A1 = A+ib+(k+1)*lda;
+ const Scalar* A2 = A+ib+(k+2)*lda;
+ const Scalar* A3 = A+ib+(k+3)*lda;
+
+ Scalar* C0 = C+ib+(j+0)*ldc;
+ Scalar* C1 = C+ib+(j+1)*ldc;
+
+ a0 = pload<Packet>(A0);
+ a1 = pload<Packet>(A1);
+ if(RK==4)
+ {
+ a2 = pload<Packet>(A2);
+ a3 = pload<Packet>(A3);
+ }
+ else
+ {
+ // workaround "may be used uninitialized in this function" warning
+ a2 = a3 = a0;
+ }
+
+#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
+#define WORK(I) \
+ c0 = pload<Packet>(C0+i+(I)*PacketSize); \
+ c1 = pload<Packet>(C1+i+(I)*PacketSize); \
+ KMADD(c0, a0, b00, t0) \
+ KMADD(c1, a0, b01, t1) \
+ a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
+ KMADD(c0, a1, b10, t0) \
+ KMADD(c1, a1, b11, t1) \
+ a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
+ if(RK==4) KMADD(c0, a2, b20, t0) \
+ if(RK==4) KMADD(c1, a2, b21, t1) \
+ if(RK==4) a2 = pload<Packet>(A2+i+(I+1)*PacketSize); \
+ if(RK==4) KMADD(c0, a3, b30, t0) \
+ if(RK==4) KMADD(c1, a3, b31, t1) \
+ if(RK==4) a3 = pload<Packet>(A3+i+(I+1)*PacketSize); \
+ pstore(C0+i+(I)*PacketSize, c0); \
+ pstore(C1+i+(I)*PacketSize, c1)
+
+ // process rows of A' - C' with aggressive vectorization and peeling
+ for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
+ {
+ EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
+ prefetch((A0+i+(5)*PacketSize));
+ prefetch((A1+i+(5)*PacketSize));
+ if(RK==4) prefetch((A2+i+(5)*PacketSize));
+ if(RK==4) prefetch((A3+i+(5)*PacketSize));
+ WORK(0);
+ WORK(1);
+ WORK(2);
+ WORK(3);
+ WORK(4);
+ WORK(5);
+ WORK(6);
+ WORK(7);
+ }
+ // process the remaining rows with vectorization only
+ for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
+ {
+ WORK(0);
+ }
+#undef WORK
+ // process the remaining rows without vectorization
+ for(Index i=actual_b_end2; i<actual_b; ++i)
+ {
+ if(RK==4)
+ {
+ C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
+ C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
+ }
+ else
+ {
+ C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
+ C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
+ }
+ }
+
+ Bc0 += RK;
+ Bc1 += RK;
+ } // peeled loop on k
+ } // peeled loop on the columns j
+ // process the last column (we now perform a matrix-vector product)
+ if((n-n_end)>0)
+ {
+ const Scalar* Bc0 = B+(n-1)*ldb;
+
+ for(Index k=0; k<d_end; k+=RK)
+ {
+
+ // load and expand a 1 x RK block of B
+ Packet b00, b10, b20, b30;
+ b00 = pset1<Packet>(Bc0[0]);
+ b10 = pset1<Packet>(Bc0[1]);
+ if(RK==4) b20 = pset1<Packet>(Bc0[2]);
+ if(RK==4) b30 = pset1<Packet>(Bc0[3]);
+
+ Packet a0, a1, a2, a3, c0, t0/*, t1*/;
+
+ const Scalar* A0 = A+ib+(k+0)*lda;
+ const Scalar* A1 = A+ib+(k+1)*lda;
+ const Scalar* A2 = A+ib+(k+2)*lda;
+ const Scalar* A3 = A+ib+(k+3)*lda;
+
+ Scalar* C0 = C+ib+(n_end)*ldc;
+
+ a0 = pload<Packet>(A0);
+ a1 = pload<Packet>(A1);
+ if(RK==4)
+ {
+ a2 = pload<Packet>(A2);
+ a3 = pload<Packet>(A3);
+ }
+ else
+ {
+ // workaround "may be used uninitialized in this function" warning
+ a2 = a3 = a0;
+ }
+
+#define WORK(I) \
+ c0 = pload<Packet>(C0+i+(I)*PacketSize); \
+ KMADD(c0, a0, b00, t0) \
+ a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
+ KMADD(c0, a1, b10, t0) \
+ a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
+ if(RK==4) KMADD(c0, a2, b20, t0) \
+ if(RK==4) a2 = pload<Packet>(A2+i+(I+1)*PacketSize); \
+ if(RK==4) KMADD(c0, a3, b30, t0) \
+ if(RK==4) a3 = pload<Packet>(A3+i+(I+1)*PacketSize); \
+ pstore(C0+i+(I)*PacketSize, c0);
+
+ // aggressive vectorization and peeling
+ for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
+ {
+ EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
+ WORK(0);
+ WORK(1);
+ WORK(2);
+ WORK(3);
+ WORK(4);
+ WORK(5);
+ WORK(6);
+ WORK(7);
+ }
+ // vectorization only
+ for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
+ {
+ WORK(0);
+ }
+ // remaining scalars
+ for(Index i=actual_b_end2; i<actual_b; ++i)
+ {
+ if(RK==4)
+ C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
+ else
+ C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
+ }
+
+ Bc0 += RK;
+#undef WORK
+ }
+ }
+
+ // process the last columns of A, corresponding to the last rows of B
+ Index rd = d-d_end;
+ if(rd>0)
+ {
+ for(Index j=0; j<n; ++j)
+ {
+ enum {
+ Alignment = PacketSize>1 ? Aligned : 0
+ };
+ typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
+ typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
+ if(rd==1) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);
+
+ else if(rd==2) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
+ + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);
+
+ else MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
+ + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
+ + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
+ }
+ }
+
+ } // blocking on the rows of A and C
+}
+#undef KMADD
+
+} // namespace internal
+
+} // namespace Eigen
+
+#endif // EIGEN_SPARSELU_GEMM_KERNEL_H
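
Stripped of SIMD packets, prefetching, and peeling, the kernel above is register blocking: keep an RN x RK block of B in registers so every value loaded from A feeds several accumulators. A scalar sketch of that idea with RN = RK = 2 (cleanup loops for odd sizes omitted), not the actual kernel:

#include <iostream>
#include <vector>

void gemm_block2x2(int m, int n, int d,
                   const double* A, int lda,
                   const double* B, int ldb,
                   double* C, int ldc)
{
  int n2 = (n/2)*2, d2 = (d/2)*2;
  for (int j = 0; j < n2; j += 2)
    for (int k = 0; k < d2; k += 2)
    {
      // 2x2 block of B kept in "registers"
      double b00 = B[k   + j*ldb],     b10 = B[k+1 + j*ldb];
      double b01 = B[k   + (j+1)*ldb], b11 = B[k+1 + (j+1)*ldb];
      for (int i = 0; i < m; ++i)
      {
        double a0 = A[i + k*lda], a1 = A[i + (k+1)*lda]; // loaded once, used twice
        C[i + j*ldc]     += a0*b00 + a1*b10;
        C[i + (j+1)*ldc] += a0*b01 + a1*b11;
      }
    }
  // (the real kernel adds cleanup loops for a leftover column of B and leftover rows of B)
}

int main()
{
  int m = 4, n = 2, d = 2;
  std::vector<double> A(m*d, 1.0), B(d*n, 2.0), C(m*n, 0.0);
  gemm_block2x2(m, n, d, A.data(), m, B.data(), d, C.data(), m);
  std::cout << C[0] << '\n'; // 4 = 1*2 + 1*2
}

Each A value is loaded once and reused for two columns of C; the packet version above scales the same reuse up to an RN x RK = 2 x 4 block with vector registers.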
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
new file mode 100644
index 00000000000..7a4e4305aa9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
@@ -0,0 +1,127 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* This file is a modified version of heap_relax_snode.c file in SuperLU
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+
+#ifndef SPARSELU_HEAP_RELAX_SNODE_H
+#define SPARSELU_HEAP_RELAX_SNODE_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Identify the initial relaxed supernodes
+ *
+ * This routine is applied to a symmetric elimination tree.
+ * It assumes that the matrix has been reordered according to the postorder of the etree
+ * \param n The number of columns
+ * \param et elimination tree
+ * \param relax_columns Maximum number of columns allowed in a relaxed snode
+ * \param descendants Number of descendants of each node in the etree
+ * \param relax_end last column in a supernode
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)
+{
+
+ // The etree may not be postordered, but it is heap-ordered
+ IndexVector post;
+ internal::treePostorder(n, et, post); // Post order etree
+ IndexVector inv_post(n+1);
+ Index i;
+ for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
+
+ // Renumber etree in postorder
+ IndexVector iwork(n);
+ IndexVector et_save(n+1);
+ for (i = 0; i < n; ++i)
+ {
+ iwork(post(i)) = post(et(i));
+ }
+ et_save = et; // Save the original etree
+ et = iwork;
+
+ // compute the number of descendants of each node in the etree
+ relax_end.setConstant(emptyIdxLU);
+ Index j, parent;
+ descendants.setZero();
+ for (j = 0; j < n; j++)
+ {
+ parent = et(j);
+ if (parent != n) // not the dummy root
+ descendants(parent) += descendants(j) + 1;
+ }
+ // Identify the relaxed supernodes by postorder traversal of the etree
+ Index snode_start; // beginning of a snode
+ Index k;
+ Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
+ Index nsuper_et = 0; // Number of relaxed snodes in the original etree
+ Index l;
+ for (j = 0; j < n; )
+ {
+ parent = et(j);
+ snode_start = j;
+ while ( parent != n && descendants(parent) < relax_columns )
+ {
+ j = parent;
+ parent = et(j);
+ }
+ // Found a supernode in postordered etree, j is the last column
+ ++nsuper_et_post;
+ k = n;
+ for (i = snode_start; i <= j; ++i)
+ k = (std::min)(k, inv_post(i));
+ l = inv_post(j);
+ if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode
+ {
+ // This is also a supernode in the original etree
+ relax_end(k) = l; // Record last column
+ ++nsuper_et;
+ }
+ else
+ {
+ for (i = snode_start; i <= j; ++i)
+ {
+ l = inv_post(i);
+ if (descendants(i) == 0)
+ {
+ relax_end(l) = l;
+ ++nsuper_et;
+ }
+ }
+ }
+ j++;
+ // Search for a new leaf
+ while (j < n && descendants(j) != 0) j++; // check the bound first so descendants(n) is never read
+ } // End postorder traversal of the etree
+
+ // Recover the original etree
+ et = et_save;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // SPARSELU_HEAP_RELAX_SNODE_H
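
The grouping criterion above is compact enough to run by hand. A standalone sketch, not part of the patch, that computes descendant counts and the resulting relaxed supernodes for a small invented etree (already postordered, so the inverse-postorder bookkeeping of the full routine is unnecessary):

#include <iostream>
#include <vector>

int main()
{
  const int n = 5, relax_columns = 3;
  std::vector<int> et = {1, 4, 3, 4, 5};   // parent of each node; a parent of n marks the root
  std::vector<int> desc(n, 0);
  for (int j = 0; j < n; ++j)
    if (et[j] != n) desc[et[j]] += desc[j] + 1;

  for (int j = 0; j < n; )
  {
    int snode_start = j, parent = et[j];
    // climb while the parent's subtree still fits in a relaxed snode
    while (parent != n && desc[parent] < relax_columns) { j = parent; parent = et[j]; }
    std::cout << "snode [" << snode_start << ", " << j << "]\n"; // prints [0,1] and [2,3]
    ++j;
    while (j < n && desc[j] != 0) ++j;     // advance to the next leaf
  }
}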
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
new file mode 100644
index 00000000000..0d0283b132b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
@@ -0,0 +1,130 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef SPARSELU_KERNEL_BMOD_H
+#define SPARSELU_KERNEL_BMOD_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Performs numeric block updates from a given supernode to a single column
+ *
+ * \param segsize Size of the segment (and blocks) to use for updates
+ * \param[in,out] dense Packed values of the original matrix
+ * \param tempv temporary vector to use for updates
+ * \param lusup array containing the supernodes
+ * \param lda Leading dimension in the supernode
+ * \param nrow Number of rows in the rectangular part of the supernode
+ * \param lsub compressed row subscripts of supernodes
+ * \param lptr pointer to the first column of the current supernode in lsub
+ * \param no_zeros Number of nonzero elements before the diagonal part of the supernode
+ * \return 0 on success
+ */
+template <int SegSizeAtCompileTime> struct LU_kernel_bmod
+{
+ template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+ static EIGEN_DONT_INLINE void run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda,
+ const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros);
+};
+
+template <int SegSizeAtCompileTime>
+template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda,
+ const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)
+{
+ typedef typename ScalarVector::Scalar Scalar;
+ // First, copy U[*,j] segment from dense(*) to tempv(*)
+ // The result of triangular solve is in tempv[*];
+ // The result of matrix-vector update is in dense[*]
+ Index isub = lptr + no_zeros;
+ int i;
+ Index irow;
+ for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
+ {
+ irow = lsub(isub);
+ tempv(i) = dense(irow);
+ ++isub;
+ }
+ // Dense triangular solve -- start effective triangle
+ luptr += lda * no_zeros + no_zeros;
+ // Form Eigen matrix and vector
+ Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
+ Map<Matrix<Scalar,SegSizeAtCompileTime,1> > u(tempv.data(), segsize);
+
+ u = A.template triangularView<UnitLower>().solve(u);
+
+ // Dense matrix-vector product y <-- B*x
+ luptr += segsize;
+ const Index PacketSize = internal::packet_traits<Scalar>::size;
+ Index ldl = internal::first_multiple(nrow, PacketSize);
+ Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
+ Index aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize);
+ Index aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize;
+ Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );
+
+ l.setZero();
+ internal::sparselu_gemm<Scalar>(l.rows(), l.cols(), B.cols(), B.data(), B.outerStride(), u.data(), u.outerStride(), l.data(), l.outerStride());
+
+ // Scatter tempv[] into SPA dense[] as a temporary storage
+ isub = lptr + no_zeros;
+ for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
+ {
+ irow = lsub(isub++);
+ dense(irow) = tempv(i);
+ }
+
+ // Scatter l into SPA dense[]
+ for (i = 0; i < nrow; i++)
+ {
+ irow = lsub(isub++);
+ dense(irow) -= l(i);
+ }
+}
+
+template <> struct LU_kernel_bmod<1>
+{
+ template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+ static EIGEN_DONT_INLINE void run(const int /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr,
+ const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros);
+};
+
+
+template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+EIGEN_DONT_INLINE void LU_kernel_bmod<1>::run(const int /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr,
+ const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)
+{
+ typedef typename ScalarVector::Scalar Scalar;
+ Scalar f = dense(lsub(lptr + no_zeros));
+ luptr += lda * no_zeros + no_zeros + 1;
+ const Scalar* a(lusup.data() + luptr);
+ const /*typename IndexVector::Scalar*/Index* irow(lsub.data()+lptr + no_zeros + 1);
+ Index i = 0;
+ for (; i+1 < nrow; i+=2)
+ {
+ Index i0 = *(irow++);
+ Index i1 = *(irow++);
+ Scalar a0 = *(a++);
+ Scalar a1 = *(a++);
+ Scalar d0 = dense.coeff(i0);
+ Scalar d1 = dense.coeff(i1);
+ d0 -= f*a0;
+ d1 -= f*a1;
+ dense.coeffRef(i0) = d0;
+ dense.coeffRef(i1) = d1;
+ }
+ if(i<nrow)
+ dense.coeffRef(*(irow++)) -= f * *(a++);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // SPARSELU_KERNEL_BMOD_H
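
The segsize == 1 specialization above degenerates to a scattered axpy, dense[row[i]] -= f * a[i], unrolled by two. A minimal standalone sketch of that update with invented data:

#include <iostream>
#include <vector>

int main()
{
  double f = 2.0;                          // pivot-column value being eliminated
  std::vector<double> a   = {0.5, 0.25, 4.0};
  std::vector<int>    row = {2, 0, 5};     // scatter targets, as in lsub
  std::vector<double> dense(6, 1.0);

  std::size_t i = 0;
  for (; i + 1 < a.size(); i += 2)         // unrolled by two, as in the specialization
  {
    dense[row[i]]   -= f * a[i];
    dense[row[i+1]] -= f * a[i+1];
  }
  if (i < a.size())                        // odd tail
    dense[row[i]] -= f * a[i];

  for (double v : dense) std::cout << v << ' ';
  std::cout << '\n';
}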
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h
new file mode 100644
index 00000000000..da0e0fc3c60
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h
@@ -0,0 +1,223 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of [s,d,c,z]panel_bmod.c file in SuperLU
+
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PANEL_BMOD_H
+#define SPARSELU_PANEL_BMOD_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Performs numeric block updates (sup-panel) in topological order.
+ *
+ * Before entering this routine, the original nonzeros in the panel
+ * were already copied into the spa[m,w]
+ *
+ * \param m number of rows in the matrix
+ * \param w Panel size
+ * \param jcol Starting column of the panel
+ * \param nseg Number of segments in the U part
+ * \param dense Store the full representation of the panel
+ * \param tempv working array
+ * \param segrep segment representative... first row in the segment
+ * \param repfnz First nonzero rows
+ * \param glu Global LU data.
+ *
+ *
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const Index jcol,
+ const Index nseg, ScalarVector& dense, ScalarVector& tempv,
+ IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu)
+{
+
+ Index ksub,jj,nextl_col;
+ Index fsupc, nsupc, nsupr, nrow;
+ Index krep, kfnz;
+ Index lptr; // points to the row subscripts of a supernode
+ Index luptr; // ...
+ Index segsize,no_zeros ;
+ // For each nonz supernode segment of U[*,j] in topological order
+ Index k = nseg - 1;
+ const Index PacketSize = internal::packet_traits<Scalar>::size;
+
+ for (ksub = 0; ksub < nseg; ksub++)
+ { // For each updating supernode
+ /* krep = representative of current k-th supernode
+ * fsupc = first supernodal column
+ * nsupc = number of columns in a supernode
+ * nsupr = number of rows in a supernode
+ */
+ krep = segrep(k); k--;
+ fsupc = glu.xsup(glu.supno(krep));
+ nsupc = krep - fsupc + 1;
+ nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
+ nrow = nsupr - nsupc;
+ lptr = glu.xlsub(fsupc);
+
+ // loop over the panel columns to detect the actual number of columns and rows
+ Index u_rows = 0;
+ Index u_cols = 0;
+ for (jj = jcol; jj < jcol + w; jj++)
+ {
+ nextl_col = (jj-jcol) * m;
+ VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
+
+ kfnz = repfnz_col(krep);
+ if ( kfnz == emptyIdxLU )
+ continue; // skip any zero segment
+
+ segsize = krep - kfnz + 1;
+ u_cols++;
+ u_rows = (std::max)(segsize,u_rows);
+ }
+
+ if(nsupc >= 2)
+ {
+ Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);
+ Map<Matrix<Scalar,Dynamic,Dynamic>, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
+
+ // gather U
+ Index u_col = 0;
+ for (jj = jcol; jj < jcol + w; jj++)
+ {
+ nextl_col = (jj-jcol) * m;
+ VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
+ VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
+
+ kfnz = repfnz_col(krep);
+ if ( kfnz == emptyIdxLU )
+ continue; // skip any zero segment
+
+ segsize = krep - kfnz + 1;
+ luptr = glu.xlusup(fsupc);
+ no_zeros = kfnz - fsupc;
+
+ Index isub = lptr + no_zeros;
+ Index off = u_rows-segsize;
+ for (Index i = 0; i < off; i++) U(i,u_col) = 0;
+ for (Index i = 0; i < segsize; i++)
+ {
+ Index irow = glu.lsub(isub);
+ U(i+off,u_col) = dense_col(irow);
+ ++isub;
+ }
+ u_col++;
+ }
+ // solve U = A^-1 U
+ luptr = glu.xlusup(fsupc);
+ Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
+ no_zeros = (krep - u_rows + 1) - fsupc;
+ luptr += lda * no_zeros + no_zeros;
+ Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
+ U = A.template triangularView<UnitLower>().solve(U);
+
+ // update
+ luptr += u_rows;
+ Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
+ eigen_assert(tempv.size()>w*ldu + nrow*w + 1);
+
+ Index ldl = internal::first_multiple<Index>(nrow, PacketSize);
+ Index offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize;
+ Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
+
+ L.setZero();
+ internal::sparselu_gemm<Scalar>(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride());
+
+ // scatter U and L
+ u_col = 0;
+ for (jj = jcol; jj < jcol + w; jj++)
+ {
+ nextl_col = (jj-jcol) * m;
+ VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
+ VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
+
+ kfnz = repfnz_col(krep);
+ if ( kfnz == emptyIdxLU )
+ continue; // skip any zero segment
+
+ segsize = krep - kfnz + 1;
+ no_zeros = kfnz - fsupc;
+ Index isub = lptr + no_zeros;
+
+ Index off = u_rows-segsize;
+ for (Index i = 0; i < segsize; i++)
+ {
+ Index irow = glu.lsub(isub++);
+ dense_col(irow) = U.coeff(i+off,u_col);
+ U.coeffRef(i+off,u_col) = 0;
+ }
+
+ // Scatter l into SPA dense[]
+ for (Index i = 0; i < nrow; i++)
+ {
+ Index irow = glu.lsub(isub++);
+ dense_col(irow) -= L.coeff(i,u_col);
+ L.coeffRef(i,u_col) = 0;
+ }
+ u_col++;
+ }
+ }
+ else // level 2 only
+ {
+ // Sequence through each column in the panel
+ for (jj = jcol; jj < jcol + w; jj++)
+ {
+ nextl_col = (jj-jcol) * m;
+ VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
+ VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
+
+ kfnz = repfnz_col(krep);
+ if ( kfnz == emptyIdxLU )
+ continue; // skip any zero segment
+
+ segsize = krep - kfnz + 1;
+ luptr = glu.xlusup(fsupc);
+
+ Index lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr
+
+ // Perform a triangular solve and block update,
+ // then scatter the result of the sup-col update to dense[]
+ no_zeros = kfnz - fsupc;
+ if(segsize==1) LU_kernel_bmod<1>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+ else if(segsize==2) LU_kernel_bmod<2>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+ else if(segsize==3) LU_kernel_bmod<3>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+ else LU_kernel_bmod<Dynamic>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+ } // End for each column in the panel
+ }
+
+ } // End for each updating supernode
+} // end panel bmod
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // SPARSELU_PANEL_BMOD_H
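
The payoff of the nsupc >= 2 branch above is that one triangular solve and one GEMM serve the whole panel. A minimal sketch of that blocked update with ordinary dense Eigen objects standing in for the mapped supernode storage (sizes invented):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  const int u_rows = 3, u_cols = 2, nrow = 2;
  MatrixXd A = MatrixXd::Random(u_rows, u_rows); // only its unit-lower part is used
  MatrixXd B = MatrixXd::Random(nrow, u_rows);   // rectangular part of the supernode
  MatrixXd U = MatrixXd::Random(u_rows, u_cols); // panel segments gathered column by column

  U = A.triangularView<UnitLower>().solve(U);    // one solve for all panel columns
  MatrixXd L = B * U;                            // one GEMM for all panel columns

  std::cout << L << std::endl;                   // scattered back into dense[] above
}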
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_dfs.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_dfs.h
new file mode 100644
index 00000000000..dc0054efd25
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_dfs.h
@@ -0,0 +1,258 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of [s,d,c,z]panel_dfs.c file in SuperLU
+
+ * -- SuperLU routine (version 2.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November 15, 1997
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PANEL_DFS_H
+#define SPARSELU_PANEL_DFS_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename IndexVector>
+struct panel_dfs_traits
+{
+ typedef typename IndexVector::Scalar Index;
+ panel_dfs_traits(Index jcol, Index* marker)
+ : m_jcol(jcol), m_marker(marker)
+ {}
+ bool update_segrep(Index krep, Index jj)
+ {
+ if(m_marker[krep]<m_jcol)
+ {
+ m_marker[krep] = jj;
+ return true;
+ }
+ return false;
+ }
+ void mem_expand(IndexVector& /*glu.lsub*/, Index /*nextl*/, Index /*chmark*/) {}
+ enum { ExpandMem = false };
+ Index m_jcol;
+ Index* m_marker;
+};
+
+
+template <typename Scalar, typename Index>
+template <typename Traits>
+void SparseLUImpl<Scalar,Index>::dfs_kernel(const Index jj, IndexVector& perm_r,
+ Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
+ Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
+ IndexVector& xplore, GlobalLU_t& glu,
+ Index& nextl_col, Index krow, Traits& traits
+ )
+{
+
+ Index kmark = marker(krow);
+
+ // For each unmarked krow of jj
+ marker(krow) = jj;
+ Index kperm = perm_r(krow);
+ if (kperm == emptyIdxLU ) {
+ // krow is in L : place it in structure of L(*, jj)
+ panel_lsub(nextl_col++) = krow; // krow is indexed into A
+
+ traits.mem_expand(panel_lsub, nextl_col, kmark);
+ }
+ else
+ {
+ // krow is in U : if its supernode-representative krep
+ // has been explored, update repfnz(*)
+ // krep = supernode representative of the current row
+ Index krep = glu.xsup(glu.supno(kperm)+1) - 1;
+ // First nonzero element in the current column:
+ Index myfnz = repfnz_col(krep);
+
+ if (myfnz != emptyIdxLU )
+ {
+ // Representative visited before
+ if (myfnz > kperm ) repfnz_col(krep) = kperm;
+
+ }
+ else
+ {
+ // Otherwise, perform dfs starting at krep
+ Index oldrep = emptyIdxLU;
+ parent(krep) = oldrep;
+ repfnz_col(krep) = kperm;
+ Index xdfs = glu.xlsub(krep);
+ Index maxdfs = xprune(krep);
+
+ Index kpar;
+ do
+ {
+ // For each unmarked kchild of krep
+ while (xdfs < maxdfs)
+ {
+ Index kchild = glu.lsub(xdfs);
+ xdfs++;
+ Index chmark = marker(kchild);
+
+ if (chmark != jj )
+ {
+ marker(kchild) = jj;
+ Index chperm = perm_r(kchild);
+
+ if (chperm == emptyIdxLU)
+ {
+ // case kchild is in L: place it in L(*, j)
+ panel_lsub(nextl_col++) = kchild;
+ traits.mem_expand(panel_lsub, nextl_col, chmark);
+ }
+ else
+ {
+ // case kchild is in U :
+ // chrep = its supernode-rep. If its rep has been explored,
+ // update its repfnz(*)
+ Index chrep = glu.xsup(glu.supno(chperm)+1) - 1;
+ myfnz = repfnz_col(chrep);
+
+ if (myfnz != emptyIdxLU)
+ { // Visited before
+ if (myfnz > chperm)
+ repfnz_col(chrep) = chperm;
+ }
+ else
+ { // Cont. dfs at snode-rep of kchild
+ xplore(krep) = xdfs;
+ oldrep = krep;
+ krep = chrep; // Go deeper down G(L)
+ parent(krep) = oldrep;
+ repfnz_col(krep) = chperm;
+ xdfs = glu.xlsub(krep);
+ maxdfs = xprune(krep);
+
+ } // end if myfnz != -1
+ } // end if chperm == -1
+
+ } // end if chmark !=jj
+ } // end while xdfs < maxdfs
+
+ // krow has no more unexplored nbrs :
+ // Place snode-rep krep in postorder DFS, if this
+ // segment is seen for the first time. (Note that
+ // "repfnz(krep)" may change later.)
+ // Backtrack dfs to its parent
+ if(traits.update_segrep(krep,jj))
+ //if (marker1(krep) < jcol )
+ {
+ segrep(nseg) = krep;
+ ++nseg;
+ //marker1(krep) = jj;
+ }
+
+ kpar = parent(krep); // Pop recursion, mimic recursion
+ if (kpar == emptyIdxLU)
+ break; // dfs done
+ krep = kpar;
+ xdfs = xplore(krep);
+ maxdfs = xprune(krep);
+
+ } while (kpar != emptyIdxLU); // Do until empty stack
+
+ } // end if (myfnz == -1)
+
+ } // end if (kperm == -1)
+}
+
+/**
+ * \brief Performs a symbolic factorization on a panel of columns [jcol, jcol+w)
+ *
+ * A supernode representative is the last column of a supernode.
+ * The nonzeros in U[*,j] are segments that end at supernode representatives
+ *
+ * The routine returns a list of the supernodal representatives
+ * in topological order of the dfs that generates them. This list is
+ * a superset of the topological order of each individual column within
+ * the panel.
+ * The location of the first nonzero in each supernodal segment
+ * (supernodal entry location) is also returned. Each column has
+ * a separate list for this purpose.
+ *
+ * Two marker arrays are used for the dfs:
+ * marker[i] == jj, if i was visited during dfs of current column jj;
+ * marker1[i] >= jcol, if i was visited by earlier columns in this panel;
+ *
+ * \param[in] m number of rows in the matrix
+ * \param[in] w Panel size
+ * \param[in] jcol Starting column of the panel
+ * \param[in] A Input matrix in column-major storage
+ * \param[in] perm_r Row permutation
+ * \param[out] nseg Number of U segments
+ * \param[out] dense Accumulate the column vectors of the panel
+ * \param[out] panel_lsub Subscripts of the rows in the panel
+ * \param[out] segrep Segment representatives, i.e. the first nonzero row of each segment
+ * \param[out] repfnz First nonzero location in each row
+ * \param[out] xprune The pruned elimination tree
+ * \param[out] marker work vector
+ * \param parent The elimination tree
+ * \param xplore work vector
+ * \param glu The global data structure
+ *
+ */
+
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
+{
+ Index nextl_col; // Next available position in panel_lsub[*,jj]
+
+ // Initialize pointers
+ VectorBlock<IndexVector> marker1(marker, m, m);
+ nseg = 0;
+
+ panel_dfs_traits<IndexVector> traits(jcol, marker1.data());
+
+ // For each column in the panel
+ for (Index jj = jcol; jj < jcol + w; jj++)
+ {
+ nextl_col = (jj - jcol) * m;
+
+ VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row
+ VectorBlock<ScalarVector> dense_col(dense,nextl_col, m); // Accumulate a column vector here
+
+
+ // For each nnz in A[*, jj] do depth first search
+ for (typename MatrixType::InnerIterator it(A, jj); it; ++it)
+ {
+ Index krow = it.row();
+ dense_col(krow) = it.value();
+
+ Index kmark = marker(krow);
+ if (kmark == jj)
+ continue; // krow visited before, go to the next nonzero
+
+ dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent,
+ xplore, glu, nextl_col, krow, traits);
+ }// end for nonzeros in column jj
+
+ } // end for column jj
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_PANEL_DFS_H
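
The two marker arrays deserve a concrete illustration: marker(i) == jj suppresses revisits within the dfs of column jj, while the panel-level marker consulted through panel_dfs_traits ensures each segment representative is reported only once per panel. A toy sketch, not part of the patch:

#include <iostream>
#include <vector>

int main()
{
  const int n = 4, jcol = 10;                 // panel starts at column 10
  std::vector<int> marker(n, -1), marker1(n, -1);
  std::vector<int> segrep;

  auto visit = [&](int row, int jj) {
    if (marker[row] == jj) return;            // already seen in this column's dfs
    marker[row] = jj;
    if (marker1[row] < jcol)                  // first visit within this panel
    {
      segrep.push_back(row);
      marker1[row] = jj;
    }
  };

  visit(2, 10); visit(2, 10);                 // duplicate within column 10: one entry
  visit(2, 11);                               // revisit by column 11: no new representative
  for (int r : segrep) std::cout << r << ' '; // prints: 2
  std::cout << '\n';
}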
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h
new file mode 100644
index 00000000000..ddcd4ec98f8
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h
@@ -0,0 +1,134 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of xpivotL.c file in SuperLU
+
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PIVOTL_H
+#define SPARSELU_PIVOTL_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Performs the numerical pivoting on the current column of L, and the CDIV operation.
+ *
+ * Pivot policy :
+ * (1) Compute thresh = u * max_(i>=j) abs(A_ij);
+ * (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
+ * pivot row = k;
+ * ELSE IF abs(A_jj) >= thresh THEN
+ * pivot row = j;
+ * ELSE
+ * pivot row = m;
+ *
+ * Note: If you absolutely want to use a given pivot order, then set u=0.0.
+ *
+ * \param jcol The current column of L
+ * \param diagpivotthresh diagonal pivoting threshold
+ * \param[in,out] perm_r Row permutation (threshold pivoting)
+ * \param[in] iperm_c column permutation - used to find the diagonal of Pc*A*Pc'
+ * \param[out] pivrow The pivot row
+ * \param glu Global LU data
+ * \return 0 if success, i > 0 if U(i,i) is exactly zero
+ *
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu)
+{
+
+ Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol
+ Index nsupc = jcol - fsupc; // Number of columns in the supernode portion, excluding jcol; nsupc >=0
+ Index lptr = glu.xlsub(fsupc); // pointer to the starting location of the row subscripts for this supernode portion
+ Index nsupr = glu.xlsub(fsupc+1) - lptr; // Number of rows in the supernode
+ Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimension
+ Scalar* lu_sup_ptr = &(glu.lusup.data()[glu.xlusup(fsupc)]); // Start of the current supernode
+ Scalar* lu_col_ptr = &(glu.lusup.data()[glu.xlusup(jcol)]); // Start of jcol in the supernode
+ Index* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode
+
+ // Determine the largest abs numerical value for partial pivoting
+ Index diagind = iperm_c(jcol); // diagonal index
+ RealScalar pivmax = 0.0;
+ Index pivptr = nsupc;
+ Index diag = emptyIdxLU;
+ RealScalar rtemp;
+ Index isub, icol, itemp, k;
+ for (isub = nsupc; isub < nsupr; ++isub) {
+ rtemp = std::abs(lu_col_ptr[isub]);
+ if (rtemp > pivmax) {
+ pivmax = rtemp;
+ pivptr = isub;
+ }
+ if (lsub_ptr[isub] == diagind) diag = isub;
+ }
+
+ // Test for singularity
+ if ( pivmax == 0.0 ) {
+ pivrow = lsub_ptr[pivptr];
+ perm_r(pivrow) = jcol;
+ return (jcol+1);
+ }
+
+ RealScalar thresh = diagpivotthresh * pivmax;
+
+ // Choose appropriate pivotal element
+
+ {
+ // Test if the diagonal element can be used as a pivot (given the threshold value)
+ if (diag >= 0 )
+ {
+ // Diagonal element exists
+ rtemp = std::abs(lu_col_ptr[diag]);
+ if (rtemp != 0.0 && rtemp >= thresh) pivptr = diag;
+ }
+ pivrow = lsub_ptr[pivptr];
+ }
+
+ // Record pivot row
+ perm_r(pivrow) = jcol;
+ // Interchange row subscripts
+ if (pivptr != nsupc )
+ {
+ std::swap( lsub_ptr[pivptr], lsub_ptr[nsupc] );
+ // Interchange numerical values as well, for the two rows in the whole snode
+ // such that L is indexed the same way as A
+ for (icol = 0; icol <= nsupc; icol++)
+ {
+ itemp = pivptr + icol * lda;
+ std::swap(lu_sup_ptr[itemp], lu_sup_ptr[nsupc + icol * lda]);
+ }
+ }
+ // cdiv operations
+ Scalar temp = Scalar(1.0) / lu_col_ptr[nsupc];
+ for (k = nsupc+1; k < nsupr; k++)
+ lu_col_ptr[k] *= temp;
+ return 0;
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_PIVOTL_H
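
The pivot policy documented above is easy to isolate: take the diagonal whenever it is within a factor diagpivotthresh of the column's largest magnitude, otherwise fall back to strict partial pivoting. A standalone sketch with invented data:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

std::size_t choose_pivot(const std::vector<double>& col, std::size_t diag,
                         double diagpivotthresh)
{
  std::size_t piv = 0;
  double pivmax = 0.0;
  for (std::size_t i = 0; i < col.size(); ++i)           // largest magnitude in the column
    if (std::abs(col[i]) > pivmax) { pivmax = std::abs(col[i]); piv = i; }

  if (pivmax == 0.0) return col.size();                  // singular column
  double thresh = diagpivotthresh * pivmax;
  if (std::abs(col[diag]) != 0.0 && std::abs(col[diag]) >= thresh)
    return diag;                                         // keep the diagonal pivot
  return piv;                                            // fall back to the largest entry
}

int main()
{
  std::vector<double> col = {0.9, -0.2, 1.0};
  std::cout << choose_pivot(col, 0, 0.5) << '\n';        // 0: the diagonal passes the test
  std::cout << choose_pivot(col, 0, 1.0) << '\n';        // 2: strict partial pivoting
}

With diagpivotthresh = 1.0 this reduces to classical partial pivoting; with 0.0 any nonzero diagonal is accepted, matching the u=0.0 note above.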
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pruneL.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pruneL.h
new file mode 100644
index 00000000000..66460d16884
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pruneL.h
@@ -0,0 +1,135 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+ * NOTE: This file is the modified version of [s,d,c,z]pruneL.c file in SuperLU
+
+ * -- SuperLU routine (version 2.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November 15, 1997
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PRUNEL_H
+#define SPARSELU_PRUNEL_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Prunes the L-structure.
+ *
+ * It prunes the L-structure of supernodes whose L-structure contains the current pivot row "pivrow"
+ *
+ *
+ * \param jcol The current column of L
+ * \param[in] perm_r Row permutation
+ * \param[out] pivrow The pivot row
+ * \param nseg Number of segments
+ * \param segrep
+ * \param repfnz
+ * \param[out] xprune
+ * \param glu Global LU data
+ *
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu)
+{
+ // For each supernode-rep irep in U[*,j]
+ Index jsupno = glu.supno(jcol);
+ Index i,irep,irep1;
+ bool movnum, do_prune = false;
+ Index kmin = 0, kmax = 0, minloc, maxloc,krow;
+ for (i = 0; i < nseg; i++)
+ {
+ irep = segrep(i);
+ irep1 = irep + 1;
+ do_prune = false;
+
+ // Don't prune with a zero U-segment
+ if (repfnz(irep) == emptyIdxLU) continue;
+
+ // If a snode overlaps with the next panel, then the U-segment
+ // is fragmented into two parts -- irep and irep1. We should let
+ // pruning occur at the rep-column in irep1's snode.
+ if (glu.supno(irep) == glu.supno(irep1) ) continue; // don't prune
+
+ // If it has not been pruned & it has a nonz in row L(pivrow,i)
+ if (glu.supno(irep) != jsupno )
+ {
+ if ( xprune (irep) >= glu.xlsub(irep1) )
+ {
+ kmin = glu.xlsub(irep);
+ kmax = glu.xlsub(irep1) - 1;
+ for (krow = kmin; krow <= kmax; krow++)
+ {
+ if (glu.lsub(krow) == pivrow)
+ {
+ do_prune = true;
+ break;
+ }
+ }
+ }
+
+ if (do_prune)
+ {
+ // do a quicksort-type partition
+ // movnum=true means that the num values have to be exchanged
+ movnum = false;
+ if (irep == glu.xsup(glu.supno(irep)) ) // Snode of size 1
+ movnum = true;
+
+ while (kmin <= kmax)
+ {
+ if (perm_r(glu.lsub(kmax)) == emptyIdxLU)
+ kmax--;
+ else if ( perm_r(glu.lsub(kmin)) != emptyIdxLU)
+ kmin++;
+ else
+ {
+ // kmin below pivrow (not yet pivoted), and kmax
+ // above pivrow: interchange the two subscripts
+ std::swap(glu.lsub(kmin), glu.lsub(kmax));
+
+ // If the supernode has only one column, then we
+ // only keep one set of subscripts. For any subscript
+ // interchange performed, a similar interchange must be
+ // done on the numerical values.
+ if (movnum)
+ {
+ minloc = glu.xlusup(irep) + ( kmin - glu.xlsub(irep) );
+ maxloc = glu.xlusup(irep) + ( kmax - glu.xlsub(irep) );
+ std::swap(glu.lusup(minloc), glu.lusup(maxloc));
+ }
+ kmin++;
+ kmax--;
+ }
+ } // end while
+
+ xprune(irep) = kmin; //Pruning
+ } // end if do_prune
+ } // end pruning
+ } // End for each U-segment
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_PRUNEL_H
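
The partition inside the do_prune branch above is a two-pointer sweep, the same shape as a quicksort partition. A standalone sketch with invented data, showing how already-pivoted rows collect at the front and the prune point ends up at kmin:

#include <iostream>
#include <utility>
#include <vector>

int main()
{
  const int empty = -1;
  std::vector<int> lsub   = {4, 1, 7, 2};           // row subscripts of a supernode column
  std::vector<int> perm_r = {empty, 0, 1, empty, empty, empty, empty, 2};

  int kmin = 0, kmax = (int)lsub.size() - 1;
  while (kmin <= kmax)
  {
    if      (perm_r[lsub[kmax]] == empty) kmax--;   // unpivoted: stays in the tail
    else if (perm_r[lsub[kmin]] != empty) kmin++;   // pivoted: stays in the head
    else    std::swap(lsub[kmin++], lsub[kmax--]);  // exchange the mismatched pair
  }
  // prune point: entries [kmin, end) need no further symbolic exploration
  for (int r : lsub) std::cout << r << ' ';
  std::cout << "| xprune = " << kmin << '\n';
}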
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_relax_snode.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_relax_snode.h
new file mode 100644
index 00000000000..58ec32e27e9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_relax_snode.h
@@ -0,0 +1,83 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* This file is a modified version of heap_relax_snode.c file in SuperLU
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+
+#ifndef SPARSELU_RELAX_SNODE_H
+#define SPARSELU_RELAX_SNODE_H
+
+namespace Eigen {
+
+namespace internal {
+
+/**
+ * \brief Identify the initial relaxed supernodes
+ *
+ * This routine is applied to a column elimination tree.
+ * It assumes that the matrix has been reordered according to the postorder of the etree
+ * \param n the number of columns
+ * \param et elimination tree
+ * \param relax_columns Maximum number of columns allowed in a relaxed snode
+ * \param descendants Number of descendants of each node in the etree
+ * \param relax_end last column in a supernode
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)
+{
+
+ // compute the number of descendants of each node in the etree
+ Index j, parent;
+ relax_end.setConstant(emptyIdxLU);
+ descendants.setZero();
+ for (j = 0; j < n; j++)
+ {
+ parent = et(j);
+ if (parent != n) // not the dummy root
+ descendants(parent) += descendants(j) + 1;
+ }
+ // Identify the relaxed supernodes by postorder traversal of the etree
+ Index snode_start; // beginning of a snode
+ for (j = 0; j < n; )
+ {
+ parent = et(j);
+ snode_start = j;
+ while ( parent != n && descendants(parent) < relax_columns )
+ {
+ j = parent;
+ parent = et(j);
+ }
+ // Found a supernode in postordered etree, j is the last column
+ relax_end(snode_start) = j; // Record last column
+ j++;
+ // Search for a new leaf
+ while (j < n && descendants(j) != 0) j++; // check the bound first so descendants(n) is never read
+ } // End postorder traversal of the etree
+
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // SPARSELU_RELAX_SNODE_H
diff --git a/extern/Eigen3/Eigen/src/SparseQR/SparseQR.h b/extern/Eigen3/Eigen/src/SparseQR/SparseQR.h
new file mode 100644
index 00000000000..afda43bfc67
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseQR/SparseQR.h
@@ -0,0 +1,657 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_QR_H
+#define EIGEN_SPARSE_QR_H
+
+namespace Eigen {
+
+template<typename MatrixType, typename OrderingType> class SparseQR;
+template<typename SparseQRType> struct SparseQRMatrixQReturnType;
+template<typename SparseQRType> struct SparseQRMatrixQTransposeReturnType;
+template<typename SparseQRType, typename Derived> struct SparseQR_QProduct;
+namespace internal {
+ template <typename SparseQRType> struct traits<SparseQRMatrixQReturnType<SparseQRType> >
+ {
+ typedef typename SparseQRType::MatrixType ReturnType;
+ typedef typename ReturnType::Index Index;
+ typedef typename ReturnType::StorageKind StorageKind;
+ };
+ template <typename SparseQRType> struct traits<SparseQRMatrixQTransposeReturnType<SparseQRType> >
+ {
+ typedef typename SparseQRType::MatrixType ReturnType;
+ };
+ template <typename SparseQRType, typename Derived> struct traits<SparseQR_QProduct<SparseQRType, Derived> >
+ {
+ typedef typename Derived::PlainObject ReturnType;
+ };
+} // End namespace internal
+
+/**
+ * \ingroup SparseQR_Module
+ * \class SparseQR
+ * \brief Sparse left-looking rank-revealing QR factorization
+ *
+ * This class implements a left-looking rank-revealing QR decomposition
+ * of sparse matrices. When a column has a norm less than a given tolerance,
+ * it is implicitly permuted to the end. The QR factorization thus obtained is
+ * given by A*P = Q*R where R is upper triangular or trapezoidal.
+ *
+ * P is the column permutation which is the product of the fill-reducing and the
+ * rank-revealing permutations. Use colsPermutation() to get it.
+ *
+ * Q is the orthogonal matrix represented as products of Householder reflectors.
+ * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
+ * You can then apply it to a vector.
+ *
+ * R is the sparse triangular or trapezoidal matrix. The latter occurs when A is rank-deficient.
+ * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank.
+ *
+ * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
+ * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module
+ * OrderingMethods \endlink module for the list of built-in and external ordering methods.
+ *
+ *
+ */
+template<typename _MatrixType, typename _OrderingType>
+class SparseQR
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef _OrderingType OrderingType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef SparseMatrix<Scalar,ColMajor,Index> QRMatrixType;
+ typedef Matrix<Index, Dynamic, 1> IndexVector;
+ typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
+ typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+ public:
+ SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+ { }
+
+ SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+ {
+ compute(mat);
+ }
+ void compute(const MatrixType& mat)
+ {
+ analyzePattern(mat);
+ factorize(mat);
+ }
+ void analyzePattern(const MatrixType& mat);
+ void factorize(const MatrixType& mat);
+
+ /** \returns the number of rows of the represented matrix.
+ */
+ inline Index rows() const { return m_pmat.rows(); }
+
+ /** \returns the number of columns of the represented matrix.
+ */
+ inline Index cols() const { return m_pmat.cols();}
+
+ /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization.
+ */
+ const QRMatrixType& matrixR() const { return m_R; }
+
+ /** \returns the rank of the matrix, i.e., the number of columns that are not linearly dependent, as determined by the pivoting threshold.
+ *
+ * \sa setPivotThreshold()
+ */
+ Index rank() const
+ {
+ eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+ return m_nonzeropivots;
+ }
+
+ /** \returns an expression of the matrix Q as products of sparse Householder reflectors.
+ * The common usage of this function is to apply it to a dense matrix or vector
+ * \code
+ * VectorXd B1, B2;
+ * // Initialize B1
+ * B2 = matrixQ() * B1;
+ * \endcode
+ *
+ * To get a plain SparseMatrix representation of Q:
+ * \code
+ * SparseMatrix<double> Q;
+ * Q = SparseQR<SparseMatrix<double>, COLAMDOrdering<int> >(A).matrixQ();
+ * \endcode
+ * Internally, this call simply performs a sparse product between the matrix Q
+ * and a sparse identity matrix. However, because the sparse reflectors are
+ * stored unsorted, two transpositions are needed to sort them before
+ * performing the product.
+ */
+ SparseQRMatrixQReturnType<SparseQR> matrixQ() const
+ { return SparseQRMatrixQReturnType<SparseQR>(*this); }
+
+ /** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R
+ * It is the combination of the fill-in reducing permutation and numerical column pivoting.
+ */
+ const PermutationType& colsPermutation() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_outputPerm_c;
+ }
+
+ /** \returns A string describing the type of error.
+ * This method is provided to ease debugging, not to handle errors.
+ */
+ std::string lastErrorMessage() const { return m_lastError; }
+
+ /** \internal */
+ template<typename Rhs, typename Dest>
+ bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &dest) const
+ {
+ eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+ eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
+
+ Index rank = this->rank();
+
+ // Compute Q^T * b;
+ typename Dest::PlainObject y, b;
+ y = this->matrixQ().transpose() * B;
+ b = y;
+
+ // Solve with the triangular matrix R
+ y.resize((std::max)(cols(),Index(y.rows())),y.cols());
+ y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));
+ y.bottomRows(y.rows()-rank).setZero();
+
+ // Apply the column permutation
+ if (m_perm_c.size()) dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
+ else dest = y.topRows(cols());
+
+ m_info = Success;
+ return true;
+ }
+
+
+ /** Sets the threshold that is used to determine linearly dependent columns during the factorization.
+ *
+ * In practice, if during the factorization the norm of the column that has to be eliminated is below
+ * this threshold, then the entire column is treated as zero and moved to the end.
+ */
+ void setPivotThreshold(const RealScalar& threshold)
+ {
+ m_useDefaultThreshold = false;
+ m_threshold = threshold;
+ }
+
+ /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<SparseQR, Rhs> solve(const MatrixBase<Rhs>& B) const
+ {
+ eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+ eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
+ return internal::solve_retval<SparseQR, Rhs>(*this, B.derived());
+ }
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<SparseQR, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
+ {
+ eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+ eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
+ return internal::sparse_solve_retval<SparseQR, Rhs>(*this, B.derived());
+ }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if the computation was successful,
+ * \c NumericalIssue if the QR factorization reports a numerical problem,
+ * \c InvalidInput if the input matrix is invalid.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+ return m_info;
+ }
+
+ protected:
+ inline void sort_matrix_Q()
+ {
+ if(this->m_isQSorted) return;
+ // The matrix Q is sorted during the transposition
+ SparseMatrix<Scalar, RowMajor, Index> mQrm(this->m_Q);
+ this->m_Q = mQrm;
+ this->m_isQSorted = true;
+ }
+
+
+ protected:
+ bool m_isInitialized;
+ bool m_analysisIsok;
+ bool m_factorizationIsok;
+ mutable ComputationInfo m_info;
+ std::string m_lastError;
+ QRMatrixType m_pmat; // Temporary matrix
+ QRMatrixType m_R; // The triangular factor matrix
+ QRMatrixType m_Q; // The orthogonal reflectors
+ ScalarVector m_hcoeffs; // The Householder coefficients
+ PermutationType m_perm_c; // Fill-reducing Column permutation
+ PermutationType m_pivotperm; // The permutation for rank revealing
+ PermutationType m_outputPerm_c; // The final column permutation
+ RealScalar m_threshold; // Threshold to determine null Householder reflections
+ bool m_useDefaultThreshold; // Use default threshold
+ Index m_nonzeropivots; // Number of non zero pivots found
+ IndexVector m_etree; // Column elimination tree
+ IndexVector m_firstRowElt; // First element in each row
+ bool m_isQSorted; // whether Q is sorted or not
+
+ template <typename, typename > friend struct SparseQR_QProduct;
+ template <typename > friend struct SparseQRMatrixQReturnType;
+
+};
+
+/** \brief Preprocessing step of a QR factorization
+ *
+ * In this step, the fill-reducing permutation is computed and applied to the columns of A,
+ * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited.
+ *
+ * \note In this step it is assumed that there is no empty row in the matrix \a mat.
+ */
+template <typename MatrixType, typename OrderingType>
+void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
+{
+ // Compute the column fill reducing ordering
+ OrderingType ord;
+ ord(mat, m_perm_c);
+ Index n = mat.cols();
+ Index m = mat.rows();
+
+ if (!m_perm_c.size())
+ {
+ m_perm_c.resize(n);
+ m_perm_c.indices().setLinSpaced(n, 0,n-1);
+ }
+
+ // Compute the column elimination tree of the permuted matrix
+ m_outputPerm_c = m_perm_c.inverse();
+ internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+
+ m_R.resize(n, n);
+ m_Q.resize(m, n);
+
+ // Allocate space for nonzero elements : rough estimation
+ m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
+ m_Q.reserve(2*mat.nonZeros());
+ m_hcoeffs.resize(n);
+ m_analysisIsok = true;
+}
+
+/** \brief Performs the numerical QR factorization of the input matrix
+ *
+ * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with
+ * a matrix having the same sparsity pattern as \a mat.
+ *
+ * \param mat The sparse column-major matrix
+ */
+template <typename MatrixType, typename OrderingType>
+void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
+{
+ using std::abs;
+ using std::max;
+
+ eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
+ Index m = mat.rows();
+ Index n = mat.cols();
+ IndexVector mark(m); mark.setConstant(-1); // Record the visited nodes
+ IndexVector Ridx(n), Qidx(m); // Temporarily store the row indices of the current column of R and Q
+ Index nzcolR, nzcolQ; // Number of nonzeros in the current column of R and Q
+ ScalarVector tval(m); // The dense vector used to compute the current column
+ bool found_diag;
+
+ m_pmat = mat;
+ m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
+ // Apply the fill-in reducing permutation lazily:
+ for (int i = 0; i < n; i++)
+ {
+ Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
+ m_pmat.outerIndexPtr()[p] = mat.outerIndexPtr()[i];
+ m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
+ }
+
+ /* Compute the default threshold, see:
+ * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+ * Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, page 8:3.
+ */
+ if(m_useDefaultThreshold)
+ {
+ RealScalar max2Norm = 0.0;
+ for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm());
+ m_threshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
+ }
+
+ // Initialize the numerical permutation
+ m_pivotperm.setIdentity(n);
+
+ Index nonzeroCol = 0; // Record the number of valid pivots
+
+ // Left looking rank-revealing QR factorization: compute a column of R and Q at a time
+ for (Index col = 0; col < (std::min)(n,m); ++col)
+ {
+ mark.setConstant(-1);
+ m_R.startVec(col);
+ m_Q.startVec(col);
+ mark(nonzeroCol) = col;
+ Qidx(0) = nonzeroCol;
+ nzcolR = 0; nzcolQ = 1;
+ found_diag = col>=m;
+ tval.setZero();
+
+ // Symbolic factorization: find the nonzero locations of column k of the factors R and Q, i.e.,
+ // all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k.
+ // Note: if the diagonal entry does not exist, its contribution must be added explicitly;
+ // hence the found_diag trick, which permits one extra iteration on the diagonal element if it has not been found yet.
+ for (typename MatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
+ {
+ Index curIdx = nonzeroCol;
+ if(itp) curIdx = itp.row();
+ if(curIdx == nonzeroCol) found_diag = true;
+
+ // Get the nonzeros indexes of the current column of R
+ Index st = m_firstRowElt(curIdx); // The traversal of the etree starts here
+ if (st < 0 )
+ {
+ m_lastError = "Empty row found during numerical factorization";
+ m_info = InvalidInput;
+ return;
+ }
+
+ // Traverse the etree
+ Index bi = nzcolR;
+ for (; mark(st) != col; st = m_etree(st))
+ {
+ Ridx(nzcolR) = st; // Add this row to the list,
+ mark(st) = col; // and mark this row as visited
+ nzcolR++;
+ }
+
+ // Reverse the list to get the topological ordering
+ Index nt = nzcolR-bi;
+ for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1));
+
+ // Copy the current (curIdx,pcol) value of the input matrix
+ if(itp) tval(curIdx) = itp.value();
+ else tval(curIdx) = Scalar(0);
+
+ // Compute the pattern of Q(:,k)
+ if(curIdx > nonzeroCol && mark(curIdx) != col )
+ {
+ Qidx(nzcolQ) = curIdx; // Add this row to the pattern of Q,
+ mark(curIdx) = col; // and mark it as visited
+ nzcolQ++;
+ }
+ }
+
+ // Browse all the indexes of R(:,col) in reverse order
+ for (Index i = nzcolR-1; i >= 0; i--)
+ {
+ Index curIdx = m_pivotperm.indices()(Ridx(i));
+
+ // Apply the curIdx-th Householder vector to the current column (temporarily stored into tval)
+ Scalar tdot(0);
+
+ // First compute q' * tval
+ tdot = m_Q.col(curIdx).dot(tval);
+
+ tdot *= m_hcoeffs(curIdx);
+
+ // Then update tval = tval - q * tau
+ // FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse")
+ for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
+ tval(itq.row()) -= itq.value() * tdot;
+
+ // Detect fill-in for the current column of Q
+ if(m_etree(Ridx(i)) == nonzeroCol)
+ {
+ for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
+ {
+ Index iQ = itq.row();
+ if (mark(iQ) != col)
+ {
+ Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q,
+ mark(iQ) = col; // and mark it as visited
+ }
+ }
+ }
+ } // End update current column
+
+ // Compute the Householder reflection that eliminates the current column
+ // FIXME this step should call the Householder module.
+ Scalar tau;
+ RealScalar beta;
+ Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
+
+ // First, the squared norm of Q((col+1):m, col)
+ RealScalar sqrNorm = 0.;
+ for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
+
+ if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
+ {
+ tau = RealScalar(0);
+ beta = numext::real(c0);
+ tval(Qidx(0)) = 1;
+ }
+ else
+ {
+ beta = std::sqrt(numext::abs2(c0) + sqrNorm);
+ if(numext::real(c0) >= RealScalar(0))
+ beta = -beta;
+ tval(Qidx(0)) = 1;
+ for (Index itq = 1; itq < nzcolQ; ++itq)
+ tval(Qidx(itq)) /= (c0 - beta);
+ tau = numext::conj((beta-c0) / beta);
+
+ }
+
+ // Insert values in R
+ for (Index i = nzcolR-1; i >= 0; i--)
+ {
+ Index curIdx = Ridx(i);
+ if(curIdx < nonzeroCol)
+ {
+ m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);
+ tval(curIdx) = Scalar(0.);
+ }
+ }
+
+ if(abs(beta) >= m_threshold)
+ {
+ m_R.insertBackByOuterInner(col, nonzeroCol) = beta;
+ nonzeroCol++;
+ // The Householder coefficient
+ m_hcoeffs(col) = tau;
+ // Record the Householder reflections
+ for (Index itq = 0; itq < nzcolQ; ++itq)
+ {
+ Index iQ = Qidx(itq);
+ m_Q.insertBackByOuterInnerUnordered(col,iQ) = tval(iQ);
+ tval(iQ) = Scalar(0.);
+ }
+ }
+ else
+ {
+ // Zero pivot found: implicitly move this column to the end
+ m_hcoeffs(col) = Scalar(0);
+ for (Index j = nonzeroCol; j < n-1; j++)
+ std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);
+
+ // Recompute the column elimination tree
+ internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
+ }
+ }
+
+ // Finalize the column pointers of the sparse matrices R and Q
+ m_Q.finalize();
+ m_Q.makeCompressed();
+ m_R.finalize();
+ m_R.makeCompressed();
+ m_isQSorted = false;
+
+ m_nonzeropivots = nonzeroCol;
+
+ if(nonzeroCol<n)
+ {
+ // Permute the triangular factor to put the 'dead' columns to the end
+ MatrixType tempR(m_R);
+ m_R = tempR * m_pivotperm;
+
+ // Update the column permutation
+ m_outputPerm_c = m_outputPerm_c * m_pivotperm;
+ }
+
+ m_isInitialized = true;
+ m_factorizationIsok = true;
+ m_info = Success;
+}
+
+namespace internal {
+
+template<typename _MatrixType, typename OrderingType, typename Rhs>
+struct solve_retval<SparseQR<_MatrixType,OrderingType>, Rhs>
+ : solve_retval_base<SparseQR<_MatrixType,OrderingType>, Rhs>
+{
+ typedef SparseQR<_MatrixType,OrderingType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+template<typename _MatrixType, typename OrderingType, typename Rhs>
+struct sparse_solve_retval<SparseQR<_MatrixType, OrderingType>, Rhs>
+ : sparse_solve_retval_base<SparseQR<_MatrixType, OrderingType>, Rhs>
+{
+ typedef SparseQR<_MatrixType, OrderingType> Dec;
+ EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec, Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ this->defaultEvalTo(dst);
+ }
+};
+} // end namespace internal
+
+template <typename SparseQRType, typename Derived>
+struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived> >
+{
+ typedef typename SparseQRType::QRMatrixType MatrixType;
+ typedef typename SparseQRType::Scalar Scalar;
+ typedef typename SparseQRType::Index Index;
+ // Get the references
+ SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
+ m_qr(qr),m_other(other),m_transpose(transpose) {}
+ inline Index rows() const { return m_transpose ? m_qr.rows() : m_qr.cols(); }
+ inline Index cols() const { return m_other.cols(); }
+
+ // Assign to a vector
+ template<typename DesType>
+ void evalTo(DesType& res) const
+ {
+ Index n = m_qr.cols();
+ res = m_other;
+ if (m_transpose)
+ {
+ eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
+ // Compute res = Q' * other column by column
+ for(Index j = 0; j < res.cols(); j++){
+ for (Index k = 0; k < n; k++)
+ {
+ Scalar tau = Scalar(0);
+ tau = m_qr.m_Q.col(k).dot(res.col(j));
+ if(tau==Scalar(0)) continue;
+ tau = tau * m_qr.m_hcoeffs(k);
+ res.col(j) -= tau * m_qr.m_Q.col(k);
+ }
+ }
+ }
+ else
+ {
+ eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
+ // Compute res = Q * other column by column
+ for(Index j = 0; j < res.cols(); j++)
+ {
+ for (Index k = n-1; k >=0; k--)
+ {
+ Scalar tau = Scalar(0);
+ tau = m_qr.m_Q.col(k).dot(res.col(j));
+ if(tau==Scalar(0)) continue;
+ tau = tau * m_qr.m_hcoeffs(k);
+ res.col(j) -= tau * m_qr.m_Q.col(k);
+ }
+ }
+ }
+ }
+
+ const SparseQRType& m_qr;
+ const Derived& m_other;
+ bool m_transpose;
+};
+
+template<typename SparseQRType>
+struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> >
+{
+ typedef typename SparseQRType::Index Index;
+ typedef typename SparseQRType::Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+ SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {}
+ template<typename Derived>
+ SparseQR_QProduct<SparseQRType, Derived> operator*(const MatrixBase<Derived>& other)
+ {
+ return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(),false);
+ }
+ SparseQRMatrixQTransposeReturnType<SparseQRType> adjoint() const
+ {
+ return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
+ }
+ inline Index rows() const { return m_qr.rows(); }
+ inline Index cols() const { return m_qr.cols(); }
+ // To use for operations with the transpose of Q
+ SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
+ {
+ return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
+ }
+ template<typename Dest> void evalTo(MatrixBase<Dest>& dest) const
+ {
+ dest.derived() = m_qr.matrixQ() * Dest::Identity(m_qr.rows(), m_qr.rows());
+ }
+ template<typename Dest> void evalTo(SparseMatrixBase<Dest>& dest) const
+ {
+ Dest idMat(m_qr.rows(), m_qr.rows());
+ idMat.setIdentity();
+ // Sort the sparse Householder reflectors if needed
+ const_cast<SparseQRType *>(&m_qr)->sort_matrix_Q();
+ dest.derived() = SparseQR_QProduct<SparseQRType, Dest>(m_qr, idMat, false);
+ }
+
+ const SparseQRType& m_qr;
+};
+
+template<typename SparseQRType>
+struct SparseQRMatrixQTransposeReturnType
+{
+ SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {}
+ template<typename Derived>
+ SparseQR_QProduct<SparseQRType,Derived> operator*(const MatrixBase<Derived>& other)
+ {
+ return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(), true);
+ }
+ const SparseQRType& m_qr;
+};
+
+} // end namespace Eigen
+
+#endif
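
A minimal end-to-end sketch of the class documented above, assuming the COLAMDOrdering from the OrderingMethods module and a small hand-built overdetermined system (the matrix entries are placeholders):

    #include <Eigen/Sparse>
    #include <Eigen/SparseQR>
    #include <Eigen/OrderingMethods>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(4, 3);                      // 4x3 least-squares problem
      A.insert(0, 0) = 2.0;
      A.insert(1, 1) = 3.0;
      A.insert(2, 2) = 4.0;
      A.insert(3, 0) = 1.0;
      A.makeCompressed();                 // SparseQR expects compressed column-major storage

      Eigen::VectorXd b(4);
      b << 1, 2, 3, 4;

      Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
      if (qr.info() != Eigen::Success)
      {
        std::cerr << qr.lastErrorMessage() << std::endl;
        return 1;
      }
      Eigen::VectorXd x = qr.solve(b);    // least-squares solution of A*x = b
      std::cout << "rank = " << qr.rank() << "\nx =\n" << x << std::endl;
      return 0;
    }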
diff --git a/extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h b/extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h
index 11fb014dd93..bcb355760c5 100644
--- a/extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h
+++ b/extern/Eigen3/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -160,7 +160,7 @@ struct SluMatrix : SuperMatrix
res.ncol = mat.cols();
res.storage.lda = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
- res.storage.values = mat.data();
+ res.storage.values = (void*)(mat.data());
return res;
}
@@ -353,14 +353,14 @@ class SuperLUBase : internal::noncopyable
*
* \sa compute()
*/
-// template<typename Rhs>
-// inline const internal::sparse_solve_retval<SuperLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
-// {
-// eigen_assert(m_isInitialized && "SuperLU is not initialized.");
-// eigen_assert(rows()==b.rows()
-// && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
-// return internal::sparse_solve_retval<SuperLU, Rhs>(*this, b.derived());
-// }
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<SuperLUBase, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<SuperLUBase, Rhs>(*this, b.derived());
+ }
/** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
@@ -377,7 +377,7 @@ class SuperLUBase : internal::noncopyable
}
template<typename Stream>
- void dumpMemory(Stream& s)
+ void dumpMemory(Stream& /*s*/)
{}
protected:
@@ -496,8 +496,8 @@ class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
SuperLU(const MatrixType& matrix) : Base()
{
- Base::init();
- compute(matrix);
+ init();
+ Base::compute(matrix);
}
~SuperLU()
@@ -612,6 +612,7 @@ void SuperLU<MatrixType>::factorize(const MatrixType& a)
this->initFactorization(a);
+ m_sluOptions.ColPerm = COLAMD;
int info = 0;
RealScalar recip_pivot_growth, rcond;
RealScalar ferr, berr;
@@ -833,7 +834,7 @@ class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
SuperILU(const MatrixType& matrix) : Base()
{
init();
- compute(matrix);
+ Base::compute(matrix);
}
~SuperILU()
@@ -1014,7 +1015,7 @@ struct sparse_solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
- dec().derived()._solve(rhs(),dst);
+ this->defaultEvalTo(dst);
}
};
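
With the sparse-rhs solve() overload restored above, a SuperLU-backed factorization can now consume a sparse right-hand side directly. A minimal sketch, assuming the external SuperLU library is installed and linked (the matrix entries are placeholders):

    #include <Eigen/Sparse>
    #include <Eigen/SuperLUSupport>       // requires linking against SuperLU

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(3, 3), B(3, 2);
      A.insert(0, 0) = 4.0; A.insert(1, 1) = 5.0; A.insert(2, 2) = 6.0;
      B.insert(0, 0) = 1.0; B.insert(1, 1) = 2.0; B.insert(2, 0) = 3.0;
      A.makeCompressed(); B.makeCompressed();

      Eigen::SuperLU<SpMat> lu(A);
      if (lu.info() != Eigen::Success) return 1;
      SpMat X(3, 2);
      X = lu.solve(B);                    // sparse rhs, evaluated block-wise via defaultEvalTo
      return X.nonZeros() > 0 ? 0 : 1;
    }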
diff --git a/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h b/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
index f01720362de..3a48cecf769 100644
--- a/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
+++ b/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -39,7 +39,7 @@ inline int umfpack_symbolic(int n_row,int n_col,
const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
{
- return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Control,Info);
+ return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);
}
inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
@@ -53,7 +53,7 @@ inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<d
void *Symbolic, void **Numeric,
const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
{
- return umfpack_zi_numeric(Ap,Ai,&internal::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
+ return umfpack_zi_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
}
inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
@@ -67,7 +67,7 @@ inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::co
std::complex<double> X[], const std::complex<double> B[], void *Numeric,
const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
{
- return umfpack_zi_solve(sys,Ap,Ai,&internal::real_ref(Ax[0]),0,&internal::real_ref(X[0]),0,&internal::real_ref(B[0]),0,Numeric,Control,Info);
+ return umfpack_zi_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);
}
inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
@@ -89,9 +89,9 @@ inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui
inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
{
- double& lx0_real = internal::real_ref(Lx[0]);
- double& ux0_real = internal::real_ref(Ux[0]);
- double& dx0_real = internal::real_ref(Dx[0]);
+ double& lx0_real = numext::real_ref(Lx[0]);
+ double& ux0_real = numext::real_ref(Ux[0]);
+ double& dx0_real = numext::real_ref(Dx[0]);
return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
}
@@ -103,7 +103,7 @@ inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle,
inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
{
- double& mx_real = internal::real_ref(*Mx);
+ double& mx_real = numext::real_ref(*Mx);
return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
}
@@ -114,7 +114,7 @@ inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *N
* using the UmfPack library. The sparse matrix A must be square and of full rank.
* The vectors or matrices X and B can be either dense or sparse.
*
- * \WARNING The input matrix A should be in a \b compressed and \b column-major form.
+ * \warning The input matrix A should be in a \b compressed and \b column-major form.
* Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
*
@@ -215,14 +215,14 @@ class UmfPackLU : internal::noncopyable
*
* \sa compute()
*/
-// template<typename Rhs>
-// inline const internal::sparse_solve_retval<UmfPAckLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
-// {
-// eigen_assert(m_isInitialized && "UmfPAckLU is not initialized.");
-// eigen_assert(rows()==b.rows()
-// && "UmfPAckLU::solve(): invalid number of rows of the right hand side matrix b");
-// return internal::sparse_solve_retval<UmfPAckLU, Rhs>(*this, b.derived());
-// }
+ template<typename Rhs>
+ inline const internal::sparse_solve_retval<UmfPackLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "UmfPackLU is not initialized.");
+ eigen_assert(rows()==b.rows()
+ && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::sparse_solve_retval<UmfPackLU, Rhs>(*this, b.derived());
+ }
/** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
*
@@ -381,7 +381,8 @@ bool UmfPackLU<MatrixType>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDe
const int rhsCols = b.cols();
eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet");
eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
-
+ eigen_assert(b.derived().data() != x.derived().data() && "UmfPack does not support in-place solve");
+
int errorCode;
for (int j=0; j<rhsCols; ++j)
{
@@ -420,7 +421,7 @@ struct sparse_solve_retval<UmfPackLU<_MatrixType>, Rhs>
template<typename Dest> void evalTo(Dest& dst) const
{
- dec()._solve(rhs(),dst);
+ this->defaultEvalTo(dst);
}
};
diff --git a/extern/Eigen3/Eigen/src/misc/SparseSolve.h b/extern/Eigen3/Eigen/src/misc/SparseSolve.h
index 272c4a479d7..244bb8ec753 100644
--- a/extern/Eigen3/Eigen/src/misc/SparseSolve.h
+++ b/extern/Eigen3/Eigen/src/misc/SparseSolve.h
@@ -47,6 +47,23 @@ template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_b
}
protected:
+ template<typename DestScalar, int DestOptions, typename DestIndex>
+ inline void defaultEvalTo(SparseMatrix<DestScalar,DestOptions,DestIndex>& dst) const
+ {
+ // We process the sparse right-hand side in blocks of NbColsAtOnce columns, temporarily stored in a dense matrix.
+ static const int NbColsAtOnce = 4;
+ int rhsCols = m_rhs.cols();
+ int size = m_rhs.rows();
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
+ Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,rhsCols);
+ for(int k=0; k<rhsCols; k+=NbColsAtOnce)
+ {
+ int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
+ tmp.leftCols(actualCols) = m_rhs.middleCols(k,actualCols);
+ tmpX.leftCols(actualCols) = m_dec.solve(tmp.leftCols(actualCols));
+ dst.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
+ }
+ }
const DecompositionType& m_dec;
typename Rhs::Nested m_rhs;
};
diff --git a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
index 5b979ebf89d..5c8c476eecf 100644
--- a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
+++ b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
@@ -33,8 +33,14 @@ EIGEN_MAKE_CWISE_BINARY_OP(min,internal::scalar_min_op)
*
* \sa max()
*/
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const ConstantReturnType>
-(min)(const Scalar &other) const
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived,
+ const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+min
+#else
+(min)
+#endif
+(const Scalar &other) const
{
return (min)(Derived::PlainObject::Constant(rows(), cols(), other));
}
@@ -52,8 +58,14 @@ EIGEN_MAKE_CWISE_BINARY_OP(max,internal::scalar_max_op)
*
* \sa min()
*/
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const ConstantReturnType>
-(max)(const Scalar &other) const
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived,
+ const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+max
+#else
+(max)
+#endif
+(const Scalar &other) const
{
return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
}
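
The EIGEN_PARSED_BY_DOXYGEN split above exists because headers such as <windows.h> define function-like min/max macros; parenthesizing the member name keeps the declaration from being mangled while Doxygen still sees a plain name. Call sites can use the same parenthesizing trick, as in this small sketch (placeholder values):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::ArrayXd a(3);
      a << -1.0, 0.5, 2.0;
      // Parentheses around the member name prevent a function-like
      // min/max macro from firing at the call site as well.
      Eigen::ArrayXd capped  = (a.min)(1.0);   // -1.0, 0.5, 1.0
      Eigen::ArrayXd floored = (a.max)(0.0);   //  0.0, 0.5, 2.0
      return (capped(2) == 1.0 && floored(0) == 0.0) ? 0 : 1;
    }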
diff --git a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
index 0dffaf4135c..a596367906f 100644
--- a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
+++ b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -200,3 +200,4 @@ EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<=, std::less_equal)
EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>, std::greater)
EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>=, std::greater_equal)
+
diff --git a/extern/Eigen3/Eigen/src/plugins/BlockMethods.h b/extern/Eigen3/Eigen/src/plugins/BlockMethods.h
index ef224001a54..2788251e0cd 100644
--- a/extern/Eigen3/Eigen/src/plugins/BlockMethods.h
+++ b/extern/Eigen3/Eigen/src/plugins/BlockMethods.h
@@ -8,8 +8,6 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_BLOCKMETHODS_H
-#define EIGEN_BLOCKMETHODS_H
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -32,6 +30,10 @@ template<int N> struct ConstNColsBlockXpr { typedef const Block<const Derived, i
template<int N> struct NRowsBlockXpr { typedef Block<Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
template<int N> struct ConstNRowsBlockXpr { typedef const Block<const Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
+typedef VectorBlock<Derived> SegmentReturnType;
+typedef const VectorBlock<const Derived> ConstSegmentReturnType;
+template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
+template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
#endif // not EIGEN_PARSED_BY_DOXYGEN
@@ -88,12 +90,13 @@ inline const Block<const Derived> topRightCorner(Index cRows, Index cCols) const
/** \returns an expression of a fixed-size top-right corner of *this.
*
- * The template parameters CRows and CCols are the number of rows and columns in the corner.
+ * \tparam CRows the number of rows in the corner
+ * \tparam CCols the number of columns in the corner
*
* Example: \include MatrixBase_template_int_int_topRightCorner.cpp
* Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out
*
- * \sa class Block, block(Index,Index,Index,Index)
+ * \sa class Block, block<int,int>(Index,Index)
*/
template<int CRows, int CCols>
inline Block<Derived, CRows, CCols> topRightCorner()
@@ -108,6 +111,35 @@ inline const Block<const Derived, CRows, CCols> topRightCorner() const
return Block<const Derived, CRows, CCols>(derived(), 0, cols() - CCols);
}
+/** \returns an expression of a top-right corner of *this.
+ *
+ * \tparam CRows number of rows in corner as specified at compile-time
+ * \tparam CCols number of columns in corner as specified at compile-time
+ * \param cRows number of rows in corner as specified at run-time
+ * \param cCols number of columns in corner as specified at run-time
+ *
+ * This function is mainly useful for corners where the number of rows is specified at compile-time
+ * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+ * information should not contradict. In other words, \a cRows should equal \a CRows unless
+ * \a CRows is \a Dynamic, and the same for the number of columns.
+ *
+ * Example: \include MatrixBase_template_int_int_topRightCorner_int_int.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_topRightCorner_int_int.out
+ *
+ * \sa class Block
+ */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> topRightCorner(Index cRows, Index cCols)
+{
+ return Block<Derived, CRows, CCols>(derived(), 0, cols() - cCols, cRows, cCols);
+}
+
+/** This is the const version of topRightCorner<int, int>(Index, Index).*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> topRightCorner(Index cRows, Index cCols) const
+{
+ return Block<const Derived, CRows, CCols>(derived(), 0, cols() - cCols, cRows, cCols);
+}
@@ -154,6 +186,36 @@ inline const Block<const Derived, CRows, CCols> topLeftCorner() const
return Block<const Derived, CRows, CCols>(derived(), 0, 0);
}
+/** \returns an expression of a top-left corner of *this.
+ *
+ * \tparam CRows number of rows in corner as specified at compile-time
+ * \tparam CCols number of columns in corner as specified at compile-time
+ * \param cRows number of rows in corner as specified at run-time
+ * \param cCols number of columns in corner as specified at run-time
+ *
+ * This function is mainly useful for corners where the number of rows is specified at compile-time
+ * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+ * information should not contradict. In other words, \a cRows should equal \a CRows unless
+ * \a CRows is \a Dynamic, and the same for the number of columns.
+ *
+ * Example: \include MatrixBase_template_int_int_topLeftCorner_int_int.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_topLeftCorner_int_int.out
+ *
+ * \sa class Block
+ */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> topLeftCorner(Index cRows, Index cCols)
+{
+ return Block<Derived, CRows, CCols>(derived(), 0, 0, cRows, cCols);
+}
+
+/** This is the const version of topLeftCorner<int, int>(Index, Index).*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> topLeftCorner(Index cRows, Index cCols) const
+{
+ return Block<const Derived, CRows, CCols>(derived(), 0, 0, cRows, cCols);
+}
+
/** \returns a dynamic-size expression of a bottom-right corner of *this.
@@ -199,6 +261,36 @@ inline const Block<const Derived, CRows, CCols> bottomRightCorner() const
return Block<const Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
}
+/** \returns an expression of a bottom-right corner of *this.
+ *
+ * \tparam CRows number of rows in corner as specified at compile-time
+ * \tparam CCols number of columns in corner as specified at compile-time
+ * \param cRows number of rows in corner as specified at run-time
+ * \param cCols number of columns in corner as specified at run-time
+ *
+ * This function is mainly useful for corners where the number of rows is specified at compile-time
+ * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+ * information should not contradict. In other words, \a cRows should equal \a CRows unless
+ * \a CRows is \a Dynamic, and the same for the number of columns.
+ *
+ * Example: \include MatrixBase_template_int_int_bottomRightCorner_int_int.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner_int_int.out
+ *
+ * \sa class Block
+ */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> bottomRightCorner(Index cRows, Index cCols)
+{
+ return Block<Derived, CRows, CCols>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+}
+
+/** This is the const version of bottomRightCorner<int, int>(Index, Index).*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> bottomRightCorner(Index cRows, Index cCols) const
+{
+ return Block<const Derived, CRows, CCols>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+}
+
/** \returns a dynamic-size expression of a bottom-left corner of *this.
@@ -244,6 +336,36 @@ inline const Block<const Derived, CRows, CCols> bottomLeftCorner() const
return Block<const Derived, CRows, CCols>(derived(), rows() - CRows, 0);
}
+/** \returns an expression of a bottom-left corner of *this.
+ *
+ * \tparam CRows number of rows in corner as specified at compile-time
+ * \tparam CCols number of columns in corner as specified at compile-time
+ * \param cRows number of rows in corner as specified at run-time
+ * \param cCols number of columns in corner as specified at run-time
+ *
+ * This function is mainly useful for corners where the number of rows is specified at compile-time
+ * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+ * information should not contradict. In other words, \a cRows should equal \a CRows unless
+ * \a CRows is \a Dynamic, and the same for the number of columns.
+ *
+ * Example: \include MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner_int_int.out
+ *
+ * \sa class Block
+ */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> bottomLeftCorner(Index cRows, Index cCols)
+{
+ return Block<Derived, CRows, CCols>(derived(), rows() - cRows, 0, cRows, cCols);
+}
+
+/** This is the const version of bottomLeftCorner<int, int>(Index, Index).*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> bottomLeftCorner(Index cRows, Index cCols) const
+{
+ return Block<const Derived, CRows, CCols>(derived(), rows() - cRows, 0, cRows, cCols);
+}
+
/** \returns a block consisting of the top rows of *this.
@@ -268,7 +390,11 @@ inline ConstRowsBlockXpr topRows(Index n) const
/** \returns a block consisting of the top rows of *this.
*
- * \tparam N the number of rows in the block
+ * \tparam N the number of rows in the block as specified at compile-time
+ * \param n the number of rows in the block as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_topRows.cpp
* Output: \verbinclude MatrixBase_template_int_topRows.out
@@ -276,16 +402,16 @@ inline ConstRowsBlockXpr topRows(Index n) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
-inline typename NRowsBlockXpr<N>::Type topRows()
+inline typename NRowsBlockXpr<N>::Type topRows(Index n = N)
{
- return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, N, cols());
+ return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
/** This is the const version of topRows<int>().*/
template<int N>
-inline typename ConstNRowsBlockXpr<N>::Type topRows() const
+inline typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const
{
- return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, N, cols());
+ return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
}
@@ -312,7 +438,11 @@ inline ConstRowsBlockXpr bottomRows(Index n) const
/** \returns a block consisting of the bottom rows of *this.
*
- * \tparam N the number of rows in the block
+ * \tparam N the number of rows in the block as specified at compile-time
+ * \param n the number of rows in the block as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_bottomRows.cpp
* Output: \verbinclude MatrixBase_template_int_bottomRows.out
@@ -320,16 +450,16 @@ inline ConstRowsBlockXpr bottomRows(Index n) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
-inline typename NRowsBlockXpr<N>::Type bottomRows()
+inline typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)
{
- return typename NRowsBlockXpr<N>::Type(derived(), rows() - N, 0, N, cols());
+ return typename NRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
/** This is the const version of bottomRows<int>().*/
template<int N>
-inline typename ConstNRowsBlockXpr<N>::Type bottomRows() const
+inline typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const
{
- return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - N, 0, N, cols());
+ return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
}
@@ -337,28 +467,32 @@ inline typename ConstNRowsBlockXpr<N>::Type bottomRows() const
/** \returns a block consisting of a range of rows of *this.
*
* \param startRow the index of the first row in the block
- * \param numRows the number of rows in the block
+ * \param n the number of rows in the block
*
* Example: \include DenseBase_middleRows_int.cpp
* Output: \verbinclude DenseBase_middleRows_int.out
*
* \sa class Block, block(Index,Index,Index,Index)
*/
-inline RowsBlockXpr middleRows(Index startRow, Index numRows)
+inline RowsBlockXpr middleRows(Index startRow, Index n)
{
- return RowsBlockXpr(derived(), startRow, 0, numRows, cols());
+ return RowsBlockXpr(derived(), startRow, 0, n, cols());
}
/** This is the const version of middleRows(Index,Index).*/
-inline ConstRowsBlockXpr middleRows(Index startRow, Index numRows) const
+inline ConstRowsBlockXpr middleRows(Index startRow, Index n) const
{
- return ConstRowsBlockXpr(derived(), startRow, 0, numRows, cols());
+ return ConstRowsBlockXpr(derived(), startRow, 0, n, cols());
}
/** \returns a block consisting of a range of rows of *this.
*
- * \tparam N the number of rows in the block
+ * \tparam N the number of rows in the block as specified at compile-time
* \param startRow the index of the first row in the block
+ * \param n the number of rows in the block as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include DenseBase_template_int_middleRows.cpp
* Output: \verbinclude DenseBase_template_int_middleRows.out
@@ -366,16 +500,16 @@ inline ConstRowsBlockXpr middleRows(Index startRow, Index numRows) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
-inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow)
+inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)
{
- return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, N, cols());
+ return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
/** This is the const version of middleRows<int>().*/
template<int N>
-inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow) const
+inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const
{
- return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, N, cols());
+ return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
}
@@ -402,7 +536,11 @@ inline ConstColsBlockXpr leftCols(Index n) const
/** \returns a block consisting of the left columns of *this.
*
- * \tparam N the number of columns in the block
+ * \tparam N the number of columns in the block as specified at compile-time
+ * \param n the number of columns in the block as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_leftCols.cpp
* Output: \verbinclude MatrixBase_template_int_leftCols.out
@@ -410,16 +548,16 @@ inline ConstColsBlockXpr leftCols(Index n) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
-inline typename NColsBlockXpr<N>::Type leftCols()
+inline typename NColsBlockXpr<N>::Type leftCols(Index n = N)
{
- return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), N);
+ return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
/** This is the const version of leftCols<int>().*/
template<int N>
-inline typename ConstNColsBlockXpr<N>::Type leftCols() const
+inline typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const
{
- return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), N);
+ return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
}
@@ -446,7 +584,11 @@ inline ConstColsBlockXpr rightCols(Index n) const
/** \returns a block consisting of the right columns of *this.
*
- * \tparam N the number of columns in the block
+ * \tparam N the number of columns in the block as specified at compile-time
+ * \param n the number of columns in the block as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include MatrixBase_template_int_rightCols.cpp
* Output: \verbinclude MatrixBase_template_int_rightCols.out
@@ -454,16 +596,16 @@ inline ConstColsBlockXpr rightCols(Index n) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
-inline typename NColsBlockXpr<N>::Type rightCols()
+inline typename NColsBlockXpr<N>::Type rightCols(Index n = N)
{
- return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - N, rows(), N);
+ return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
/** This is the const version of rightCols<int>().*/
template<int N>
-inline typename ConstNColsBlockXpr<N>::Type rightCols() const
+inline typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const
{
- return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - N, rows(), N);
+ return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
}
@@ -491,8 +633,12 @@ inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const
/** \returns a block consisting of a range of columns of *this.
*
- * \tparam N the number of columns in the block
+ * \tparam N the number of columns in the block as specified at compile-time
* \param startCol the index of the first column in the block
+ * \param n the number of columns in the block as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
*
* Example: \include DenseBase_template_int_middleCols.cpp
* Output: \verbinclude DenseBase_template_int_middleCols.out
@@ -500,16 +646,16 @@ inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const
* \sa class Block, block(Index,Index,Index,Index)
*/
template<int N>
-inline typename NColsBlockXpr<N>::Type middleCols(Index startCol)
+inline typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)
{
- return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), N);
+ return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
/** This is the const version of middleCols<int>().*/
template<int N>
-inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol) const
+inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const
{
- return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), N);
+ return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
}
@@ -543,6 +689,40 @@ inline const Block<const Derived, BlockRows, BlockCols> block(Index startRow, In
return Block<const Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
}
+/** \returns an expression of a block in *this.
+ *
+ * \tparam BlockRows number of rows in block as specified at compile-time
+ * \tparam BlockCols number of columns in block as specified at compile-time
+ * \param startRow the first row in the block
+ * \param startCol the first column in the block
+ * \param blockRows number of rows in block as specified at run-time
+ * \param blockCols number of columns in block as specified at run-time
+ *
+ * This function is mainly useful for blocks where the number of rows is specified at compile-time
+ * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+ * information should not contradict. In other words, \a blockRows should equal \a BlockRows unless
+ * \a BlockRows is \a Dynamic, and the same for the number of columns.
+ *
+ * Example: \include MatrixBase_template_int_int_block_int_int_int_int.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int BlockRows, int BlockCols>
+inline Block<Derived, BlockRows, BlockCols> block(Index startRow, Index startCol,
+ Index blockRows, Index blockCols)
+{
+ return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol, blockRows, blockCols);
+}
+
+/** This is the const version of block<>(Index, Index, Index, Index). */
+template<int BlockRows, int BlockCols>
+inline const Block<const Derived, BlockRows, BlockCols> block(Index startRow, Index startCol,
+ Index blockRows, Index blockCols) const
+{
+ return Block<const Derived, BlockRows, BlockCols>(derived(), startRow, startCol, blockRows, blockCols);
+}
+
/** \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0.
*
* Example: \include MatrixBase_col.cpp
@@ -577,4 +757,179 @@ inline ConstRowXpr row(Index i) const
return ConstRowXpr(derived(), i);
}
-#endif // EIGEN_BLOCKMETHODS_H
+/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
+ *
+ * \only_for_vectors
+ *
+ * \param start the first coefficient in the segment
+ * \param n the number of coefficients in the segment
+ *
+ * Example: \include MatrixBase_segment_int_int.cpp
+ * Output: \verbinclude MatrixBase_segment_int_int.out
+ *
+ * \note Even though the returned expression has dynamic size, in the case
+ * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+ * which means that evaluating it does not cause a dynamic memory allocation.
+ *
+ * \sa class Block, segment(Index)
+ */
+inline SegmentReturnType segment(Index start, Index n)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return SegmentReturnType(derived(), start, n);
+}
+
+
+/** This is the const version of segment(Index,Index).*/
+inline ConstSegmentReturnType segment(Index start, Index n) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return ConstSegmentReturnType(derived(), start, n);
+}
+
+/** \returns a dynamic-size expression of the first coefficients of *this.
+ *
+ * \only_for_vectors
+ *
+ * \param n the number of coefficients in the segment
+ *
+ * Example: \include MatrixBase_start_int.cpp
+ * Output: \verbinclude MatrixBase_start_int.out
+ *
+ * \note Even though the returned expression has dynamic size, in the case
+ * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+ * which means that evaluating it does not cause a dynamic memory allocation.
+ *
+ * \sa class Block, block(Index,Index)
+ */
+inline SegmentReturnType head(Index n)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return SegmentReturnType(derived(), 0, n);
+}
+
+/** This is the const version of head(Index).*/
+inline ConstSegmentReturnType head(Index n) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return ConstSegmentReturnType(derived(), 0, n);
+}
+
+/** \returns a dynamic-size expression of the last coefficients of *this.
+ *
+ * \only_for_vectors
+ *
+ * \param n the number of coefficients in the segment
+ *
+ * Example: \include MatrixBase_end_int.cpp
+ * Output: \verbinclude MatrixBase_end_int.out
+ *
+ * \note Even though the returned expression has dynamic size, in the case
+ * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+ * which means that evaluating it does not cause a dynamic memory allocation.
+ *
+ * \sa class Block, block(Index,Index)
+ */
+inline SegmentReturnType tail(Index n)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return SegmentReturnType(derived(), this->size() - n, n);
+}
+
+/** This is the const version of tail(Index).*/
+inline ConstSegmentReturnType tail(Index n) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return ConstSegmentReturnType(derived(), this->size() - n, n);
+}
+
+/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
+ *
+ * \only_for_vectors
+ *
+ * \tparam N the number of coefficients in the segment as specified at compile-time
+ * \param start the index of the first element in the segment
+ * \param n the number of coefficients in the segment as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
+ *
+ * Example: \include MatrixBase_template_int_segment.cpp
+ * Output: \verbinclude MatrixBase_template_int_segment.out
+ *
+ * \sa class Block
+ */
+template<int N>
+inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename FixedSegmentReturnType<N>::Type(derived(), start, n);
+}
+
+/** This is the const version of segment<int>(Index).*/
+template<int N>
+inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename ConstFixedSegmentReturnType<N>::Type(derived(), start, n);
+}
+
+/** \returns a fixed-size expression of the first coefficients of *this.
+ *
+ * \only_for_vectors
+ *
+ * \tparam N the number of coefficients in the segment as specified at compile-time
+ * \param n the number of coefficients in the segment as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
+ *
+ * Example: \include MatrixBase_template_int_start.cpp
+ * Output: \verbinclude MatrixBase_template_int_start.out
+ *
+ * \sa class Block
+ */
+template<int N>
+inline typename FixedSegmentReturnType<N>::Type head(Index n = N)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename FixedSegmentReturnType<N>::Type(derived(), 0, n);
+}
+
+/** This is the const version of head<int>().*/
+template<int N>
+inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename ConstFixedSegmentReturnType<N>::Type(derived(), 0, n);
+}
+
+/** \returns a fixed-size expression of the last coefficients of *this.
+ *
+ * \only_for_vectors
+ *
+ * \tparam N the number of coefficients in the segment as specified at compile-time
+ * \param n the number of coefficients in the segment as specified at run-time
+ *
+ * The compile-time and run-time information should not contradict. In other words,
+ * \a n should equal \a N unless \a N is \a Dynamic.
+ *
+ * Example: \include MatrixBase_template_int_end.cpp
+ * Output: \verbinclude MatrixBase_template_int_end.out
+ *
+ * \sa class Block
+ */
+template<int N>
+inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename FixedSegmentReturnType<N>::Type(derived(), size() - n);
+}
+
+/** This is the const version of tail<int>().*/
+template<int N>
+inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n);
+}
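
A short sketch exercising the hybrid compile-time/run-time overloads added above (fixed-size head/tail/segment with an optional run-time length, plus a corner with fixed rows and run-time columns; the values are placeholders):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::VectorXd v(6);
      v << 0, 1, 2, 3, 4, 5;
      std::cout << v.head<3>().transpose()     << "\n";  // 0 1 2
      std::cout << v.tail<2>().transpose()     << "\n";  // 4 5
      std::cout << v.segment<2>(1).transpose() << "\n";  // 1 2

      Eigen::MatrixXd M = Eigen::MatrixXd::Random(4, 5);
      // Two rows fixed at compile time, column count chosen at run time.
      std::cout << M.topLeftCorner<2, Eigen::Dynamic>(2, 3) << "\n";
      return 0;
    }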
diff --git a/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h
index 3a737df7b86..7f62149e04b 100644
--- a/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h
+++ b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h
@@ -83,7 +83,7 @@ cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const ConstantReturnType>
cwiseMin(const Scalar &other) const
{
- return cwiseMin(Derived::PlainObject::Constant(rows(), cols(), other));
+ return cwiseMin(Derived::Constant(rows(), cols(), other));
}
/** \returns an expression of the coefficient-wise max of *this and \a other
@@ -107,7 +107,7 @@ cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const ConstantReturnType>
cwiseMax(const Scalar &other) const
{
- return cwiseMax(Derived::PlainObject::Constant(rows(), cols(), other));
+ return cwiseMax(Derived::Constant(rows(), cols(), other));
}
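
Usage of the scalar overloads is unchanged by the switch to Derived::Constant; a minimal sketch (placeholder values):

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd M(2, 2);
      M << -1.0, 0.5,
            2.0, 3.0;
      Eigen::MatrixXd clamped = M.cwiseMax(0.0).cwiseMin(1.0);  // entries limited to [0, 1]
      return (clamped(0, 0) == 0.0 && clamped(1, 1) == 1.0) ? 0 : 1;
    }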
diff --git a/extern/Eigen3/eigen-update.sh b/extern/Eigen3/eigen-update.sh
index 7be67890173..1cf0337adf6 100755
--- a/extern/Eigen3/eigen-update.sh
+++ b/extern/Eigen3/eigen-update.sh
@@ -17,7 +17,7 @@ if [ -d eigen ]
then
cd eigen
# put here the version you want to use
- hg up 3.0
+ hg up 3.2.1
rm -f `find Eigen/ -type f -name "CMakeLists.txt"`
cp -r Eigen ..
cd ..