git.blender.org/blender.git
path: root/extern
Diffstat (limited to 'extern')
-rw-r--r-- extern/CMakeLists.txt | 6
-rw-r--r-- extern/Eigen2/Eigen/Array | 39
-rw-r--r-- extern/Eigen2/Eigen/Cholesky | 65
-rw-r--r-- extern/Eigen2/Eigen/Core | 155
-rw-r--r-- extern/Eigen2/Eigen/Eigen | 2
-rw-r--r-- extern/Eigen2/Eigen/Geometry | 51
-rw-r--r-- extern/Eigen2/Eigen/LeastSquares | 27
-rw-r--r-- extern/Eigen2/Eigen/QR | 73
-rw-r--r-- extern/Eigen2/Eigen/QtAlignedMalloc | 29
-rw-r--r-- extern/Eigen2/Eigen/Sparse | 132
-rw-r--r-- extern/Eigen2/Eigen/StdVector | 147
-rw-r--r-- extern/Eigen2/Eigen/src/Array/CwiseOperators.h | 453
-rw-r--r-- extern/Eigen2/Eigen/src/Array/Functors.h | 309
-rw-r--r-- extern/Eigen2/Eigen/src/Array/Norms.h | 80
-rw-r--r-- extern/Eigen2/Eigen/src/Array/PartialRedux.h | 347
-rw-r--r-- extern/Eigen2/Eigen/src/Cholesky/LDLT.h | 198
-rw-r--r-- extern/Eigen2/Eigen/src/Cholesky/LLT.h | 219
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Assign.h | 445
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Block.h | 752
-rw-r--r-- extern/Eigen2/Eigen/src/Core/CacheFriendlyProduct.h | 753
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Coeffs.h | 384
-rw-r--r-- extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h | 304
-rw-r--r-- extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h | 229
-rw-r--r-- extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h | 124
-rw-r--r-- extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h | 144
-rw-r--r-- extern/Eigen2/Eigen/src/Core/DiagonalProduct.h | 130
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Dot.h | 361
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Functors.h | 378
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Fuzzy.h | 234
-rw-r--r-- extern/Eigen2/Eigen/src/Core/GenericPacketMath.h | 150
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Map.h | 111
-rw-r--r-- extern/Eigen2/Eigen/src/Core/MapBase.h | 202
-rw-r--r-- extern/Eigen2/Eigen/src/Core/MathFunctions.h | 295
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Matrix.h | 639
-rw-r--r-- extern/Eigen2/Eigen/src/Core/MatrixBase.h | 632
-rw-r--r-- extern/Eigen2/Eigen/src/Core/MatrixStorage.h | 249
-rw-r--r-- extern/Eigen2/Eigen/src/Core/NumTraits.h | 142
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Part.h | 377
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Product.h | 769
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Redux.h | 117
-rw-r--r-- extern/Eigen2/Eigen/src/Core/SolveTriangular.h | 297
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Sum.h | 271
-rw-r--r-- extern/Eigen2/Eigen/src/Core/Transpose.h | 228
-rw-r--r-- extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h | 354
-rw-r--r-- extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h | 321
-rw-r--r-- extern/Eigen2/Eigen/src/Core/util/Constants.h | 254
-rw-r--r-- extern/Eigen2/Eigen/src/Core/util/DisableMSVCWarnings.h | 5
-rw-r--r-- extern/Eigen2/Eigen/src/Core/util/EnableMSVCWarnings.h | 4
-rw-r--r-- extern/Eigen2/Eigen/src/Core/util/ForwardDeclarations.h | 125
-rw-r--r-- extern/Eigen2/Eigen/src/Core/util/Macros.h | 273
-rw-r--r-- extern/Eigen2/Eigen/src/Core/util/Memory.h | 387
-rw-r--r-- extern/Eigen2/Eigen/src/Core/util/Meta.h | 183
-rw-r--r-- extern/Eigen2/Eigen/src/Core/util/XprHelper.h | 219
-rw-r--r-- extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h | 119
-rw-r--r-- extern/Eigen2/Eigen/src/LU/Inverse.h | 258
-rw-r--r-- extern/Eigen2/Eigen/src/LU/LU.h | 541
-rw-r--r-- extern/Eigen2/Eigen/src/QR/EigenSolver.h | 722
-rw-r--r-- extern/Eigen2/Eigen/src/QR/HessenbergDecomposition.h | 250
-rw-r--r-- extern/Eigen2/Eigen/src/QR/QR.h | 334
-rw-r--r-- extern/Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h | 402
-rw-r--r-- extern/Eigen2/Eigen/src/QR/Tridiagonalization.h | 431
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h | 236
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/RandomSetter.h | 330
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseCwise.h | 178
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h | 453
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h | 186
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h | 159
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h | 102
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h | 346
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseLLT.h | 205
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseLU.h | 148
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h | 452
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseProduct.h | 415
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h | 90
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SparseUtil.h | 148
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h | 565
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h | 210
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h | 178
-rw-r--r-- extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h | 289
-rw-r--r-- extern/Eigen3/Eigen/Array | 11
-rw-r--r-- extern/Eigen3/Eigen/Cholesky | 33
-rw-r--r-- extern/Eigen3/Eigen/Core | 360
-rw-r--r-- extern/Eigen3/Eigen/Dense (renamed from extern/Eigen2/Eigen/Dense) | 3
-rw-r--r-- extern/Eigen3/Eigen/Eigen | 2
-rw-r--r-- extern/Eigen3/Eigen/Eigen2Support | 82
-rw-r--r-- extern/Eigen3/Eigen/Eigenvalues | 44
-rw-r--r-- extern/Eigen3/Eigen/Geometry | 67
-rw-r--r-- extern/Eigen3/Eigen/Householder | 27
-rw-r--r-- extern/Eigen3/Eigen/Jacobi | 30
-rw-r--r-- extern/Eigen3/Eigen/LU (renamed from extern/Eigen2/Eigen/LU) | 19
-rw-r--r-- extern/Eigen3/Eigen/LeastSquares | 36
-rw-r--r-- extern/Eigen3/Eigen/QR | 45
-rw-r--r-- extern/Eigen3/Eigen/QtAlignedMalloc | 34
-rw-r--r-- extern/Eigen3/Eigen/SVD (renamed from extern/Eigen2/Eigen/SVD) | 19
-rw-r--r-- extern/Eigen3/Eigen/Sparse | 69
-rw-r--r-- extern/Eigen3/Eigen/StdDeque (renamed from extern/Eigen2/Eigen/src/Sparse/SparseRedux.h) | 38
-rw-r--r-- extern/Eigen3/Eigen/StdList (renamed from extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp) | 30
-rw-r--r-- extern/Eigen3/Eigen/StdVector (renamed from extern/Eigen2/Eigen/src/Core/CoreInstantiations.cpp) | 35
-rw-r--r-- extern/Eigen3/Eigen/src/Cholesky/LDLT.h | 484
-rw-r--r-- extern/Eigen3/Eigen/src/Cholesky/LLT.h | 386
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Array.h | 322
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ArrayBase.h | 239
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ArrayWrapper.h | 239
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Assign.h | 593
-rw-r--r-- extern/Eigen3/Eigen/src/Core/BandMatrix.h | 346
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Block.h | 349
-rw-r--r-- extern/Eigen3/Eigen/src/Core/BooleanRedux.h (renamed from extern/Eigen2/Eigen/src/Array/BooleanRedux.h) | 80
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CommaInitializer.h (renamed from extern/Eigen2/Eigen/src/Core/CommaInitializer.h) | 70
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h | 240
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CwiseNullaryOp.h (renamed from extern/Eigen2/Eigen/src/Core/CwiseNullaryOp.h) | 374
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h | 137
-rw-r--r-- extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h | 148
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DenseBase.h | 543
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h | 765
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DenseStorage.h | 304
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Diagonal.h | 227
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h | 306
-rw-r--r-- extern/Eigen3/Eigen/src/Core/DiagonalProduct.h | 135
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Dot.h | 272
-rw-r--r-- extern/Eigen3/Eigen/src/Core/EigenBase.h | 172
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Flagged.h (renamed from extern/Eigen2/Eigen/src/Core/Flagged.h) | 90
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ForceAlignedAccess.h | 157
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Functors.h | 942
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Fuzzy.h | 161
-rw-r--r-- extern/Eigen3/Eigen/src/Core/GenericPacketMath.h | 339
-rw-r--r-- extern/Eigen3/Eigen/src/Core/GlobalFunctions.h | 95
-rw-r--r-- extern/Eigen3/Eigen/src/Core/IO.h (renamed from extern/Eigen2/Eigen/src/Core/IO.h) | 128
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Map.h | 205
-rw-r--r-- extern/Eigen3/Eigen/src/Core/MapBase.h | 255
-rw-r--r-- extern/Eigen3/Eigen/src/Core/MathFunctions.h | 843
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Matrix.h | 439
-rw-r--r-- extern/Eigen3/Eigen/src/Core/MatrixBase.h | 520
-rw-r--r-- extern/Eigen3/Eigen/src/Core/NestByValue.h (renamed from extern/Eigen2/Eigen/src/Core/NestByValue.h) | 45
-rw-r--r-- extern/Eigen3/Eigen/src/Core/NoAlias.h | 136
-rw-r--r-- extern/Eigen3/Eigen/src/Core/NumTraits.h | 160
-rw-r--r-- extern/Eigen3/Eigen/src/Core/PermutationMatrix.h | 696
-rw-r--r-- extern/Eigen3/Eigen/src/Core/PlainObjectBase.h | 737
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Product.h | 625
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ProductBase.h | 288
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Random.h (renamed from extern/Eigen2/Eigen/src/Array/Random.h) | 97
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Redux.h | 404
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Replicate.h | 179
-rw-r--r-- extern/Eigen3/Eigen/src/Core/ReturnByValue.h | 99
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Reverse.h | 230
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Select.h (renamed from extern/Eigen2/Eigen/src/Array/Select.h) | 83
-rw-r--r-- extern/Eigen3/Eigen/src/Core/SelfAdjointView.h | 325
-rw-r--r-- extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h | 195
-rw-r--r-- extern/Eigen3/Eigen/src/Core/SolveTriangular.h | 263
-rw-r--r-- extern/Eigen3/Eigen/src/Core/StableNorm.h | 190
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Stride.h | 119
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Swap.h (renamed from extern/Eigen2/Eigen/src/Core/Swap.h) | 85
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Transpose.h | 425
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Transpositions.h | 447
-rw-r--r-- extern/Eigen3/Eigen/src/Core/TriangularMatrix.h | 838
-rw-r--r-- extern/Eigen3/Eigen/src/Core/VectorBlock.h | 296
-rw-r--r-- extern/Eigen3/Eigen/src/Core/VectorwiseOp.h | 557
-rw-r--r-- extern/Eigen3/Eigen/src/Core/Visitor.h (renamed from extern/Eigen2/Eigen/src/Core/Visitor.h) | 114
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/AltiVec/Complex.h | 228
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h | 509
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/Default/Settings.h | 64
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h | 270
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h | 420
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h | 447
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h | 395
-rw-r--r-- extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h | 634
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h | 452
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h | 1285
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h | 439
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h | 225
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h | 559
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/Parallelizer.h | 154
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h | 427
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h | 278
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h | 136
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h | 104
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h | 403
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h | 325
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h | 319
-rw-r--r-- extern/Eigen3/Eigen/src/Core/products/TriangularSolverVector.h | 150
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/BlasUtil.h | 271
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/Constants.h | 439
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/DisableStupidWarnings.h | 42
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h | 307
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/Macros.h | 418
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/Memory.h | 911
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/Meta.h | 229
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h | 14
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/StaticAssert.h (renamed from extern/Eigen2/Eigen/src/Core/util/StaticAssert.h) | 90
-rw-r--r-- extern/Eigen3/Eigen/src/Core/util/XprHelper.h | 460
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Block.h | 137
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Cwise.h (renamed from extern/Eigen2/Eigen/src/Core/Cwise.h) | 87
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/CwiseOperators.h | 309
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AlignedBox.h (renamed from extern/Eigen2/Eigen/src/Geometry/AlignedBox.h) | 35
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/All.h | 115
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AngleAxis.h (renamed from extern/Eigen2/Eigen/src/Geometry/AngleAxis.h) | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h (renamed from extern/Eigen2/Eigen/src/Geometry/Hyperplane.h) | 29
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h (renamed from extern/Eigen2/Eigen/src/Geometry/ParametrizedLine.h) | 18
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h (renamed from extern/Eigen2/Eigen/src/Geometry/Quaternion.h) | 77
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h (renamed from extern/Eigen2/Eigen/src/Geometry/Rotation2D.h) | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h (renamed from extern/Eigen2/Eigen/src/Geometry/RotationBase.h) | 5
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h (renamed from extern/Eigen2/Eigen/src/Geometry/Scaling.h) | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h (renamed from extern/Eigen2/Eigen/src/Geometry/Transform.h) | 67
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h (renamed from extern/Eigen2/Eigen/src/Geometry/Translation.h) | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/LU.h | 133
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Lazy.h | 82
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h (renamed from extern/Eigen2/Eigen/src/LeastSquares/LeastSquares.h) | 6
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Macros.h (renamed from extern/Eigen2/Eigen/src/Cholesky/CholeskyInstantiations.cpp) | 22
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h | 68
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Memory.h | 58
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Meta.h | 86
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/Minor.h (renamed from extern/Eigen2/Eigen/src/Core/Minor.h) | 48
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/QR.h | 79
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/SVD.h (renamed from extern/Eigen2/Eigen/src/SVD/SVD.h) | 49
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/TriangularSolver.h | 53
-rw-r--r-- extern/Eigen3/Eigen/src/Eigen2Support/VectorBlock.h | 105
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h | 332
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h | 448
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h | 588
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/EigenvaluesCommon.h | 31
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h | 239
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h | 384
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h | 170
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h | 474
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 520
-rw-r--r-- extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h | 568
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/AlignedBox.h | 352
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/AngleAxis.h | 241
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/EulerAngles.h (renamed from extern/Eigen2/Eigen/src/Geometry/EulerAngles.h) | 34
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Homogeneous.h | 318
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Hyperplane.h | 280
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h | 229
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h | 168
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Quaternion.h | 751
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Rotation2D.h | 165
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/RotationBase.h | 217
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Scaling.h | 182
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Transform.h | 1396
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Translation.h | 215
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/Umeyama.h | 183
-rw-r--r-- extern/Eigen3/Eigen/src/Geometry/arch/Geometry_SSE.h | 126
-rw-r--r-- extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h | 79
-rw-r--r-- extern/Eigen3/Eigen/src/Householder/Householder.h | 133
-rw-r--r-- extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h | 429
-rw-r--r-- extern/Eigen3/Eigen/src/Jacobi/Jacobi.h | 430
-rw-r--r-- extern/Eigen3/Eigen/src/LU/Determinant.h (renamed from extern/Eigen2/Eigen/src/LU/Determinant.h) | 76
-rw-r--r-- extern/Eigen3/Eigen/src/LU/FullPivLU.h | 754
-rw-r--r-- extern/Eigen3/Eigen/src/LU/Inverse.h | 407
-rw-r--r-- extern/Eigen3/Eigen/src/LU/PartialPivLU.h | 509
-rw-r--r-- extern/Eigen3/Eigen/src/LU/arch/Inverse_SSE.h | 340
-rw-r--r-- extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h | 532
-rw-r--r-- extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h | 546
-rw-r--r-- extern/Eigen3/Eigen/src/QR/HouseholderQR.h | 355
-rw-r--r-- extern/Eigen3/Eigen/src/SVD/JacobiSVD.h | 716
-rw-r--r-- extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h | 159
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/AmbiVector.h (renamed from extern/Eigen2/Eigen/src/Sparse/AmbiVector.h) | 136
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h (renamed from extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h) | 65
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/CoreIterators.h (renamed from extern/Eigen2/Eigen/src/Sparse/CoreIterators.h) | 25
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h (renamed from extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h) | 235
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h (renamed from extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h) | 118
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseAssign.h (renamed from extern/Eigen2/Eigen/src/Sparse/SparseAssign.h) | 0
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseBlock.h (renamed from extern/Eigen2/Eigen/src/Sparse/SparseBlock.h) | 359
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h | 375
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h | 146
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h | 231
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h | 195
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseDot.h (renamed from extern/Eigen2/Eigen/src/Sparse/SparseDot.h) | 36
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h (renamed from extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h) | 10
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h | 651
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h (renamed from extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h) | 428
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseProduct.h | 141
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseRedux.h | 56
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h | 454
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h | 401
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h | 68
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h | 100
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseUtil.h | 130
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseVector.h (renamed from extern/Eigen2/Eigen/src/Sparse/SparseVector.h) | 263
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/SparseView.h | 109
-rw-r--r-- extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h | 339
-rw-r--r-- extern/Eigen3/Eigen/src/StlSupport/StdDeque.h | 149
-rw-r--r-- extern/Eigen3/Eigen/src/StlSupport/StdList.h | 129
-rw-r--r-- extern/Eigen3/Eigen/src/StlSupport/StdVector.h (renamed from extern/Eigen2/Eigen/NewStdVector) | 127
-rw-r--r-- extern/Eigen3/Eigen/src/StlSupport/details.h | 99
-rw-r--r-- extern/Eigen3/Eigen/src/misc/Image.h | 95
-rw-r--r-- extern/Eigen3/Eigen/src/misc/Kernel.h | 92
-rw-r--r-- extern/Eigen3/Eigen/src/misc/Solve.h | 87
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h | 143
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h | 202
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/BlockMethods.h | 595
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/CommonCwiseBinaryOps.h | 61
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/CommonCwiseUnaryOps.h | 187
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h | 120
-rw-r--r-- extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h | 82
-rwxr-xr-x extern/Eigen3/eigen-update.sh (renamed from extern/Eigen2/eigen-update.sh) | 14
-rw-r--r-- extern/SConscript | 4
-rw-r--r-- extern/colamd/CMakeLists.txt | 41
-rw-r--r-- extern/colamd/Doc/ChangeLog | 129
-rw-r--r-- extern/colamd/Doc/lesser.txt | 504
-rw-r--r-- extern/colamd/Include/UFconfig.h | 118
-rw-r--r-- extern/colamd/Include/colamd.h | 255
-rw-r--r-- extern/colamd/README.txt | 127
-rw-r--r-- extern/colamd/SConscript | 14
-rw-r--r-- extern/colamd/Source/colamd.c | 3611
-rw-r--r-- extern/colamd/Source/colamd_global.c | 24
-rw-r--r-- extern/libmv/CMakeLists.txt | 211
-rw-r--r-- extern/libmv/ChangeLog | 312
-rw-r--r-- extern/libmv/SConscript | 58
-rwxr-xr-x extern/libmv/bundle.sh | 247
-rw-r--r-- extern/libmv/files.txt | 141
-rw-r--r-- extern/libmv/libmv-capi.cpp | 707
-rw-r--r-- extern/libmv/libmv-capi.h | 125
-rw-r--r-- extern/libmv/libmv/base/id_generator.h | 37
-rw-r--r-- extern/libmv/libmv/base/scoped_ptr.h | 60
-rw-r--r-- extern/libmv/libmv/base/vector.h | 172
-rw-r--r-- extern/libmv/libmv/base/vector_utils.h | 34
-rw-r--r-- extern/libmv/libmv/image/array_nd.cc | 108
-rw-r--r-- extern/libmv/libmv/image/array_nd.h | 473
-rw-r--r-- extern/libmv/libmv/image/convolve.cc | 305
-rw-r--r-- extern/libmv/libmv/image/convolve.h | 93
-rw-r--r-- extern/libmv/libmv/image/image.h | 158
-rw-r--r-- extern/libmv/libmv/image/sample.h | 103
-rw-r--r-- extern/libmv/libmv/image/tuple.h | 90
-rw-r--r-- extern/libmv/libmv/logging/logging.h | 31
-rw-r--r-- extern/libmv/libmv/multiview/conditioning.cc | 99
-rw-r--r-- extern/libmv/libmv/multiview/conditioning.h | 60
-rw-r--r-- extern/libmv/libmv/multiview/euclidean_resection.cc | 661
-rw-r--r-- extern/libmv/libmv/multiview/euclidean_resection.h | 124
-rw-r--r-- extern/libmv/libmv/multiview/fundamental.cc | 391
-rw-r--r-- extern/libmv/libmv/multiview/fundamental.h | 144
-rw-r--r-- extern/libmv/libmv/multiview/nviewtriangulation.h | 80
-rw-r--r-- extern/libmv/libmv/multiview/projection.cc | 221
-rw-r--r-- extern/libmv/libmv/multiview/projection.h | 231
-rw-r--r-- extern/libmv/libmv/multiview/resection.h | 62
-rw-r--r-- extern/libmv/libmv/multiview/triangulation.cc | 49
-rw-r--r-- extern/libmv/libmv/multiview/triangulation.h | 38
-rw-r--r-- extern/libmv/libmv/numeric/dogleg.h | 261
-rw-r--r-- extern/libmv/libmv/numeric/function_derivative.h | 107
-rw-r--r-- extern/libmv/libmv/numeric/levenberg_marquardt.h | 183
-rw-r--r-- extern/libmv/libmv/numeric/numeric.cc | 136
-rw-r--r-- extern/libmv/libmv/numeric/numeric.h | 479
-rw-r--r-- extern/libmv/libmv/numeric/poly.cc | 23
-rw-r--r-- extern/libmv/libmv/numeric/poly.h | 123
-rw-r--r-- extern/libmv/libmv/numeric/tinyvector.cc | 0
-rw-r--r-- extern/libmv/libmv/simple_pipeline/bundle.cc | 184
-rw-r--r-- extern/libmv/libmv/simple_pipeline/bundle.h | 72
-rw-r--r-- extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc | 345
-rw-r--r-- extern/libmv/libmv/simple_pipeline/camera_intrinsics.h | 152
-rw-r--r-- extern/libmv/libmv/simple_pipeline/detect.cc | 110
-rw-r--r-- extern/libmv/libmv/simple_pipeline/detect.h | 72
-rw-r--r-- extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc | 218
-rw-r--r-- extern/libmv/libmv/simple_pipeline/initialize_reconstruction.h | 74
-rw-r--r-- extern/libmv/libmv/simple_pipeline/intersect.cc | 205
-rw-r--r-- extern/libmv/libmv/simple_pipeline/intersect.h | 77
-rw-r--r-- extern/libmv/libmv/simple_pipeline/pipeline.cc | 317
-rw-r--r-- extern/libmv/libmv/simple_pipeline/pipeline.h | 95
-rw-r--r-- extern/libmv/libmv/simple_pipeline/reconstruction.cc | 191
-rw-r--r-- extern/libmv/libmv/simple_pipeline/reconstruction.h | 217
-rw-r--r-- extern/libmv/libmv/simple_pipeline/resect.cc | 271
-rw-r--r-- extern/libmv/libmv/simple_pipeline/resect.h | 86
-rw-r--r-- extern/libmv/libmv/simple_pipeline/tracks.cc | 159
-rw-r--r-- extern/libmv/libmv/simple_pipeline/tracks.h | 119
-rw-r--r-- extern/libmv/libmv/tracking/klt_region_tracker.cc | 132
-rw-r--r-- extern/libmv/libmv/tracking/klt_region_tracker.h | 55
-rw-r--r-- extern/libmv/libmv/tracking/pyramid_region_tracker.cc | 78
-rw-r--r-- extern/libmv/libmv/tracking/pyramid_region_tracker.h | 46
-rw-r--r-- extern/libmv/libmv/tracking/region_tracker.h | 48
-rw-r--r-- extern/libmv/libmv/tracking/retrack_region_tracker.cc | 47
-rw-r--r-- extern/libmv/libmv/tracking/retrack_region_tracker.h | 48
-rw-r--r-- extern/libmv/libmv/tracking/sad.cc | 174
-rw-r--r-- extern/libmv/libmv/tracking/sad.h | 109
-rw-r--r-- extern/libmv/libmv/tracking/trklt_region_tracker.cc | 141
-rw-r--r-- extern/libmv/libmv/tracking/trklt_region_tracker.h | 65
-rwxr-xr-x extern/libmv/mkfiles.sh | 4
-rw-r--r-- extern/libmv/patches/bundle_tweaks.patch | 122
-rw-r--r-- extern/libmv/patches/config_mac.patch | 13
-rw-r--r-- extern/libmv/patches/fast.patch | 24
-rw-r--r-- extern/libmv/patches/function_derivative.patch | 21
-rw-r--r-- extern/libmv/patches/high_distortion_crash_fix.patch | 21
-rw-r--r-- extern/libmv/patches/levenberg_marquardt.patch | 71
-rw-r--r-- extern/libmv/patches/mingw.patch | 13
-rw-r--r-- extern/libmv/patches/msvc2010.patch | 12
-rw-r--r-- extern/libmv/patches/scaled_distortion.patch | 261
-rw-r--r-- extern/libmv/patches/series | 11
-rw-r--r-- extern/libmv/patches/snrptinf_fix.patch | 15
-rw-r--r-- extern/libmv/patches/v3d_verbosity.patch | 12
-rw-r--r-- extern/libmv/third_party/fast/LICENSE | 30
-rw-r--r-- extern/libmv/third_party/fast/README | 31
-rw-r--r-- extern/libmv/third_party/fast/README.libmv | 9
-rw-r--r-- extern/libmv/third_party/fast/fast.c | 71
-rw-r--r-- extern/libmv/third_party/fast/fast.h | 39
-rw-r--r-- extern/libmv/third_party/fast/fast_10.c | 4666
-rw-r--r-- extern/libmv/third_party/fast/fast_11.c | 3910
-rw-r--r-- extern/libmv/third_party/fast/fast_12.c | 3134
-rw-r--r-- extern/libmv/third_party/fast/fast_9.c | 5910
-rw-r--r-- extern/libmv/third_party/fast/nonmax.c | 117
-rw-r--r-- extern/libmv/third_party/gflags/README.libmv | 14
-rw-r--r-- extern/libmv/third_party/gflags/config.h | 110
-rw-r--r-- extern/libmv/third_party/gflags/gflags.cc | 1971
-rw-r--r-- extern/libmv/third_party/gflags/gflags.h | 589
-rw-r--r-- extern/libmv/third_party/gflags/gflags_completions.cc | 765
-rw-r--r-- extern/libmv/third_party/gflags/gflags_completions.h | 121
-rw-r--r-- extern/libmv/third_party/gflags/gflags_reporting.cc | 446
-rw-r--r-- extern/libmv/third_party/gflags/mutex.h | 349
-rw-r--r-- extern/libmv/third_party/glog/AUTHORS | 2
-rw-r--r-- extern/libmv/third_party/glog/COPYING | 65
-rw-r--r-- extern/libmv/third_party/glog/ChangeLog | 59
-rw-r--r-- extern/libmv/third_party/glog/NEWS | 0
-rw-r--r-- extern/libmv/third_party/glog/README | 5
-rw-r--r-- extern/libmv/third_party/glog/README.libmv | 38
-rw-r--r-- extern/libmv/third_party/glog/src/base/commandlineflags.h | 132
-rw-r--r-- extern/libmv/third_party/glog/src/base/googleinit.h | 51
-rw-r--r-- extern/libmv/third_party/glog/src/base/mutex.h | 325
-rw-r--r-- extern/libmv/third_party/glog/src/config.h | 11
-rw-r--r-- extern/libmv/third_party/glog/src/config_linux.h | 164
-rw-r--r-- extern/libmv/third_party/glog/src/config_mac.h | 159
-rw-r--r-- extern/libmv/third_party/glog/src/demangle.cc | 1231
-rw-r--r-- extern/libmv/third_party/glog/src/demangle.h | 84
-rw-r--r-- extern/libmv/third_party/glog/src/glog/log_severity.h | 84
-rw-r--r-- extern/libmv/third_party/glog/src/glog/logging.h | 1507
-rw-r--r-- extern/libmv/third_party/glog/src/glog/raw_logging.h | 185
-rw-r--r-- extern/libmv/third_party/glog/src/glog/vlog_is_on.h | 129
-rw-r--r-- extern/libmv/third_party/glog/src/logging.cc | 1783
-rw-r--r-- extern/libmv/third_party/glog/src/raw_logging.cc | 172
-rw-r--r-- extern/libmv/third_party/glog/src/signalhandler.cc | 348
-rw-r--r-- extern/libmv/third_party/glog/src/stacktrace.h | 60
-rw-r--r-- extern/libmv/third_party/glog/src/stacktrace_generic-inl.h | 59
-rw-r--r-- extern/libmv/third_party/glog/src/stacktrace_libunwind-inl.h | 87
-rw-r--r-- extern/libmv/third_party/glog/src/stacktrace_powerpc-inl.h | 130
-rw-r--r-- extern/libmv/third_party/glog/src/stacktrace_x86-inl.h | 139
-rw-r--r-- extern/libmv/third_party/glog/src/stacktrace_x86_64-inl.h | 105
-rw-r--r-- extern/libmv/third_party/glog/src/symbolize.cc | 681
-rw-r--r-- extern/libmv/third_party/glog/src/symbolize.h | 116
-rw-r--r-- extern/libmv/third_party/glog/src/utilities.cc | 335
-rw-r--r-- extern/libmv/third_party/glog/src/utilities.h | 222
-rw-r--r-- extern/libmv/third_party/glog/src/vlog_is_on.cc | 249
-rw-r--r-- extern/libmv/third_party/glog/src/windows/config.h | 136
-rw-r--r-- extern/libmv/third_party/glog/src/windows/glog/log_severity.h | 88
-rw-r--r-- extern/libmv/third_party/glog/src/windows/glog/logging.h | 1510
-rw-r--r-- extern/libmv/third_party/glog/src/windows/glog/raw_logging.h | 189
-rw-r--r-- extern/libmv/third_party/glog/src/windows/glog/vlog_is_on.h | 133
-rw-r--r-- extern/libmv/third_party/glog/src/windows/port.cc | 64
-rw-r--r-- extern/libmv/third_party/glog/src/windows/port.h | 149
-rwxr-xr-x extern/libmv/third_party/glog/src/windows/preprocess.sh | 118
-rw-r--r-- extern/libmv/third_party/ldl/CMakeLists.txt | 5
-rw-r--r-- extern/libmv/third_party/ldl/Doc/ChangeLog | 39
-rw-r--r-- extern/libmv/third_party/ldl/Doc/lesser.txt | 504
-rw-r--r-- extern/libmv/third_party/ldl/Include/ldl.h | 104
-rw-r--r-- extern/libmv/third_party/ldl/README.libmv | 10
-rw-r--r-- extern/libmv/third_party/ldl/README.txt | 136
-rw-r--r-- extern/libmv/third_party/ldl/Source/ldl.c | 507
-rw-r--r-- extern/libmv/third_party/msinttypes/README.libmv | 5
-rw-r--r-- extern/libmv/third_party/msinttypes/inttypes.h | 305
-rw-r--r-- extern/libmv/third_party/msinttypes/stdint.h | 247
-rw-r--r-- extern/libmv/third_party/ssba/COPYING.TXT | 165
-rw-r--r-- extern/libmv/third_party/ssba/Geometry/v3d_cameramatrix.h | 204
-rw-r--r-- extern/libmv/third_party/ssba/Geometry/v3d_distortion.h | 97
-rw-r--r-- extern/libmv/third_party/ssba/Geometry/v3d_metricbundle.cpp | 365
-rw-r--r-- extern/libmv/third_party/ssba/Geometry/v3d_metricbundle.h | 346
-rw-r--r-- extern/libmv/third_party/ssba/Math/v3d_linear.h | 923
-rw-r--r-- extern/libmv/third_party/ssba/Math/v3d_linear_utils.h | 391
-rw-r--r-- extern/libmv/third_party/ssba/Math/v3d_mathutilities.h | 59
-rw-r--r-- extern/libmv/third_party/ssba/Math/v3d_optimization.cpp | 955
-rw-r--r-- extern/libmv/third_party/ssba/Math/v3d_optimization.h | 273
-rw-r--r-- extern/libmv/third_party/ssba/README.TXT | 92
-rwxr-xr-x extern/libmv/third_party/ssba/README.libmv | 23
465 files changed, 107671 insertions, 22625 deletions
diff --git a/extern/CMakeLists.txt b/extern/CMakeLists.txt
index 0386b3d71d8..36ddddf2325 100644
--- a/extern/CMakeLists.txt
+++ b/extern/CMakeLists.txt
@@ -27,6 +27,8 @@
 # Otherwise we get warnings here that we cant fix in external projects
 remove_strict_flags()
+add_subdirectory(colamd)
+
 if(WITH_BULLET)
 	add_subdirectory(bullet2)
 endif()
@@ -62,3 +64,7 @@ endif()
 if(WITH_LZMA)
 	add_subdirectory(lzma)
 endif()
+
+if(WITH_LIBMV)
+	add_subdirectory(libmv)
+endif()
diff --git a/extern/Eigen2/Eigen/Array b/extern/Eigen2/Eigen/Array
deleted file mode 100644
index c847f9521fe..00000000000
--- a/extern/Eigen2/Eigen/Array
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef EIGEN_ARRAY_MODULE_H
-#define EIGEN_ARRAY_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableMSVCWarnings.h"
-
-namespace Eigen {
-
-/** \defgroup Array_Module Array module
- * This module provides several handy features to manipulate matrices as simple array of values.
- * In addition to listed classes, it defines various methods of the Cwise interface
- * (accessible from MatrixBase::cwise()), including:
- * - matrix-scalar sum,
- * - coeff-wise comparison operators,
- * - sin, cos, sqrt, pow, exp, log, square, cube, inverse (reciprocal).
- *
- * This module also provides various MatrixBase methods, including:
- * - \ref MatrixBase::all() "all", \ref MatrixBase::any() "any",
- * - \ref MatrixBase::Random() "random matrix initialization"
- *
- * \code
- * #include <Eigen/Array>
- * \endcode
- */
-
-#include "src/Array/CwiseOperators.h"
-#include "src/Array/Functors.h"
-#include "src/Array/BooleanRedux.h"
-#include "src/Array/Select.h"
-#include "src/Array/PartialRedux.h"
-#include "src/Array/Random.h"
-#include "src/Array/Norms.h"
-
-} // namespace Eigen
-
-#include "src/Core/util/EnableMSVCWarnings.h"
-
-#endif // EIGEN_ARRAY_MODULE_H
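
Everything this deleted header provided was reached through MatrixBase::cwise(). A minimal sketch of that Eigen2-era usage, assuming only the features the module documentation above lists (Random() initialization, coefficient-wise math, comparisons combined with any()):

    #include <Eigen/Array>  // Eigen2: Core plus the Cwise extensions

    int main()
    {
      // Random-matrix initialization comes from the Array module.
      Eigen::Matrix3f m = Eigen::Matrix3f::Random();

      // Coefficient-wise math through the Cwise interface.
      Eigen::Matrix3f s = m.cwise().sin();
      Eigen::Matrix3f q = m.cwise().square();

      // Coefficient-wise comparison combined with the any() reduction.
      bool hit = (m.cwise() > Eigen::Matrix3f::Constant(0.5f)).any();
      return hit ? 0 : 1;
    }
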
diff --git a/extern/Eigen2/Eigen/Cholesky b/extern/Eigen2/Eigen/Cholesky
deleted file mode 100644
index f1806f726c7..00000000000
--- a/extern/Eigen2/Eigen/Cholesky
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef EIGEN_CHOLESKY_MODULE_H
-#define EIGEN_CHOLESKY_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableMSVCWarnings.h"
-
-// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
-#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
- #ifndef EIGEN_HIDE_HEAVY_CODE
- #define EIGEN_HIDE_HEAVY_CODE
- #endif
-#elif defined EIGEN_HIDE_HEAVY_CODE
- #undef EIGEN_HIDE_HEAVY_CODE
-#endif
-
-namespace Eigen {
-
-/** \defgroup Cholesky_Module Cholesky module
- *
- * \nonstableyet
- *
- * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
- * Those decompositions are accessible via the following MatrixBase methods:
- * - MatrixBase::llt(),
- * - MatrixBase::ldlt()
- *
- * \code
- * #include <Eigen/Cholesky>
- * \endcode
- */
-
-#include "src/Array/CwiseOperators.h"
-#include "src/Array/Functors.h"
-#include "src/Cholesky/LLT.h"
-#include "src/Cholesky/LDLT.h"
-
-} // namespace Eigen
-
-#define EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \
- PREFIX template class LLT<MATRIXTYPE>; \
- PREFIX template class LDLT<MATRIXTYPE>
-
-#define EIGEN_CHOLESKY_MODULE_INSTANTIATE(PREFIX) \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \
- EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX)
-
-#ifdef EIGEN_EXTERN_INSTANTIATIONS
-
-namespace Eigen {
- EIGEN_CHOLESKY_MODULE_INSTANTIATE(extern);
-} // namespace Eigen
-#endif
-
-#include "src/Core/util/EnableMSVCWarnings.h"
-
-#endif // EIGEN_CHOLESKY_MODULE_H
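
Both decompositions are reached through the MatrixBase::llt() and MatrixBase::ldlt() methods named in the doc comment above. A minimal sketch, assuming the Eigen2-era pointer-based solve() convention (Eigen3 returns the solution instead):

    #include <Eigen/Cholesky>

    int main()
    {
      Eigen::Matrix2d A;
      A << 4, 1,
           1, 3;                    // symmetric positive definite
      Eigen::Vector2d b(1, 2), x;

      // Eigen2 convention: solve() fills *result and reports success.
      bool ok = A.llt().solve(b, &x);
      return ok ? 0 : 1;
    }
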
diff --git a/extern/Eigen2/Eigen/Core b/extern/Eigen2/Eigen/Core
deleted file mode 100644
index 060c60f1306..00000000000
--- a/extern/Eigen2/Eigen/Core
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef EIGEN_CORE_H
-#define EIGEN_CORE_H
-
-// first thing Eigen does: prevent MSVC from committing suicide
-#include "src/Core/util/DisableMSVCWarnings.h"
-
-#ifdef _MSC_VER
- #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
- #if (_MSC_VER >= 1500) // 2008 or later
- // Remember that usage of defined() in a #define is undefined by the standard.
- // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
- #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || defined(_M_X64)
- #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
- #endif
- #endif
-#endif
-
-#ifdef __GNUC__
- #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__>=x && __GNUC_MINOR__>=y) || __GNUC__>x)
-#else
- #define EIGEN_GNUC_AT_LEAST(x,y) 0
-#endif
-
-// Remember that usage of defined() in a #define is undefined by the standard
-#if (defined __SSE2__) && ( (!defined __GNUC__) || EIGEN_GNUC_AT_LEAST(4,2) )
- #define EIGEN_SSE2_BUT_NOT_OLD_GCC
-#endif
-
-#ifndef EIGEN_DONT_VECTORIZE
- #if defined (EIGEN_SSE2_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_SSE
- #include <emmintrin.h>
- #include <xmmintrin.h>
- #ifdef __SSE3__
- #include <pmmintrin.h>
- #endif
- #ifdef __SSSE3__
- #include <tmmintrin.h>
- #endif
- #elif defined __ALTIVEC__
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_ALTIVEC
- #include <altivec.h>
- // We need to #undef all these ugly tokens defined in <altivec.h>
- // => use __vector instead of vector
- #undef bool
- #undef vector
- #undef pixel
- #endif
-#endif
-
-#include <cstdlib>
-#include <cmath>
-#include <complex>
-#include <cassert>
-#include <functional>
-#include <iostream>
-#include <cstring>
-#include <string>
-#include <limits>
-#include <cstddef>
-
-#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(EIGEN_NO_EXCEPTIONS)
- #define EIGEN_EXCEPTIONS
-#endif
-
-#ifdef EIGEN_EXCEPTIONS
- #include <new>
-#endif
-
-// this needs to be done after all possible windows C header includes and before any Eigen source includes
-// (system C++ includes are supposed to be able to deal with this already):
-// windows.h defines min and max macros which would make Eigen fail to compile.
-#if defined(min) || defined(max)
-#error The preprocessor symbols 'min' or 'max' are defined. If you are compiling on Windows, do #define NOMINMAX to prevent windows.h from defining these symbols.
-#endif
-
-namespace Eigen {
-
-/** \defgroup Core_Module Core module
- * This is the main module of Eigen providing dense matrix and vector support
- * (both fixed and dynamic size) with all the features corresponding to a BLAS library
- * and much more...
- *
- * \code
- * #include <Eigen/Core>
- * \endcode
- */
-
-#include "src/Core/util/Macros.h"
-#include "src/Core/util/Constants.h"
-#include "src/Core/util/ForwardDeclarations.h"
-#include "src/Core/util/Meta.h"
-#include "src/Core/util/XprHelper.h"
-#include "src/Core/util/StaticAssert.h"
-#include "src/Core/util/Memory.h"
-
-#include "src/Core/NumTraits.h"
-#include "src/Core/MathFunctions.h"
-#include "src/Core/GenericPacketMath.h"
-
-#if defined EIGEN_VECTORIZE_SSE
- #include "src/Core/arch/SSE/PacketMath.h"
-#elif defined EIGEN_VECTORIZE_ALTIVEC
- #include "src/Core/arch/AltiVec/PacketMath.h"
-#endif
-
-#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
-#endif
-
-#include "src/Core/Functors.h"
-#include "src/Core/MatrixBase.h"
-#include "src/Core/Coeffs.h"
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874
- // at least confirmed with Doxygen 1.5.5 and 1.5.6
- #include "src/Core/Assign.h"
-#endif
-
-#include "src/Core/MatrixStorage.h"
-#include "src/Core/NestByValue.h"
-#include "src/Core/Flagged.h"
-#include "src/Core/Matrix.h"
-#include "src/Core/Cwise.h"
-#include "src/Core/CwiseBinaryOp.h"
-#include "src/Core/CwiseUnaryOp.h"
-#include "src/Core/CwiseNullaryOp.h"
-#include "src/Core/Dot.h"
-#include "src/Core/Product.h"
-#include "src/Core/DiagonalProduct.h"
-#include "src/Core/SolveTriangular.h"
-#include "src/Core/MapBase.h"
-#include "src/Core/Map.h"
-#include "src/Core/Block.h"
-#include "src/Core/Minor.h"
-#include "src/Core/Transpose.h"
-#include "src/Core/DiagonalMatrix.h"
-#include "src/Core/DiagonalCoeffs.h"
-#include "src/Core/Sum.h"
-#include "src/Core/Redux.h"
-#include "src/Core/Visitor.h"
-#include "src/Core/Fuzzy.h"
-#include "src/Core/IO.h"
-#include "src/Core/Swap.h"
-#include "src/Core/CommaInitializer.h"
-#include "src/Core/Part.h"
-#include "src/Core/CacheFriendlyProduct.h"
-
-} // namespace Eigen
-
-#include "src/Core/util/EnableMSVCWarnings.h"
-
-#endif // EIGEN_CORE_H
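
This umbrella header is the aggregation point for all dense functionality; every file it includes contributes one piece of the basic API. A short, version-neutral sketch of what that adds up to:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d A;
      A << 1, 2, 3,
           4, 5, 6,
           7, 8, 10;                    // CommaInitializer.h
      Eigen::Vector3d v(1, 0, 1);

      Eigen::Vector3d w = A * v;        // Product.h
      std::cout << w.transpose()        // Transpose.h
                << "  sum=" << A.sum()  // Sum.h / Redux.h
                << std::endl;
      return 0;
    }
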
diff --git a/extern/Eigen2/Eigen/Eigen b/extern/Eigen2/Eigen/Eigen
deleted file mode 100644
index 654c8dc6380..00000000000
--- a/extern/Eigen2/Eigen/Eigen
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "Dense"
-#include "Sparse"
diff --git a/extern/Eigen2/Eigen/Geometry b/extern/Eigen2/Eigen/Geometry
deleted file mode 100644
index 617b25eb6f5..00000000000
--- a/extern/Eigen2/Eigen/Geometry
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef EIGEN_GEOMETRY_MODULE_H
-#define EIGEN_GEOMETRY_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableMSVCWarnings.h"
-
-#include "Array"
-#include <limits>
-
-#ifndef M_PI
-#define M_PI 3.14159265358979323846
-#endif
-
-namespace Eigen {
-
-/** \defgroup Geometry_Module Geometry module
- *
- * \nonstableyet
- *
- * This module provides support for:
- * - fixed-size homogeneous transformations
- * - translation, scaling, 2D and 3D rotations
- * - quaternions
- * - \ref MatrixBase::cross() "cross product"
- * - \ref MatrixBase::unitOrthogonal() "orthognal vector generation"
- * - some linear components: parametrized-lines and hyperplanes
- *
- * \code
- * #include <Eigen/Geometry>
- * \endcode
- */
-
-#include "src/Geometry/OrthoMethods.h"
-#include "src/Geometry/RotationBase.h"
-#include "src/Geometry/Rotation2D.h"
-#include "src/Geometry/Quaternion.h"
-#include "src/Geometry/AngleAxis.h"
-#include "src/Geometry/EulerAngles.h"
-#include "src/Geometry/Transform.h"
-#include "src/Geometry/Translation.h"
-#include "src/Geometry/Scaling.h"
-#include "src/Geometry/Hyperplane.h"
-#include "src/Geometry/ParametrizedLine.h"
-#include "src/Geometry/AlignedBox.h"
-
-} // namespace Eigen
-
-#include "src/Core/util/EnableMSVCWarnings.h"
-
-#endif // EIGEN_GEOMETRY_MODULE_H
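
The cross product and quaternion support called out in the doc comment above look like this in practice (a version-neutral sketch; M_PI is available thanks to the fallback define in this very header):

    #include <Eigen/Geometry>

    int main()
    {
      Eigen::Vector3f a(1, 0, 0), b(0, 1, 0);
      Eigen::Vector3f n = a.cross(b);   // OrthoMethods.h: the unit z axis

      // Quarter turn about z, built from an axis-angle rotation.
      Eigen::Quaternionf q(Eigen::AngleAxisf(M_PI / 2, Eigen::Vector3f::UnitZ()));
      Eigen::Vector3f r = q * a;        // rotates a onto b (up to rounding)
      return r.isApprox(b) ? 0 : 1;
    }
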
diff --git a/extern/Eigen2/Eigen/LeastSquares b/extern/Eigen2/Eigen/LeastSquares
deleted file mode 100644
index 573a13cb42f..00000000000
--- a/extern/Eigen2/Eigen/LeastSquares
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef EIGEN_REGRESSION_MODULE_H
-#define EIGEN_REGRESSION_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableMSVCWarnings.h"
-
-#include "QR"
-#include "Geometry"
-
-namespace Eigen {
-
-/** \defgroup LeastSquares_Module LeastSquares module
- * This module provides linear regression and related features.
- *
- * \code
- * #include <Eigen/LeastSquares>
- * \endcode
- */
-
-#include "src/LeastSquares/LeastSquares.h"
-
-} // namespace Eigen
-
-#include "src/Core/util/EnableMSVCWarnings.h"
-
-#endif // EIGEN_REGRESSION_MODULE_H
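
The helpers this module provided (such as linearRegression()) reduce to ordinary least squares on top of Core and Cholesky. A hedged equivalent via the normal equations, deliberately not using the module's own API:

    #include <Eigen/Cholesky>

    // Fit y ~= m*x + c by solving (A^T A) p = A^T y.
    int main()
    {
      Eigen::Matrix<double, 4, 2> A;
      Eigen::Vector4d y;
      A << 0, 1,
           1, 1,
           2, 1,
           3, 1;                        // columns: x and the constant term
      y << 1.1, 2.9, 5.2, 6.8;

      Eigen::Matrix2d AtA = A.transpose() * A;
      Eigen::Vector2d Aty = A.transpose() * y;
      Eigen::Vector2d p;                // p = (m, c)
      AtA.llt().solve(Aty, &p);         // Eigen2-style pointer solve
      return 0;
    }
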
diff --git a/extern/Eigen2/Eigen/QR b/extern/Eigen2/Eigen/QR
deleted file mode 100644
index 97907d1e50f..00000000000
--- a/extern/Eigen2/Eigen/QR
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef EIGEN_QR_MODULE_H
-#define EIGEN_QR_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableMSVCWarnings.h"
-
-#include "Cholesky"
-
-// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
-#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
- #ifndef EIGEN_HIDE_HEAVY_CODE
- #define EIGEN_HIDE_HEAVY_CODE
- #endif
-#elif defined EIGEN_HIDE_HEAVY_CODE
- #undef EIGEN_HIDE_HEAVY_CODE
-#endif
-
-namespace Eigen {
-
-/** \defgroup QR_Module QR module
- *
- * \nonstableyet
- *
- * This module mainly provides QR decomposition and an eigen value solver.
- * This module also provides some MatrixBase methods, including:
- * - MatrixBase::qr(),
- * - MatrixBase::eigenvalues(),
- * - MatrixBase::operatorNorm()
- *
- * \code
- * #include <Eigen/QR>
- * \endcode
- */
-
-#include "src/QR/QR.h"
-#include "src/QR/Tridiagonalization.h"
-#include "src/QR/EigenSolver.h"
-#include "src/QR/SelfAdjointEigenSolver.h"
-#include "src/QR/HessenbergDecomposition.h"
-
-// declare all classes for a given matrix type
-#define EIGEN_QR_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \
- PREFIX template class QR<MATRIXTYPE>; \
- PREFIX template class Tridiagonalization<MATRIXTYPE>; \
- PREFIX template class HessenbergDecomposition<MATRIXTYPE>; \
- PREFIX template class SelfAdjointEigenSolver<MATRIXTYPE>
-
-// removed because it does not support complex yet
-// PREFIX template class EigenSolver<MATRIXTYPE>
-
-// declare all class for all types
-#define EIGEN_QR_MODULE_INSTANTIATE(PREFIX) \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \
- EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX)
-
-#ifdef EIGEN_EXTERN_INSTANTIATIONS
- EIGEN_QR_MODULE_INSTANTIATE(extern);
-#endif // EIGEN_EXTERN_INSTANTIATIONS
-
-} // namespace Eigen
-
-#include "src/Core/util/EnableMSVCWarnings.h"
-
-#endif // EIGEN_QR_MODULE_H
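
Besides QR itself, this header bundled the eigensolvers, and the self-adjoint solver mentioned in the doc comment is the piece most code actually used. A short sketch (in Eigen2 it lives in <Eigen/QR>; Eigen3 moves it to the new Eigenvalues module visible in the diffstat above):

    #include <Eigen/QR>   // Eigen2 location; Eigen3: <Eigen/Eigenvalues>

    int main()
    {
      Eigen::Matrix2d A;
      A << 2, 1,
           1, 2;          // symmetric; eigenvalues 1 and 3

      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es(A);
      Eigen::Vector2d lambda = es.eigenvalues();   // real eigenvalues
      Eigen::Matrix2d V      = es.eigenvectors();  // columns are eigenvectors
      return (A * V.col(0)).isApprox(lambda(0) * V.col(0)) ? 0 : 1;
    }
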
diff --git a/extern/Eigen2/Eigen/QtAlignedMalloc b/extern/Eigen2/Eigen/QtAlignedMalloc
deleted file mode 100644
index fde227328fa..00000000000
--- a/extern/Eigen2/Eigen/QtAlignedMalloc
+++ /dev/null
@@ -1,29 +0,0 @@
-
-#ifndef EIGEN_QTMALLOC_MODULE_H
-#define EIGEN_QTMALLOC_MODULE_H
-
-#include "Core"
-
-#if (!EIGEN_MALLOC_ALREADY_ALIGNED)
-
-inline void *qMalloc(size_t size)
-{
- return Eigen::ei_aligned_malloc(size);
-}
-
-inline void qFree(void *ptr)
-{
- Eigen::ei_aligned_free(ptr);
-}
-
-inline void *qRealloc(void *ptr, size_t size)
-{
- void* newPtr = Eigen::ei_aligned_malloc(size);
- memcpy(newPtr, ptr, size);
- Eigen::ei_aligned_free(ptr);
- return newPtr;
-}
-
-#endif
-
-#endif // EIGEN_QTMALLOC_MODULE_H
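
The header above simply reroutes Qt's qMalloc/qFree/qRealloc entry points to Eigen's aligned allocator when plain malloc is not already suitably aligned; call sites do not change. A trivial sketch:

    #include <Eigen/QtAlignedMalloc>
    #include <cstring>

    int main()
    {
      // With EIGEN_MALLOC_ALREADY_ALIGNED evaluating to 0, these resolve
      // to the wrappers above and return 16-byte-aligned storage.
      void* p = qMalloc(64);
      std::memset(p, 0, 64);
      p = qRealloc(p, 32);   // shrink only: the wrapper copies `size` bytes
      qFree(p);
      return 0;
    }
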
diff --git a/extern/Eigen2/Eigen/Sparse b/extern/Eigen2/Eigen/Sparse
deleted file mode 100644
index 536c284549b..00000000000
--- a/extern/Eigen2/Eigen/Sparse
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef EIGEN_SPARSE_MODULE_H
-#define EIGEN_SPARSE_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableMSVCWarnings.h"
-
-#include <vector>
-#include <map>
-#include <cstdlib>
-#include <cstring>
-#include <algorithm>
-
-#ifdef EIGEN_GOOGLEHASH_SUPPORT
- #include <google/dense_hash_map>
-#endif
-
-#ifdef EIGEN_CHOLMOD_SUPPORT
- extern "C" {
- #include "cholmod.h"
- }
-#endif
-
-#ifdef EIGEN_TAUCS_SUPPORT
- // taucs.h declares a lot of mess
- #define isnan
- #define finite
- #define isinf
- extern "C" {
- #include "taucs.h"
- }
- #undef isnan
- #undef finite
- #undef isinf
-
- #ifdef min
- #undef min
- #endif
- #ifdef max
- #undef max
- #endif
- #ifdef complex
- #undef complex
- #endif
-#endif
-
-#ifdef EIGEN_SUPERLU_SUPPORT
- typedef int int_t;
- #include "superlu/slu_Cnames.h"
- #include "superlu/supermatrix.h"
- #include "superlu/slu_util.h"
-
- namespace SuperLU_S {
- #include "superlu/slu_sdefs.h"
- }
- namespace SuperLU_D {
- #include "superlu/slu_ddefs.h"
- }
- namespace SuperLU_C {
- #include "superlu/slu_cdefs.h"
- }
- namespace SuperLU_Z {
- #include "superlu/slu_zdefs.h"
- }
- namespace Eigen { struct SluMatrix; }
-#endif
-
-#ifdef EIGEN_UMFPACK_SUPPORT
- #include "umfpack.h"
-#endif
-
-namespace Eigen {
-
-/** \defgroup Sparse_Module Sparse module
- *
- * \nonstableyet
- *
- * See the \ref TutorialSparse "Sparse tutorial"
- *
- * \code
- * #include <Eigen/QR>
- * \endcode
- */
-
-#include "src/Sparse/SparseUtil.h"
-#include "src/Sparse/SparseMatrixBase.h"
-#include "src/Sparse/CompressedStorage.h"
-#include "src/Sparse/AmbiVector.h"
-#include "src/Sparse/RandomSetter.h"
-#include "src/Sparse/SparseBlock.h"
-#include "src/Sparse/SparseMatrix.h"
-#include "src/Sparse/DynamicSparseMatrix.h"
-#include "src/Sparse/MappedSparseMatrix.h"
-#include "src/Sparse/SparseVector.h"
-#include "src/Sparse/CoreIterators.h"
-#include "src/Sparse/SparseTranspose.h"
-#include "src/Sparse/SparseCwise.h"
-#include "src/Sparse/SparseCwiseUnaryOp.h"
-#include "src/Sparse/SparseCwiseBinaryOp.h"
-#include "src/Sparse/SparseDot.h"
-#include "src/Sparse/SparseAssign.h"
-#include "src/Sparse/SparseRedux.h"
-#include "src/Sparse/SparseFuzzy.h"
-#include "src/Sparse/SparseFlagged.h"
-#include "src/Sparse/SparseProduct.h"
-#include "src/Sparse/SparseDiagonalProduct.h"
-#include "src/Sparse/TriangularSolver.h"
-#include "src/Sparse/SparseLLT.h"
-#include "src/Sparse/SparseLDLT.h"
-#include "src/Sparse/SparseLU.h"
-
-#ifdef EIGEN_CHOLMOD_SUPPORT
-# include "src/Sparse/CholmodSupport.h"
-#endif
-
-#ifdef EIGEN_TAUCS_SUPPORT
-# include "src/Sparse/TaucsSupport.h"
-#endif
-
-#ifdef EIGEN_SUPERLU_SUPPORT
-# include "src/Sparse/SuperLUSupport.h"
-#endif
-
-#ifdef EIGEN_UMFPACK_SUPPORT
-# include "src/Sparse/UmfPackSupport.h"
-#endif
-
-} // namespace Eigen
-
-#include "src/Core/util/EnableMSVCWarnings.h"
-
-#endif // EIGEN_SPARSE_MODULE_H
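
For context, the central class this module assembles is filled through the Eigen2-era startFill()/fill()/endFill() protocol. A hedged sketch of that usage (the protocol is assumed from the Eigen2 sources; the Eigen3 rewrite listed in the diffstat switches to insert()):

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(4, 4);  // column-major by default
      m.startFill(4);                       // reserve an estimate of nonzeros
      for (int i = 0; i < 4; ++i)
        m.fill(i, i) = 2.0;                 // coefficients in storage order
      m.endFill();
      return m.nonZeros() == 4 ? 0 : 1;
    }
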
diff --git a/extern/Eigen2/Eigen/StdVector b/extern/Eigen2/Eigen/StdVector
deleted file mode 100644
index c0744d6a0f3..00000000000
--- a/extern/Eigen2/Eigen/StdVector
+++ /dev/null
@@ -1,147 +0,0 @@
-#ifdef EIGEN_USE_NEW_STDVECTOR
-#include "NewStdVector"
-#else
-
-#ifndef EIGEN_STDVECTOR_MODULE_H
-#define EIGEN_STDVECTOR_MODULE_H
-
-#if defined(_GLIBCXX_VECTOR) || defined(_VECTOR_)
-#error you must include <Eigen/StdVector> before <vector>. Also note that <Eigen/Sparse> includes <vector>, so it must be included after <Eigen/StdVector> too.
-#endif
-
-#ifndef EIGEN_GNUC_AT_LEAST
-#ifdef __GNUC__
- #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__>=x && __GNUC_MINOR__>=y) || __GNUC__>x)
-#else
- #define EIGEN_GNUC_AT_LEAST(x,y) 0
-#endif
-#endif
-
-#define vector std_vector
-#include <vector>
-#undef vector
-
-namespace Eigen {
-
-template<typename T> class aligned_allocator;
-
-// meta programming to determine if a class has a given member
-struct ei_does_not_have_aligned_operator_new_marker_sizeof {int a[1];};
-struct ei_has_aligned_operator_new_marker_sizeof {int a[2];};
-
-template<typename ClassType>
-struct ei_has_aligned_operator_new {
- template<typename T>
- static ei_has_aligned_operator_new_marker_sizeof
- test(T const *, typename T::ei_operator_new_marker_type const * = 0);
- static ei_does_not_have_aligned_operator_new_marker_sizeof
- test(...);
-
- // note that the following indirection is needed for gcc-3.3
- enum {ret = sizeof(test(static_cast<ClassType*>(0)))
- == sizeof(ei_has_aligned_operator_new_marker_sizeof) };
-};
-
-#ifdef _MSC_VER
-
- // sometimes, MSVC detects, at compile time, that the argument x
- // in std::vector::resize(size_t s,T x) won't be aligned and generate an error
- // even if this function is never called. Whence this little wrapper.
- #define _EIGEN_WORKAROUND_MSVC_STD_VECTOR(T) Eigen::ei_workaround_msvc_std_vector<T>
- template<typename T> struct ei_workaround_msvc_std_vector : public T
- {
- inline ei_workaround_msvc_std_vector() : T() {}
- inline ei_workaround_msvc_std_vector(const T& other) : T(other) {}
- inline operator T& () { return *static_cast<T*>(this); }
- inline operator const T& () const { return *static_cast<const T*>(this); }
- template<typename OtherT>
- inline T& operator=(const OtherT& other)
- { T::operator=(other); return *this; }
- inline ei_workaround_msvc_std_vector& operator=(const ei_workaround_msvc_std_vector& other)
- { T::operator=(other); return *this; }
- };
-
-#else
-
- #define _EIGEN_WORKAROUND_MSVC_STD_VECTOR(T) T
-
-#endif
-
-}
-
-namespace std {
-
-#define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \
- public: \
- typedef T value_type; \
- typedef typename vector_base::allocator_type allocator_type; \
- typedef typename vector_base::size_type size_type; \
- typedef typename vector_base::iterator iterator; \
- explicit vector(const allocator_type& __a = allocator_type()) : vector_base(__a) {} \
- vector(const vector& c) : vector_base(c) {} \
- vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
- vector(iterator start, iterator end) : vector_base(start, end) {} \
- vector& operator=(const vector& __x) { \
- vector_base::operator=(__x); \
- return *this; \
- }
-
-template<typename T,
- typename AllocT = std::allocator<T>,
- bool HasAlignedNew = Eigen::ei_has_aligned_operator_new<T>::ret>
-class vector : public std::std_vector<T,AllocT>
-{
- typedef std_vector<T, AllocT> vector_base;
- EIGEN_STD_VECTOR_SPECIALIZATION_BODY
-};
-
-template<typename T,typename DummyAlloc>
-class vector<T,DummyAlloc,true>
- : public std::std_vector<_EIGEN_WORKAROUND_MSVC_STD_VECTOR(T),
- Eigen::aligned_allocator<_EIGEN_WORKAROUND_MSVC_STD_VECTOR(T)> >
-{
- typedef std_vector<_EIGEN_WORKAROUND_MSVC_STD_VECTOR(T),
- Eigen::aligned_allocator<_EIGEN_WORKAROUND_MSVC_STD_VECTOR(T)> > vector_base;
- EIGEN_STD_VECTOR_SPECIALIZATION_BODY
-
- void resize(size_type __new_size)
- { resize(__new_size, T()); }
-
- #if defined(_VECTOR_)
- // workaround MSVC std::vector implementation
- void resize(size_type __new_size, const value_type& __x)
- {
- if (vector_base::size() < __new_size)
- vector_base::_Insert_n(vector_base::end(), __new_size - vector_base::size(), __x);
- else if (__new_size < vector_base::size())
- vector_base::erase(vector_base::begin() + __new_size, vector_base::end());
- }
- #elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2)
- // workaround GCC std::vector implementation
- void resize(size_type __new_size, const value_type& __x)
- {
- if (__new_size < vector_base::size())
- vector_base::_M_erase_at_end(this->_M_impl._M_start + __new_size);
- else
- vector_base::insert(vector_base::end(), __new_size - vector_base::size(), __x);
- }
- #elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,1)
- void resize(size_type __new_size, const value_type& __x)
- {
- if (__new_size < vector_base::size())
- vector_base::erase(vector_base::begin() + __new_size, vector_base::end());
- else
- vector_base::insert(vector_base::end(), __new_size - vector_base::size(), __x);
- }
- #else
- // Before gcc-4.1 we already have: std::vector::resize(size_type,const T&),
- // so no need for a workaround !
- using vector_base::resize;
- #endif
-};
-
-}
-
-#endif // EIGEN_STDVECTOR_MODULE_H
-
-#endif // EIGEN_USE_NEW_STDVECTOR
\ No newline at end of file
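
The point of all the machinery above, and of the #error near its top, is to make std::vector safe for fixed-size vectorizable Eigen types. A minimal Eigen2-era usage sketch honoring that include-order rule:

    // Per the #error above: <Eigen/StdVector> must come before <vector>.
    #include <Eigen/StdVector>
    #include <vector>

    int main()
    {
      // Vector4f has an aligned operator new, so the specialization above
      // kicks in and keeps every element 16-byte aligned.
      std::vector<Eigen::Vector4f> pts(8, Eigen::Vector4f::Zero());
      pts.push_back(Eigen::Vector4f::Ones());
      return static_cast<int>(pts.size()) - 9;   // 0 on success
    }
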
diff --git a/extern/Eigen2/Eigen/src/Array/CwiseOperators.h b/extern/Eigen2/Eigen/src/Array/CwiseOperators.h
deleted file mode 100644
index 4b6346daa51..00000000000
--- a/extern/Eigen2/Eigen/src/Array/CwiseOperators.h
+++ /dev/null
@@ -1,453 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_ARRAY_CWISE_OPERATORS_H
-#define EIGEN_ARRAY_CWISE_OPERATORS_H
-
-// -- unary operators --
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise square root of *this.
- *
- * Example: \include Cwise_sqrt.cpp
- * Output: \verbinclude Cwise_sqrt.out
- *
- * \sa pow(), square()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op)
-Cwise<ExpressionType>::sqrt() const
-{
- return _expression();
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise exponential of *this.
- *
- * Example: \include Cwise_exp.cpp
- * Output: \verbinclude Cwise_exp.out
- *
- * \sa pow(), log(), sin(), cos()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op)
-Cwise<ExpressionType>::exp() const
-{
- return _expression();
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise logarithm of *this.
- *
- * Example: \include Cwise_log.cpp
- * Output: \verbinclude Cwise_log.out
- *
- * \sa exp()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op)
-Cwise<ExpressionType>::log() const
-{
- return _expression();
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise cosine of *this.
- *
- * Example: \include Cwise_cos.cpp
- * Output: \verbinclude Cwise_cos.out
- *
- * \sa sin(), exp()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op)
-Cwise<ExpressionType>::cos() const
-{
- return _expression();
-}
-
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise sine of *this.
- *
- * Example: \include Cwise_sin.cpp
- * Output: \verbinclude Cwise_sin.out
- *
- * \sa cos(), exp()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op)
-Cwise<ExpressionType>::sin() const
-{
- return _expression();
-}
-
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise power of *this to the given exponent.
- *
- * Example: \include Cwise_pow.cpp
- * Output: \verbinclude Cwise_pow.out
- *
- * \sa exp(), log()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op)
-Cwise<ExpressionType>::pow(const Scalar& exponent) const
-{
- return EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op)(_expression(), ei_scalar_pow_op<Scalar>(exponent));
-}
-
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise inverse of *this.
- *
- * Example: \include Cwise_inverse.cpp
- * Output: \verbinclude Cwise_inverse.out
- *
- * \sa operator/(), operator*()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op)
-Cwise<ExpressionType>::inverse() const
-{
- return _expression();
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise square of *this.
- *
- * Example: \include Cwise_square.cpp
- * Output: \verbinclude Cwise_square.out
- *
- * \sa operator/(), operator*(), abs2()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op)
-Cwise<ExpressionType>::square() const
-{
- return _expression();
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise cube of *this.
- *
- * Example: \include Cwise_cube.cpp
- * Output: \verbinclude Cwise_cube.out
- *
- * \sa square(), pow()
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op)
-Cwise<ExpressionType>::cube() const
-{
- return _expression();
-}
-
-
-// -- binary operators --
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise \< operator of *this and \a other
- *
- * Example: \include Cwise_less.cpp
- * Output: \verbinclude Cwise_less.out
- *
- * \sa MatrixBase::all(), MatrixBase::any(), operator>(), operator<=()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)
-Cwise<ExpressionType>::operator<(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)(_expression(), other.derived());
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise \<= operator of *this and \a other
- *
- * Example: \include Cwise_less_equal.cpp
- * Output: \verbinclude Cwise_less_equal.out
- *
- * \sa MatrixBase::all(), MatrixBase::any(), operator>=(), operator<()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)
-Cwise<ExpressionType>::operator<=(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)(_expression(), other.derived());
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise \> operator of *this and \a other
- *
- * Example: \include Cwise_greater.cpp
- * Output: \verbinclude Cwise_greater.out
- *
- * \sa MatrixBase::all(), MatrixBase::any(), operator>=(), operator<()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)
-Cwise<ExpressionType>::operator>(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)(_expression(), other.derived());
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise \>= operator of *this and \a other
- *
- * Example: \include Cwise_greater_equal.cpp
- * Output: \verbinclude Cwise_greater_equal.out
- *
- * \sa MatrixBase::all(), MatrixBase::any(), operator>(), operator<=()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)
-Cwise<ExpressionType>::operator>=(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)(_expression(), other.derived());
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise == operator of *this and \a other
- *
- * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
- * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
- * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and
- * MatrixBase::isMuchSmallerThan().
- *
- * Example: \include Cwise_equal_equal.cpp
- * Output: \verbinclude Cwise_equal_equal.out
- *
- * \sa MatrixBase::all(), MatrixBase::any(), MatrixBase::isApprox(), MatrixBase::isMuchSmallerThan()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)
-Cwise<ExpressionType>::operator==(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)(_expression(), other.derived());
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise != operator of *this and \a other
- *
- * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
- * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
- * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and
- * MatrixBase::isMuchSmallerThan().
- *
- * Example: \include Cwise_not_equal.cpp
- * Output: \verbinclude Cwise_not_equal.out
- *
- * \sa MatrixBase::all(), MatrixBase::any(), MatrixBase::isApprox(), MatrixBase::isMuchSmallerThan()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)
-Cwise<ExpressionType>::operator!=(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)(_expression(), other.derived());
-}
-
-// comparisons to scalar value
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise \< operator of *this and a scalar \a s
- *
- * \sa operator<(const MatrixBase<OtherDerived> &) const
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)
-Cwise<ExpressionType>::operator<(Scalar s) const
-{
- return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)(_expression(),
- typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise \<= operator of *this and a scalar \a s
- *
- * \sa operator<=(const MatrixBase<OtherDerived> &) const
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)
-Cwise<ExpressionType>::operator<=(Scalar s) const
-{
- return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)(_expression(),
- typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise \> operator of *this and a scalar \a s
- *
- * \sa operator>(const MatrixBase<OtherDerived> &) const
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)
-Cwise<ExpressionType>::operator>(Scalar s) const
-{
- return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)(_expression(),
- typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise \>= operator of *this and a scalar \a s
- *
- * \sa operator>=(const MatrixBase<OtherDerived> &) const
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)
-Cwise<ExpressionType>::operator>=(Scalar s) const
-{
- return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)(_expression(),
- typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise == operator of *this and a scalar \a s
- *
- * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
- * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
- * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and
- * MatrixBase::isMuchSmallerThan().
- *
- * \sa operator==(const MatrixBase<OtherDerived> &) const
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)
-Cwise<ExpressionType>::operator==(Scalar s) const
-{
- return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)(_expression(),
- typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
-}
-
-/** \array_module
- *
- * \returns an expression of the coefficient-wise != operator of *this and a scalar \a s
- *
- * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
- * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
- * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and
- * MatrixBase::isMuchSmallerThan().
- *
- * \sa operator!=(const MatrixBase<OtherDerived> &) const
- */
-template<typename ExpressionType>
-inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)
-Cwise<ExpressionType>::operator!=(Scalar s) const
-{
- return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)(_expression(),
- typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
-}
-
-// scalar addition
-
-/** \array_module
- *
- * \returns an expression of \c *this with each coeff incremented by the constant \a scalar
- *
- * Example: \include Cwise_plus.cpp
- * Output: \verbinclude Cwise_plus.out
- *
- * \sa operator+=(), operator-()
- */
-template<typename ExpressionType>
-inline const typename Cwise<ExpressionType>::ScalarAddReturnType
-Cwise<ExpressionType>::operator+(const Scalar& scalar) const
-{
- return typename Cwise<ExpressionType>::ScalarAddReturnType(m_matrix, ei_scalar_add_op<Scalar>(scalar));
-}
-
-/** \array_module
- *
- * Adds the given \a scalar to each coeff of this expression.
- *
- * Example: \include Cwise_plus_equal.cpp
- * Output: \verbinclude Cwise_plus_equal.out
- *
- * \sa operator+(), operator-=()
- */
-template<typename ExpressionType>
-inline ExpressionType& Cwise<ExpressionType>::operator+=(const Scalar& scalar)
-{
- return m_matrix.const_cast_derived() = *this + scalar;
-}
-
-/** \array_module
- *
- * \returns an expression of \c *this with each coeff decremented by the constant \a scalar
- *
- * Example: \include Cwise_minus.cpp
- * Output: \verbinclude Cwise_minus.out
- *
- * \sa operator+(), operator-=()
- */
-template<typename ExpressionType>
-inline const typename Cwise<ExpressionType>::ScalarAddReturnType
-Cwise<ExpressionType>::operator-(const Scalar& scalar) const
-{
- return *this + (-scalar);
-}
-
-/** \array_module
- *
- * Subtracts the given \a scalar from each coeff of this expression.
- *
- * Example: \include Cwise_minus_equal.cpp
- * Output: \verbinclude Cwise_minus_equal.out
- *
- * \sa operator+=(), operator-()
- */
-
-template<typename ExpressionType>
-inline ExpressionType& Cwise<ExpressionType>::operator-=(const Scalar& scalar)
-{
- return m_matrix.const_cast_derived() = *this - scalar;
-}
-
-#endif // EIGEN_ARRAY_CWISE_OPERATORS_H
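For reference, a minimal sketch of how the Cwise API deleted above was used, compiled against the Eigen2 headers this commit removes:

    #include <Eigen/Array>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
      Matrix3d m = Matrix3d::Random().cwise().abs(); // coefficient-wise absolute value
      std::cout << m.cwise().sqrt() << "\n\n";       // unary op: per-coefficient square root
      std::cout << (m.cwise() + 1.0) << "\n\n";      // scalar addition (ScalarAddReturnType)
      std::cout << (m.cwise() > 0.5).any() << "\n";  // scalar comparison, then a reduction
      return 0;
    }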
diff --git a/extern/Eigen2/Eigen/src/Array/Functors.h b/extern/Eigen2/Eigen/src/Array/Functors.h
deleted file mode 100644
index c2c325a788e..00000000000
--- a/extern/Eigen2/Eigen/src/Array/Functors.h
+++ /dev/null
@@ -1,309 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_ARRAY_FUNCTORS_H
-#define EIGEN_ARRAY_FUNCTORS_H
-
-/** \internal
- * \array_module
- *
- * \brief Template functor to add a scalar to a fixed other one
- *
- * \sa class CwiseUnaryOp, Array::operator+
- */
-/* If you wonder why doing the ei_pset1() in packetOp() is an optimization, check ei_scalar_multiple_op */
-template<typename Scalar>
-struct ei_scalar_add_op {
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
- // FIXME the default copy constructor seems to be bugged with std::complex<>
- inline ei_scalar_add_op(const ei_scalar_add_op& other) : m_other(other.m_other) { }
- inline ei_scalar_add_op(const Scalar& other) : m_other(other) { }
- inline Scalar operator() (const Scalar& a) const { return a + m_other; }
- inline const PacketScalar packetOp(const PacketScalar& a) const
- { return ei_padd(a, ei_pset1(m_other)); }
- const Scalar m_other;
-private:
- ei_scalar_add_op& operator=(const ei_scalar_add_op&);
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_add_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = ei_packet_traits<Scalar>::size>1 }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to compute the square root of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::sqrt()
- */
-template<typename Scalar> struct ei_scalar_sqrt_op EIGEN_EMPTY_STRUCT {
- inline const Scalar operator() (const Scalar& a) const { return ei_sqrt(a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_sqrt_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to compute the exponential of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::exp()
- */
-template<typename Scalar> struct ei_scalar_exp_op EIGEN_EMPTY_STRUCT {
- inline const Scalar operator() (const Scalar& a) const { return ei_exp(a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_exp_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to compute the logarithm of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::log()
- */
-template<typename Scalar> struct ei_scalar_log_op EIGEN_EMPTY_STRUCT {
- inline const Scalar operator() (const Scalar& a) const { return ei_log(a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_log_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to compute the cosine of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::cos()
- */
-template<typename Scalar> struct ei_scalar_cos_op EIGEN_EMPTY_STRUCT {
- inline const Scalar operator() (const Scalar& a) const { return ei_cos(a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_cos_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to compute the sine of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::sin()
- */
-template<typename Scalar> struct ei_scalar_sin_op EIGEN_EMPTY_STRUCT {
- inline const Scalar operator() (const Scalar& a) const { return ei_sin(a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_sin_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to raise a scalar to a power
- *
- * \sa class CwiseUnaryOp, Cwise::pow
- */
-template<typename Scalar>
-struct ei_scalar_pow_op {
- // FIXME the default copy constructor seems to be bugged with std::complex<>
- inline ei_scalar_pow_op(const ei_scalar_pow_op& other) : m_exponent(other.m_exponent) { }
- inline ei_scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
- inline Scalar operator() (const Scalar& a) const { return ei_pow(a, m_exponent); }
- const Scalar m_exponent;
-private:
- ei_scalar_pow_op& operator=(const ei_scalar_pow_op&);
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_pow_op<Scalar> >
-{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to compute the inverse of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::inverse()
- */
-template<typename Scalar>
-struct ei_scalar_inverse_op {
- inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; }
- template<typename PacketScalar>
- inline const PacketScalar packetOp(const PacketScalar& a) const
- { return ei_pdiv(ei_pset1(Scalar(1)),a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_inverse_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = int(ei_packet_traits<Scalar>::size)>1 }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to compute the square of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::square()
- */
-template<typename Scalar>
-struct ei_scalar_square_op {
- inline Scalar operator() (const Scalar& a) const { return a*a; }
- template<typename PacketScalar>
- inline const PacketScalar packetOp(const PacketScalar& a) const
- { return ei_pmul(a,a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_square_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = int(ei_packet_traits<Scalar>::size)>1 }; };
-
-/** \internal
- *
- * \array_module
- *
- * \brief Template functor to compute the cube of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::cube()
- */
-template<typename Scalar>
-struct ei_scalar_cube_op {
- inline Scalar operator() (const Scalar& a) const { return a*a*a; }
- template<typename PacketScalar>
- inline const PacketScalar packetOp(const PacketScalar& a) const
- { return ei_pmul(a,ei_pmul(a,a)); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_cube_op<Scalar> >
-{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = int(ei_packet_traits<Scalar>::size)>1 }; };
-
-// default ei_functor_traits for STL functors:
-
-template<typename T>
-struct ei_functor_traits<std::multiplies<T> >
-{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::divides<T> >
-{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::plus<T> >
-{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::minus<T> >
-{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::negate<T> >
-{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::logical_or<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::logical_and<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::logical_not<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::greater<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::less<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::greater_equal<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::less_equal<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::equal_to<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::not_equal_to<T> >
-{ enum { Cost = 1, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::binder2nd<T> >
-{ enum { Cost = ei_functor_traits<T>::Cost, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::binder1st<T> >
-{ enum { Cost = ei_functor_traits<T>::Cost, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::unary_negate<T> >
-{ enum { Cost = 1 + ei_functor_traits<T>::Cost, PacketAccess = false }; };
-
-template<typename T>
-struct ei_functor_traits<std::binary_negate<T> >
-{ enum { Cost = 1 + ei_functor_traits<T>::Cost, PacketAccess = false }; };
-
-#ifdef EIGEN_STDEXT_SUPPORT
-
-template<typename T0,typename T1>
-struct ei_functor_traits<std::project1st<T0,T1> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-template<typename T0,typename T1>
-struct ei_functor_traits<std::project2nd<T0,T1> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-template<typename T0,typename T1>
-struct ei_functor_traits<std::select2nd<std::pair<T0,T1> > >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-template<typename T0,typename T1>
-struct ei_functor_traits<std::select1st<std::pair<T0,T1> > >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-template<typename T0,typename T1>
-struct ei_functor_traits<std::unary_compose<T0,T1> >
-{ enum { Cost = ei_functor_traits<T0>::Cost + ei_functor_traits<T1>::Cost, PacketAccess = false }; };
-
-template<typename T0,typename T1,typename T2>
-struct ei_functor_traits<std::binary_compose<T0,T1,T2> >
-{ enum { Cost = ei_functor_traits<T0>::Cost + ei_functor_traits<T1>::Cost + ei_functor_traits<T2>::Cost, PacketAccess = false }; };
-
-#endif // EIGEN_STDEXT_SUPPORT
-
-#endif // EIGEN_ARRAY_FUNCTORS_H
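The functors above all follow one pattern: a per-coefficient operator() plus an ei_functor_traits specialization describing cost and packet support. A sketch of a user-side functor in the same style (clamp01_op is a hypothetical name, not part of Eigen2; without an explicit ei_functor_traits specialization, Eigen2 falls back to its generic default):

    #include <Eigen/Core>
    #include <iostream>
    using namespace Eigen;

    // Hypothetical user functor in the style of the deleted ei_scalar_* functors:
    // stateless, with a per-coefficient operator().
    struct clamp01_op {
      double operator()(double a) const { return a < 0.0 ? 0.0 : (a > 1.0 ? 1.0 : a); }
    };

    int main()
    {
      Matrix2d m;
      m << -0.5, 0.25,
            0.75, 1.5;
      std::cout << m.unaryExpr(clamp01_op()) << "\n"; // each coefficient routed through the functor
      return 0;
    }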
diff --git a/extern/Eigen2/Eigen/src/Array/Norms.h b/extern/Eigen2/Eigen/src/Array/Norms.h
deleted file mode 100644
index 6b92e6a099d..00000000000
--- a/extern/Eigen2/Eigen/src/Array/Norms.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_ARRAY_NORMS_H
-#define EIGEN_ARRAY_NORMS_H
-
-template<typename Derived, int p>
-struct ei_lpNorm_selector
-{
- typedef typename NumTraits<typename ei_traits<Derived>::Scalar>::Real RealScalar;
- inline static RealScalar run(const MatrixBase<Derived>& m)
- {
- return ei_pow(m.cwise().abs().cwise().pow(p).sum(), RealScalar(1)/p);
- }
-};
-
-template<typename Derived>
-struct ei_lpNorm_selector<Derived, 1>
-{
- inline static typename NumTraits<typename ei_traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
- {
- return m.cwise().abs().sum();
- }
-};
-
-template<typename Derived>
-struct ei_lpNorm_selector<Derived, 2>
-{
- inline static typename NumTraits<typename ei_traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
- {
- return m.norm();
- }
-};
-
-template<typename Derived>
-struct ei_lpNorm_selector<Derived, Infinity>
-{
- inline static typename NumTraits<typename ei_traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
- {
- return m.cwise().abs().maxCoeff();
- }
-};
-
-/** \array_module
- *
- * \returns the \f$ \ell^p \f$ norm of *this, that is, the p-th root of the sum of the p-th powers of the absolute values
- * of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$
- * norm, that is, the maximum of the absolute values of the coefficients of *this.
- *
- * \sa norm()
- */
-template<typename Derived>
-template<int p>
-inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<Derived>::lpNorm() const
-{
- return ei_lpNorm_selector<Derived, p>::run(*this);
-}
-
-#endif // EIGEN_ARRAY_NORMS_H
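A short usage sketch of the lpNorm() member deleted above, assuming the Eigen2 Array module:

    #include <Eigen/Array>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
      Vector3d v(1.0, -2.0, 3.0);
      std::cout << v.lpNorm<1>()        << "\n"; // sum of absolute values: 6
      std::cout << v.lpNorm<2>()        << "\n"; // same as v.norm()
      std::cout << v.lpNorm<Infinity>() << "\n"; // largest absolute value: 3
      return 0;
    }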
diff --git a/extern/Eigen2/Eigen/src/Array/PartialRedux.h b/extern/Eigen2/Eigen/src/Array/PartialRedux.h
deleted file mode 100644
index 3a052ca8a3d..00000000000
--- a/extern/Eigen2/Eigen/src/Array/PartialRedux.h
+++ /dev/null
@@ -1,347 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_PARTIAL_REDUX_H
-#define EIGEN_PARTIAL_REDUX_H
-
-/** \array_module \ingroup Array
- *
- * \class PartialReduxExpr
- *
- * \brief Generic expression of a partially reduxed matrix
- *
- * \param MatrixType the type of the matrix we are applying the redux operation
- * \param MemberOp type of the member functor
- * \param Direction indicates the direction of the redux (Vertical or Horizontal)
- *
- * This class represents an expression of a partial redux operator of a matrix.
- * It is the return type of PartialRedux functions,
- * and most of the time this is the only way it is used.
- *
- * \sa class PartialRedux
- */
-
-template< typename MatrixType, typename MemberOp, int Direction>
-class PartialReduxExpr;
-
-template<typename MatrixType, typename MemberOp, int Direction>
-struct ei_traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
-{
- typedef typename MemberOp::result_type Scalar;
- typedef typename MatrixType::Scalar InputScalar;
- typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
- typedef typename ei_cleantype<MatrixTypeNested>::type _MatrixTypeNested;
- enum {
- RowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::RowsAtCompileTime,
- ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime,
- MaxRowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime,
- Flags = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits,
- TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime
- };
- #if EIGEN_GNUC_AT_LEAST(3,4)
- typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
- #else
- typedef typename MemberOp::template Cost<InputScalar,TraversalSize> CostOpType;
- #endif
- enum {
- CoeffReadCost = TraversalSize * ei_traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
- };
-};
-
-template< typename MatrixType, typename MemberOp, int Direction>
-class PartialReduxExpr : ei_no_assignment_operator,
- public MatrixBase<PartialReduxExpr<MatrixType, MemberOp, Direction> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(PartialReduxExpr)
- typedef typename ei_traits<PartialReduxExpr>::MatrixTypeNested MatrixTypeNested;
- typedef typename ei_traits<PartialReduxExpr>::_MatrixTypeNested _MatrixTypeNested;
-
- PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
- : m_matrix(mat), m_functor(func) {}
-
- int rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
- int cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
-
- const Scalar coeff(int i, int j) const
- {
- if (Direction==Vertical)
- return m_functor(m_matrix.col(j));
- else
- return m_functor(m_matrix.row(i));
- }
-
- protected:
- const MatrixTypeNested m_matrix;
- const MemberOp m_functor;
-};
-
-#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \
- template <typename ResultType> \
- struct ei_member_##MEMBER EIGEN_EMPTY_STRUCT { \
- typedef ResultType result_type; \
- template<typename Scalar, int Size> struct Cost \
- { enum { value = COST }; }; \
- template<typename Derived> \
- inline ResultType operator()(const MatrixBase<Derived>& mat) const \
- { return mat.MEMBER(); } \
- }
-
-EIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits<Scalar>::AddCost);
-EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits<Scalar>::AddCost);
-
-/** \internal */
-template <typename BinaryOp, typename Scalar>
-struct ei_member_redux {
- typedef typename ei_result_of<
- BinaryOp(Scalar)
- >::type result_type;
- template<typename _Scalar, int Size> struct Cost
- { enum { value = (Size-1) * ei_functor_traits<BinaryOp>::Cost }; };
- ei_member_redux(const BinaryOp func) : m_functor(func) {}
- template<typename Derived>
- inline result_type operator()(const MatrixBase<Derived>& mat) const
- { return mat.redux(m_functor); }
- const BinaryOp m_functor;
-private:
- ei_member_redux& operator=(const ei_member_redux&);
-};
-
-/** \array_module \ingroup Array
- *
- * \class PartialRedux
- *
- * \brief Pseudo expression providing partial reduction operations
- *
- * \param ExpressionType the type of the object on which to do partial reductions
- * \param Direction indicates the direction of the redux (Vertical or Horizontal)
- *
- * This class represents a pseudo expression with partial reduction features.
- * It is the return type of MatrixBase::colwise() and MatrixBase::rowwise()
- * and most of the time this is the only way it is used.
- *
- * Example: \include MatrixBase_colwise.cpp
- * Output: \verbinclude MatrixBase_colwise.out
- *
- * \sa MatrixBase::colwise(), MatrixBase::rowwise(), class PartialReduxExpr
- */
-template<typename ExpressionType, int Direction> class PartialRedux
-{
- public:
-
- typedef typename ei_traits<ExpressionType>::Scalar Scalar;
- typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
- ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
-
- template<template<typename _Scalar> class Functor> struct ReturnType
- {
- typedef PartialReduxExpr<ExpressionType,
- Functor<typename ei_traits<ExpressionType>::Scalar>,
- Direction
- > Type;
- };
-
- template<typename BinaryOp> struct ReduxReturnType
- {
- typedef PartialReduxExpr<ExpressionType,
- ei_member_redux<BinaryOp,typename ei_traits<ExpressionType>::Scalar>,
- Direction
- > Type;
- };
-
- typedef typename ExpressionType::PlainMatrixType CrossReturnType;
-
- inline PartialRedux(const ExpressionType& matrix) : m_matrix(matrix) {}
-
- /** \internal */
- inline const ExpressionType& _expression() const { return m_matrix; }
-
- template<typename BinaryOp>
- const typename ReduxReturnType<BinaryOp>::Type
- redux(const BinaryOp& func = BinaryOp()) const;
-
- /** \returns a row (or column) vector expression of the smallest coefficient
- * of each column (or row) of the referenced expression.
- *
- * Example: \include PartialRedux_minCoeff.cpp
- * Output: \verbinclude PartialRedux_minCoeff.out
- *
- * \sa MatrixBase::minCoeff() */
- const typename ReturnType<ei_member_minCoeff>::Type minCoeff() const
- { return _expression(); }
-
- /** \returns a row (or column) vector expression of the largest coefficient
- * of each column (or row) of the referenced expression.
- *
- * Example: \include PartialRedux_maxCoeff.cpp
- * Output: \verbinclude PartialRedux_maxCoeff.out
- *
- * \sa MatrixBase::maxCoeff() */
- const typename ReturnType<ei_member_maxCoeff>::Type maxCoeff() const
- { return _expression(); }
-
- /** \returns a row (or column) vector expression of the squared norm
- * of each column (or row) of the referenced expression.
- *
- * Example: \include PartialRedux_squaredNorm.cpp
- * Output: \verbinclude PartialRedux_squaredNorm.out
- *
- * \sa MatrixBase::squaredNorm() */
- const typename ReturnType<ei_member_squaredNorm>::Type squaredNorm() const
- { return _expression(); }
-
- /** \returns a row (or column) vector expression of the norm
- * of each column (or row) of the referenced expression.
- *
- * Example: \include PartialRedux_norm.cpp
- * Output: \verbinclude PartialRedux_norm.out
- *
- * \sa MatrixBase::norm() */
- const typename ReturnType<ei_member_norm>::Type norm() const
- { return _expression(); }
-
- /** \returns a row (or column) vector expression of the sum
- * of each column (or row) of the referenced expression.
- *
- * Example: \include PartialRedux_sum.cpp
- * Output: \verbinclude PartialRedux_sum.out
- *
- * \sa MatrixBase::sum() */
- const typename ReturnType<ei_member_sum>::Type sum() const
- { return _expression(); }
-
- /** \returns a row (or column) vector expression representing
- * whether \b all coefficients of each respective column (or row) are \c true.
- *
- * \sa MatrixBase::all() */
- const typename ReturnType<ei_member_all>::Type all() const
- { return _expression(); }
-
- /** \returns a row (or column) vector expression representing
- * whether \b at \b least one coefficient of each respective column (or row) is \c true.
- *
- * \sa MatrixBase::any() */
- const typename ReturnType<ei_member_any>::Type any() const
- { return _expression(); }
-
- /** \returns a row (or column) vector expression representing
- * the number of \c true coefficients of each respective column (or row).
- *
- * Example: \include PartialRedux_count.cpp
- * Output: \verbinclude PartialRedux_count.out
- *
- * \sa MatrixBase::count() */
- const PartialReduxExpr<ExpressionType, ei_member_count<int>, Direction> count() const
- { return _expression(); }
-
- /** \returns a 3x3 matrix expression of the cross product
- * of each column or row of the referenced expression with the \a other vector.
- *
- * \geometry_module
- *
- * \sa MatrixBase::cross() */
- template<typename OtherDerived>
- const CrossReturnType cross(const MatrixBase<OtherDerived>& other) const
- {
- EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(CrossReturnType,3,3)
- EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
- EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
- if(Direction==Vertical)
- return (CrossReturnType()
- << _expression().col(0).cross(other),
- _expression().col(1).cross(other),
- _expression().col(2).cross(other)).finished();
- else
- return (CrossReturnType()
- << _expression().row(0).cross(other),
- _expression().row(1).cross(other),
- _expression().row(2).cross(other)).finished();
- }
-
- protected:
- ExpressionTypeNested m_matrix;
-
- private:
- PartialRedux& operator=(const PartialRedux&);
-};
-
-/** \array_module
- *
- * \returns a PartialRedux wrapper of *this providing additional partial reduction operations
- *
- * Example: \include MatrixBase_colwise.cpp
- * Output: \verbinclude MatrixBase_colwise.out
- *
- * \sa rowwise(), class PartialRedux
- */
-template<typename Derived>
-inline const PartialRedux<Derived,Vertical>
-MatrixBase<Derived>::colwise() const
-{
- return derived();
-}
-
-/** \array_module
- *
- * \returns a PartialRedux wrapper of *this providing additional partial reduction operations
- *
- * Example: \include MatrixBase_rowwise.cpp
- * Output: \verbinclude MatrixBase_rowwise.out
- *
- * \sa colwise(), class PartialRedux
- */
-template<typename Derived>
-inline const PartialRedux<Derived,Horizontal>
-MatrixBase<Derived>::rowwise() const
-{
- return derived();
-}
-
-/** \returns a row or column vector expression of \c *this reduxed by \a func
- *
- * The template parameter \a BinaryOp is the type of the functor
- * of the custom redux operator. Note that func must be an associative operator.
- *
- * \sa class PartialRedux, MatrixBase::colwise(), MatrixBase::rowwise()
- */
-template<typename ExpressionType, int Direction>
-template<typename BinaryOp>
-const typename PartialRedux<ExpressionType,Direction>::template ReduxReturnType<BinaryOp>::Type
-PartialRedux<ExpressionType,Direction>::redux(const BinaryOp& func) const
-{
- return typename ReduxReturnType<BinaryOp>::Type(_expression(), func);
-}
-
-#endif // EIGEN_PARTIAL_REDUX_H
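A minimal sketch of the colwise()/rowwise() pseudo expressions deleted above:

    #include <Eigen/Array>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
      Matrix3d m = Matrix3d::Random();
      std::cout << m.colwise().sum()      << "\n"; // row vector: one sum per column
      std::cout << m.rowwise().norm()     << "\n"; // column vector: one norm per row
      std::cout << m.colwise().maxCoeff() << "\n"; // row vector: per-column maximum
      return 0;
    }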
diff --git a/extern/Eigen2/Eigen/src/Cholesky/LDLT.h b/extern/Eigen2/Eigen/src/Cholesky/LDLT.h
deleted file mode 100644
index 205b78a6ded..00000000000
--- a/extern/Eigen2/Eigen/src/Cholesky/LDLT.h
+++ /dev/null
@@ -1,198 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_LDLT_H
-#define EIGEN_LDLT_H
-
-/** \ingroup cholesky_Module
- *
- * \class LDLT
- *
- * \brief Robust Cholesky decomposition of a matrix and associated features
- *
- * \param MatrixType the type of the matrix of which we are computing the LDL^T Cholesky decomposition
- *
- * This class performs a Cholesky decomposition without square root of a symmetric, positive definite
- * matrix A such that A = L D L^* = U^* D U, where L is lower triangular with a unit diagonal
- * and D is a diagonal matrix.
- *
- * Compared to a standard Cholesky decomposition, avoiding the square roots allows for faster and more
- * stable computation.
- *
- * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
- * the strict lower part does not have to store correct values.
- *
- * \sa MatrixBase::ldlt(), class LLT
- */
-template<typename MatrixType> class LDLT
-{
- public:
-
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType;
-
- LDLT(const MatrixType& matrix)
- : m_matrix(matrix.rows(), matrix.cols())
- {
- compute(matrix);
- }
-
- /** \returns the lower triangular matrix L */
- inline Part<MatrixType, UnitLowerTriangular> matrixL(void) const { return m_matrix; }
-
- /** \returns the coefficients of the diagonal matrix D */
- inline DiagonalCoeffs<MatrixType> vectorD(void) const { return m_matrix.diagonal(); }
-
- /** \returns true if the matrix is positive definite */
- inline bool isPositiveDefinite(void) const { return m_isPositiveDefinite; }
-
- template<typename RhsDerived, typename ResultType>
- bool solve(const MatrixBase<RhsDerived> &b, ResultType *result) const;
-
- template<typename Derived>
- bool solveInPlace(MatrixBase<Derived> &bAndX) const;
-
- void compute(const MatrixType& matrix);
-
- protected:
- /** \internal
- * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
- * The strict upper part is used during the decomposition; the strict lower
- * part corresponds to the coefficients of L (its diagonal is equal to 1 and
- * is not stored), and the diagonal entries correspond to D.
- */
- MatrixType m_matrix;
-
- bool m_isPositiveDefinite;
-};
-
-/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix
- */
-template<typename MatrixType>
-void LDLT<MatrixType>::compute(const MatrixType& a)
-{
- assert(a.rows()==a.cols());
- const int size = a.rows();
- m_matrix.resize(size, size);
- m_isPositiveDefinite = true;
- const RealScalar eps = ei_sqrt(precision<Scalar>());
-
- if (size<=1)
- {
- m_matrix = a;
- return;
- }
-
- // Let's preallocate a temporary vector to evaluate the matrix-vector product into it.
- // Unlike the standard LLT decomposition, here we cannot evaluate it into the destination
- // matrix because it is a sub-row, which is not suitable for efficient packet evaluation
- // (at least if we assume the matrix is col-major).
- Matrix<Scalar,MatrixType::RowsAtCompileTime,1> _temporary(size);
-
- // Note that in this algorithm the rows of the strict upper part of m_matrix are used to store
- // column vectors, hence the strange .conjugate() and .transpose()...
-
- m_matrix.row(0) = a.row(0).conjugate();
- m_matrix.col(0).end(size-1) = m_matrix.row(0).end(size-1) / m_matrix.coeff(0,0);
- for (int j = 1; j < size; ++j)
- {
- RealScalar tmp = ei_real(a.coeff(j,j) - (m_matrix.row(j).start(j) * m_matrix.col(j).start(j).conjugate()).coeff(0,0));
- m_matrix.coeffRef(j,j) = tmp;
-
- if (tmp < eps)
- {
- m_isPositiveDefinite = false;
- return;
- }
-
- int endSize = size-j-1;
- if (endSize>0)
- {
- _temporary.end(endSize) = ( m_matrix.block(j+1,0, endSize, j)
- * m_matrix.col(j).start(j).conjugate() ).lazy();
-
- m_matrix.row(j).end(endSize) = a.row(j).end(endSize).conjugate()
- - _temporary.end(endSize).transpose();
-
- m_matrix.col(j).end(endSize) = m_matrix.row(j).end(endSize) / tmp;
- }
- }
-}
-
-/** Computes the solution x of \f$ A x = b \f$ using the current decomposition of A.
- * The result is stored in \a result
- *
- * \returns true in case of success, false otherwise.
- *
- * In other words, it computes \f$ x = A^{-1} b \f$ by applying
- * \f$ {L^{*}}^{-1} D^{-1} L^{-1} \f$ to \f$ b \f$ from right to left.
- *
- * \sa LDLT::solveInPlace(), MatrixBase::ldlt()
- */
-template<typename MatrixType>
-template<typename RhsDerived, typename ResultType>
-bool LDLT<MatrixType>
-::solve(const MatrixBase<RhsDerived> &b, ResultType *result) const
-{
- const int size = m_matrix.rows();
- ei_assert(size==b.rows() && "LDLT::solve(): invalid number of rows of the right hand side matrix b");
- *result = b;
- return solveInPlace(*result);
-}
-
-/** This is the \em in-place version of solve().
- *
- * \param bAndX represents both the right-hand side matrix b and result x.
- *
- * This version avoids a copy when the right hand side matrix b is not
- * needed anymore.
- *
- * \sa LDLT::solve(), MatrixBase::ldlt()
- */
-template<typename MatrixType>
-template<typename Derived>
-bool LDLT<MatrixType>::solveInPlace(MatrixBase<Derived> &bAndX) const
-{
- const int size = m_matrix.rows();
- ei_assert(size==bAndX.rows());
- if (!m_isPositiveDefinite)
- return false;
- matrixL().solveTriangularInPlace(bAndX);
- bAndX = (m_matrix.cwise().inverse().template part<Diagonal>() * bAndX).lazy();
- m_matrix.adjoint().template part<UnitUpperTriangular>().solveTriangularInPlace(bAndX);
- return true;
-}
-
-/** \cholesky_module
- * \returns the Cholesky decomposition without square root of \c *this
- */
-template<typename Derived>
-inline const LDLT<typename MatrixBase<Derived>::PlainMatrixType>
-MatrixBase<Derived>::ldlt() const
-{
- return derived();
-}
-
-#endif // EIGEN_LDLT_H
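A usage sketch of the LDLT class deleted above; note that Eigen2's solve() writes into a caller-provided result and reports failure through its bool return:

    #include <Eigen/Cholesky>
    using namespace Eigen;

    int main()
    {
      Matrix2d A;
      A << 4, 1,
           1, 3;                        // symmetric positive definite
      Vector2d b(1, 2), x;
      LDLT<Matrix2d> ldlt(A);           // decomposition without square roots
      if (!ldlt.solve(b, &x)) return 1; // false if A was not positive definite
      return (A * x).isApprox(b) ? 0 : 1;
    }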
diff --git a/extern/Eigen2/Eigen/src/Cholesky/LLT.h b/extern/Eigen2/Eigen/src/Cholesky/LLT.h
deleted file mode 100644
index 42c959f83a2..00000000000
--- a/extern/Eigen2/Eigen/src/Cholesky/LLT.h
+++ /dev/null
@@ -1,219 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_LLT_H
-#define EIGEN_LLT_H
-
-/** \ingroup cholesky_Module
- *
- * \class LLT
- *
- * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features
- *
- * \param MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition
- *
- * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite
- * matrix A such that A = LL^* = U^*U, where L is lower triangular.
- *
- * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b,
- * for that purpose we recommend the Cholesky decomposition without square root (LDLT), which is more
- * stable and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many
- * other situations, such as generalised eigenproblems with hermitian matrices.
- *
- * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices,
- * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations
- * has a solution.
- *
- * \sa MatrixBase::llt(), class LDLT
- */
- /* HEY THIS DOX IS DISABLED BECAUSE THERE'S A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)
- * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
- * the strict lower part does not have to store correct values.
- */
-template<typename MatrixType> class LLT
-{
- private:
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType;
-
- enum {
- PacketSize = ei_packet_traits<Scalar>::size,
- AlignmentMask = int(PacketSize)-1
- };
-
- public:
-
- /**
- * \brief Default Constructor.
- *
- * The default constructor is useful in cases in which the user intends to
- * perform decompositions via LLT::compute(const MatrixType&).
- */
- LLT() : m_matrix(), m_isInitialized(false) {}
-
- LLT(const MatrixType& matrix)
- : m_matrix(matrix.rows(), matrix.cols()),
- m_isInitialized(false)
- {
- compute(matrix);
- }
-
- /** \returns the lower triangular matrix L */
- inline Part<MatrixType, LowerTriangular> matrixL(void) const
- {
- ei_assert(m_isInitialized && "LLT is not initialized.");
- return m_matrix;
- }
-
- /** \deprecated */
- inline bool isPositiveDefinite(void) const { return m_isInitialized && m_isPositiveDefinite; }
-
- template<typename RhsDerived, typename ResultType>
- bool solve(const MatrixBase<RhsDerived> &b, ResultType *result) const;
-
- template<typename Derived>
- bool solveInPlace(MatrixBase<Derived> &bAndX) const;
-
- void compute(const MatrixType& matrix);
-
- protected:
- /** \internal
- * Used to compute and store L
- * The strict upper part is not used and even not initialized.
- */
- MatrixType m_matrix;
- bool m_isInitialized;
- bool m_isPositiveDefinite;
-};
-
-/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix
- */
-template<typename MatrixType>
-void LLT<MatrixType>::compute(const MatrixType& a)
-{
- assert(a.rows()==a.cols());
- m_isPositiveDefinite = true;
- const int size = a.rows();
- m_matrix.resize(size, size);
- // The biggest diagonal coefficient overall is the point of reference to which
- // further diagonal entries are compared; if any diagonal entry is negligible
- // compared to the largest one, the algorithm bails out. This cutoff is suggested
- // in "Analysis of the Cholesky Decomposition of a Semi-definite Matrix" by
- // Nicholas J. Higham. Also see "Accuracy and Stability of Numerical
- // Algorithms" page 217, also by Higham.
- const RealScalar cutoff = machine_epsilon<Scalar>() * size * a.diagonal().cwise().abs().maxCoeff();
- RealScalar x;
- x = ei_real(a.coeff(0,0));
- m_matrix.coeffRef(0,0) = ei_sqrt(x);
- if(size==1)
- {
- m_isInitialized = true;
- return;
- }
- m_matrix.col(0).end(size-1) = a.row(0).end(size-1).adjoint() / ei_real(m_matrix.coeff(0,0));
- for (int j = 1; j < size; ++j)
- {
- x = ei_real(a.coeff(j,j)) - m_matrix.row(j).start(j).squaredNorm();
- if (x < cutoff)
- {
- m_isPositiveDefinite = false;
- continue;
- }
-
- m_matrix.coeffRef(j,j) = x = ei_sqrt(x);
-
- int endSize = size-j-1;
- if (endSize>0) {
- // Note that when all matrix columns have good alignment, the following
- // product is guaranteed to be optimal with respect to alignment.
- m_matrix.col(j).end(endSize) =
- (m_matrix.block(j+1, 0, endSize, j) * m_matrix.row(j).start(j).adjoint()).lazy();
-
- // FIXME could use a.col instead of a.row
- m_matrix.col(j).end(endSize) = (a.row(j).end(endSize).adjoint()
- - m_matrix.col(j).end(endSize) ) / x;
- }
- }
-
- m_isInitialized = true;
-}
-
-/** Computes the solution x of \f$ A x = b \f$ using the current decomposition of A.
- * The result is stored in \a result
- *
- * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.
- *
- * In other words, it computes \f$ x = A^{-1} b \f$ by applying
- * \f$ {L^{*}}^{-1} L^{-1} \f$ to \f$ b \f$ from right to left.
- *
- * Example: \include LLT_solve.cpp
- * Output: \verbinclude LLT_solve.out
- *
- * \sa LLT::solveInPlace(), MatrixBase::llt()
- */
-template<typename MatrixType>
-template<typename RhsDerived, typename ResultType>
-bool LLT<MatrixType>::solve(const MatrixBase<RhsDerived> &b, ResultType *result) const
-{
- ei_assert(m_isInitialized && "LLT is not initialized.");
- const int size = m_matrix.rows();
- ei_assert(size==b.rows() && "LLT::solve(): invalid number of rows of the right hand side matrix b");
- return solveInPlace((*result) = b);
-}
-
-/** This is the \em in-place version of solve().
- *
- * \param bAndX represents both the right-hand side matrix b and result x.
- *
- * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.
- *
- * This version avoids a copy when the right hand side matrix b is not
- * needed anymore.
- *
- * \sa LLT::solve(), MatrixBase::llt()
- */
-template<typename MatrixType>
-template<typename Derived>
-bool LLT<MatrixType>::solveInPlace(MatrixBase<Derived> &bAndX) const
-{
- ei_assert(m_isInitialized && "LLT is not initialized.");
- const int size = m_matrix.rows();
- ei_assert(size==bAndX.rows());
- matrixL().solveTriangularInPlace(bAndX);
- m_matrix.adjoint().template part<UpperTriangular>().solveTriangularInPlace(bAndX);
- return true;
-}
-
-/** \cholesky_module
- * \returns the LLT decomposition of \c *this
- */
-template<typename Derived>
-inline const LLT<typename MatrixBase<Derived>::PlainMatrixType>
-MatrixBase<Derived>::llt() const
-{
- return LLT<PlainMatrixType>(derived());
-}
-
-#endif // EIGEN_LLT_H
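And the same sketch for the standard LLT variant; unlike LDLT::solve(), this solve() always returns true, so existence of a solution must be checked separately:

    #include <Eigen/Cholesky>
    using namespace Eigen;

    int main()
    {
      Matrix2d A;
      A << 4, 1,
           1, 3;                  // symmetric positive definite
      Vector2d b(1, 2), x;
      A.llt().solve(b, &x);       // always returns true; no rank check is performed
      return (A * x).isApprox(b) ? 0 : 1;
    }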
diff --git a/extern/Eigen2/Eigen/src/Core/Assign.h b/extern/Eigen2/Eigen/src/Core/Assign.h
deleted file mode 100644
index 57205075596..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Assign.h
+++ /dev/null
@@ -1,445 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_ASSIGN_H
-#define EIGEN_ASSIGN_H
-
-/***************************************************************************
-* Part 1 : the logic deciding a strategy for vectorization and unrolling
-***************************************************************************/
-
-template <typename Derived, typename OtherDerived>
-struct ei_assign_traits
-{
-public:
- enum {
- DstIsAligned = Derived::Flags & AlignedBit,
- SrcIsAligned = OtherDerived::Flags & AlignedBit,
- SrcAlignment = DstIsAligned && SrcIsAligned ? Aligned : Unaligned
- };
-
-private:
- enum {
- InnerSize = int(Derived::Flags)&RowMajorBit
- ? Derived::ColsAtCompileTime
- : Derived::RowsAtCompileTime,
- InnerMaxSize = int(Derived::Flags)&RowMajorBit
- ? Derived::MaxColsAtCompileTime
- : Derived::MaxRowsAtCompileTime,
- PacketSize = ei_packet_traits<typename Derived::Scalar>::size
- };
-
- enum {
- MightVectorize = (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit)
- && ((int(Derived::Flags)&RowMajorBit)==(int(OtherDerived::Flags)&RowMajorBit)),
- MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
- && int(DstIsAligned) && int(SrcIsAligned),
- MayLinearVectorize = MightVectorize && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit),
- MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize /* slice vectorization can be slow, so we only
- want it if the slices are big, which is indicated by InnerMaxSize rather than InnerSize; think of the case
- of a dynamic block in a fixed-size matrix */
- };
-
-public:
- enum {
- Vectorization = int(MayInnerVectorize) ? int(InnerVectorization)
- : int(MayLinearVectorize) ? int(LinearVectorization)
- : int(MaySliceVectorize) ? int(SliceVectorization)
- : int(NoVectorization)
- };
-
-private:
- enum {
- UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize)),
- MayUnrollCompletely = int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit),
- MayUnrollInner = int(InnerSize * OtherDerived::CoeffReadCost) <= int(UnrollingLimit)
- };
-
-public:
- enum {
- Unrolling = (int(Vectorization) == int(InnerVectorization) || int(Vectorization) == int(NoVectorization))
- ? (
- int(MayUnrollCompletely) ? int(CompleteUnrolling)
- : int(MayUnrollInner) ? int(InnerUnrolling)
- : int(NoUnrolling)
- )
- : int(Vectorization) == int(LinearVectorization)
- ? ( int(MayUnrollCompletely) && int(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) )
- : int(NoUnrolling)
- };
-};
-
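To make the selection above concrete, here is a minimal sketch (an assumption for illustration: Eigen 2 on the include path and an SSE build where a float packet holds 4 scalars; the matrices and `main` are not from this file):

    #include <Eigen/Core>
    using namespace Eigen;

    int main()
    {
      // For a fixed-size, aligned 4x4 float matrix, InnerSize == 4 is a multiple
      // of the packet size and both sides are aligned, so Vectorization should
      // resolve to InnerVectorization; 16 coefficients at unit read cost is far
      // below the unrolling limit, so Unrolling should resolve to CompleteUnrolling.
      Matrix4f a, b;
      b.setConstant(1.f);
      a = b;  // should compile to unrolled, aligned packet copies
      return a.isApprox(b) ? 0 : 1;
    }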
-/***************************************************************************
-* Part 2 : meta-unrollers
-***************************************************************************/
-
-/***********************
-*** No vectorization ***
-***********************/
-
-template<typename Derived1, typename Derived2, int Index, int Stop>
-struct ei_assign_novec_CompleteUnrolling
-{
- enum {
- row = int(Derived1::Flags)&RowMajorBit
- ? Index / int(Derived1::ColsAtCompileTime)
- : Index % Derived1::RowsAtCompileTime,
- col = int(Derived1::Flags)&RowMajorBit
- ? Index % int(Derived1::ColsAtCompileTime)
- : Index / Derived1::RowsAtCompileTime
- };
-
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
- {
- dst.copyCoeff(row, col, src);
- ei_assign_novec_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
- }
-};
-
-template<typename Derived1, typename Derived2, int Stop>
-struct ei_assign_novec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {}
-};
-
-template<typename Derived1, typename Derived2, int Index, int Stop>
-struct ei_assign_novec_InnerUnrolling
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int row_or_col)
- {
- const bool rowMajor = int(Derived1::Flags)&RowMajorBit;
- const int row = rowMajor ? row_or_col : Index;
- const int col = rowMajor ? Index : row_or_col;
- dst.copyCoeff(row, col, src);
- ei_assign_novec_InnerUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src, row_or_col);
- }
-};
-
-template<typename Derived1, typename Derived2, int Stop>
-struct ei_assign_novec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {}
-};
-
-/**************************
-*** Inner vectorization ***
-**************************/
-
-template<typename Derived1, typename Derived2, int Index, int Stop>
-struct ei_assign_innervec_CompleteUnrolling
-{
- enum {
- row = int(Derived1::Flags)&RowMajorBit
- ? Index / int(Derived1::ColsAtCompileTime)
- : Index % Derived1::RowsAtCompileTime,
- col = int(Derived1::Flags)&RowMajorBit
- ? Index % int(Derived1::ColsAtCompileTime)
- : Index / Derived1::RowsAtCompileTime,
- SrcAlignment = ei_assign_traits<Derived1,Derived2>::SrcAlignment
- };
-
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
- {
- dst.template copyPacket<Derived2, Aligned, SrcAlignment>(row, col, src);
- ei_assign_innervec_CompleteUnrolling<Derived1, Derived2,
- Index+ei_packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src);
- }
-};
-
-template<typename Derived1, typename Derived2, int Stop>
-struct ei_assign_innervec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {}
-};
-
-template<typename Derived1, typename Derived2, int Index, int Stop>
-struct ei_assign_innervec_InnerUnrolling
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int row_or_col)
- {
- const int row = int(Derived1::Flags)&RowMajorBit ? row_or_col : Index;
- const int col = int(Derived1::Flags)&RowMajorBit ? Index : row_or_col;
- dst.template copyPacket<Derived2, Aligned, Aligned>(row, col, src);
- ei_assign_innervec_InnerUnrolling<Derived1, Derived2,
- Index+ei_packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src, row_or_col);
- }
-};
-
-template<typename Derived1, typename Derived2, int Stop>
-struct ei_assign_innervec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {}
-};
-
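The recursion pattern of these meta-unrollers can be reproduced outside Eigen. The following standalone sketch (copy_unrolled is a hypothetical name, not Eigen's) shows how the Index/Stop pair expands into straight-line assignments:

    #include <cstdio>

    // Standalone re-creation of the unroller pattern above, over a plain array.
    template<int Index, int Stop>
    struct copy_unrolled
    {
      static void run(float* dst, const float* src)
      {
        dst[Index] = src[Index];
        copy_unrolled<Index + 1, Stop>::run(dst, src);
      }
    };

    // The Index == Stop specialization terminates the recursion, exactly like
    // the empty run() specializations above.
    template<int Stop>
    struct copy_unrolled<Stop, Stop>
    {
      static void run(float*, const float*) {}
    };

    int main()
    {
      float src[4] = {1, 2, 3, 4}, dst[4];
      copy_unrolled<0, 4>::run(dst, src);  // expands to four plain assignments
      std::printf("%g %g %g %g\n", dst[0], dst[1], dst[2], dst[3]);
      return 0;
    }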
-/***************************************************************************
-* Part 3 : implementation of all cases
-***************************************************************************/
-
-template<typename Derived1, typename Derived2,
- int Vectorization = ei_assign_traits<Derived1, Derived2>::Vectorization,
- int Unrolling = ei_assign_traits<Derived1, Derived2>::Unrolling>
-struct ei_assign_impl;
-
-/***********************
-*** No vectorization ***
-***********************/
-
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, NoVectorization, NoUnrolling>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- const int innerSize = dst.innerSize();
- const int outerSize = dst.outerSize();
- for(int j = 0; j < outerSize; ++j)
- for(int i = 0; i < innerSize; ++i)
- {
- if(int(Derived1::Flags)&RowMajorBit)
- dst.copyCoeff(j, i, src);
- else
- dst.copyCoeff(i, j, src);
- }
- }
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, NoVectorization, CompleteUnrolling>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
- {
- ei_assign_novec_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
- ::run(dst, src);
- }
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, NoVectorization, InnerUnrolling>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
- {
- const bool rowMajor = int(Derived1::Flags)&RowMajorBit;
- const int innerSize = rowMajor ? Derived1::ColsAtCompileTime : Derived1::RowsAtCompileTime;
- const int outerSize = dst.outerSize();
- for(int j = 0; j < outerSize; ++j)
- ei_assign_novec_InnerUnrolling<Derived1, Derived2, 0, innerSize>
- ::run(dst, src, j);
- }
-};
-
-/**************************
-*** Inner vectorization ***
-**************************/
-
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, InnerVectorization, NoUnrolling>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- const int innerSize = dst.innerSize();
- const int outerSize = dst.outerSize();
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- for(int j = 0; j < outerSize; ++j)
- for(int i = 0; i < innerSize; i+=packetSize)
- {
- if(int(Derived1::Flags)&RowMajorBit)
- dst.template copyPacket<Derived2, Aligned, Aligned>(j, i, src);
- else
- dst.template copyPacket<Derived2, Aligned, Aligned>(i, j, src);
- }
- }
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, InnerVectorization, CompleteUnrolling>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
- {
- ei_assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
- ::run(dst, src);
- }
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, InnerVectorization, InnerUnrolling>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
- {
- const bool rowMajor = int(Derived1::Flags)&RowMajorBit;
- const int innerSize = rowMajor ? Derived1::ColsAtCompileTime : Derived1::RowsAtCompileTime;
- const int outerSize = dst.outerSize();
- for(int j = 0; j < outerSize; ++j)
- ei_assign_innervec_InnerUnrolling<Derived1, Derived2, 0, innerSize>
- ::run(dst, src, j);
- }
-};
-
-/***************************
-*** Linear vectorization ***
-***************************/
-
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- const int size = dst.size();
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- const int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
- : ei_alignmentOffset(&dst.coeffRef(0), size);
- const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
-
- for(int index = 0; index < alignedStart; ++index)
- dst.copyCoeff(index, src);
-
- for(int index = alignedStart; index < alignedEnd; index += packetSize)
- {
- dst.template copyPacket<Derived2, Aligned, ei_assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
- }
-
- for(int index = alignedEnd; index < size; ++index)
- dst.copyCoeff(index, src);
- }
-};
-
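The head/packet/tail split computed above can be checked in isolation; the numbers below (11 floats, packets of 4, first aligned element at index 3) are assumptions for illustration:

    #include <cstdio>

    int main()
    {
      const int size = 11, packetSize = 4;
      const int alignedStart = 3;  // assumed result of ei_alignmentOffset
      const int alignedEnd = alignedStart
                           + ((size - alignedStart) / packetSize) * packetSize;
      // head [0,3) is copied per coefficient, [3,11) with two aligned packet
      // stores, and the scalar tail [11,11) is empty.
      std::printf("head [0,%d)  packets [%d,%d)  tail [%d,%d)\n",
                  alignedStart, alignedStart, alignedEnd, alignedEnd, size);
      return 0;
    }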
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, LinearVectorization, CompleteUnrolling>
-{
- EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
- {
- const int size = Derived1::SizeAtCompileTime;
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- const int alignedSize = (size/packetSize)*packetSize;
-
- ei_assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, alignedSize>::run(dst, src);
- ei_assign_novec_CompleteUnrolling<Derived1, Derived2, alignedSize, size>::run(dst, src);
- }
-};
-
-/**************************
-*** Slice vectorization ***
-***************************/
-
-template<typename Derived1, typename Derived2>
-struct ei_assign_impl<Derived1, Derived2, SliceVectorization, NoUnrolling>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
- const int packetAlignedMask = packetSize - 1;
- const int innerSize = dst.innerSize();
- const int outerSize = dst.outerSize();
- const int alignedStep = (packetSize - dst.stride() % packetSize) & packetAlignedMask;
- int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
- : ei_alignmentOffset(&dst.coeffRef(0,0), innerSize);
-
- for(int i = 0; i < outerSize; ++i)
- {
- const int alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
-
- // do the non-vectorizable part of the assignment
- for (int index = 0; index<alignedStart ; ++index)
- {
- if(Derived1::Flags&RowMajorBit)
- dst.copyCoeff(i, index, src);
- else
- dst.copyCoeff(index, i, src);
- }
-
- // do the vectorizable part of the assignment
- for (int index = alignedStart; index<alignedEnd; index+=packetSize)
- {
- if(Derived1::Flags&RowMajorBit)
- dst.template copyPacket<Derived2, Aligned, Unaligned>(i, index, src);
- else
- dst.template copyPacket<Derived2, Aligned, Unaligned>(index, i, src);
- }
-
- // do the non-vectorizable part of the assignment
- for (int index = alignedEnd; index<innerSize ; ++index)
- {
- if(Derived1::Flags&RowMajorBit)
- dst.copyCoeff(i, index, src);
- else
- dst.copyCoeff(index, i, src);
- }
-
- alignedStart = std::min<int>((alignedStart+alignedStep)%packetSize, innerSize);
- }
- }
-};
-
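The way alignedStart rotates from one outer slice to the next can also be traced standalone (assumed numbers: packets of 4 and a stride of 7, so alignedStep == 1):

    #include <algorithm>
    #include <cstdio>

    int main()
    {
      const int packetSize = 4, packetAlignedMask = packetSize - 1;
      const int stride = 7, innerSize = 7;   // assumed layout
      const int alignedStep = (packetSize - stride % packetSize) & packetAlignedMask;
      int alignedStart = 3;                  // assumed offset for the first slice
      for (int i = 0; i < 4; ++i)
      {
        std::printf("outer %d: first aligned inner index %d\n", i, alignedStart);
        // same update as at the bottom of the loop above: prints 3, 0, 1, 2
        alignedStart = std::min((alignedStart + alignedStep) % packetSize, innerSize);
      }
      return 0;
    }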
-/***************************************************************************
-* Part 4 : implementation of MatrixBase methods
-***************************************************************************/
-
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>
- ::lazyAssign(const MatrixBase<OtherDerived>& other)
-{
- EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
- EIGEN_STATIC_ASSERT((ei_is_same_type<typename Derived::Scalar, typename OtherDerived::Scalar>::ret),
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
- ei_assert(rows() == other.rows() && cols() == other.cols());
- ei_assign_impl<Derived, OtherDerived>::run(derived(),other.derived());
- return derived();
-}
-
-template<typename Derived, typename OtherDerived,
- bool EvalBeforeAssigning = (int(OtherDerived::Flags) & EvalBeforeAssigningBit) != 0,
- bool NeedToTranspose = Derived::IsVectorAtCompileTime
- && OtherDerived::IsVectorAtCompileTime
- && int(Derived::RowsAtCompileTime) == int(OtherDerived::ColsAtCompileTime)
- && int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime)
- && int(Derived::SizeAtCompileTime) != 1>
-struct ei_assign_selector;
-
-template<typename Derived, typename OtherDerived>
-struct ei_assign_selector<Derived,OtherDerived,false,false> {
- EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
-};
-template<typename Derived, typename OtherDerived>
-struct ei_assign_selector<Derived,OtherDerived,true,false> {
- EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); }
-};
-template<typename Derived, typename OtherDerived>
-struct ei_assign_selector<Derived,OtherDerived,false,true> {
- EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); }
-};
-template<typename Derived, typename OtherDerived>
-struct ei_assign_selector<Derived,OtherDerived,true,true> {
- EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); }
-};
-
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>
- ::operator=(const MatrixBase<OtherDerived>& other)
-{
- return ei_assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
-}
-
-#endif // EIGEN_ASSIGN_H
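A short sketch of how the four ei_assign_selector specializations in Part 4 are reached from user code (assuming Eigen 2; the comments state which branch each line should take):

    #include <Eigen/Core>
    using namespace Eigen;

    int main()
    {
      Matrix2f a, b;
      b.setIdentity();
      a = b;      // no EvalBeforeAssigningBit, no transpose: lazyAssign(b)
      a = b * b;  // products carry EvalBeforeAssigningBit: lazyAssign((b*b).eval())
      Vector2f v;
      RowVector2f r;
      r.setZero();
      v = r;      // 2x1 = 1x2 sets NeedToTranspose: lazyAssign(r.transpose())
      return 0;
    }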
diff --git a/extern/Eigen2/Eigen/src/Core/Block.h b/extern/Eigen2/Eigen/src/Core/Block.h
deleted file mode 100644
index 7f422aa5c07..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Block.h
+++ /dev/null
@@ -1,752 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_BLOCK_H
-#define EIGEN_BLOCK_H
-
-/** \class Block
- *
- * \brief Expression of a fixed-size or dynamic-size block
- *
- * \param MatrixType the type of the object in which we are taking a block
- * \param BlockRows the number of rows of the block we are taking at compile time (optional)
- * \param BlockCols the number of columns of the block we are taking at compile time (optional)
- * \param _PacketAccess allows enforcing aligned loads and stores if set to ForceAligned.
- * The default is AsRequested. This parameter is internally used by Eigen
- * in expressions such as \code mat.block() += other; \endcode and most of
- * the time this is the only way it is used.
- * \param _DirectAccessStatus \internal used for partial specialization
- *
- * This class represents an expression of either a fixed-size or dynamic-size block. It is the return
- * type of MatrixBase::block(int,int,int,int) and MatrixBase::block<int,int>(int,int) and
- * most of the time this is the only way it is used.
- *
- * However, if you want to directly manipulate block expressions,
- * for instance if you want to write a function returning such an expression, you
- * will need to use this class.
- *
- * Here is an example illustrating the dynamic case:
- * \include class_Block.cpp
- * Output: \verbinclude class_Block.out
- *
- * \note Even though this expression has dynamic size, in the case where \a MatrixType
- * has fixed size, this expression inherits a fixed maximal size, which means that evaluating
- * it does not cause a dynamic memory allocation.
- *
- * Here is an example illustrating the fixed-size case:
- * \include class_FixedBlock.cpp
- * Output: \verbinclude class_FixedBlock.out
- *
- * \sa MatrixBase::block(int,int,int,int), MatrixBase::block(int,int), class VectorBlock
- */
-
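As a usage sketch of both points above (the \include examples are not reproduced here; topLeft is a hypothetical helper written against the constructors defined below):

    #include <Eigen/Core>
    using namespace Eigen;

    // Writing a function that returns a block expression requires naming the
    // type; BlockReturnType, used throughout this file, is the usual spelling.
    template<typename Derived>
    typename BlockReturnType<Derived>::Type
    topLeft(MatrixBase<Derived>& m, int rows, int cols)
    {
      return typename BlockReturnType<Derived>::Type(m.derived(), 0, 0, rows, cols);
    }

    int main()
    {
      Matrix4f m;
      m.setIdentity();
      m.block(1, 1, 2, 2).setZero();  // dynamic-size block expression
      m.block<2, 2>(2, 2).setOnes();  // fixed-size block expression
      topLeft(m, 2, 2).setConstant(5.f);
      return 0;
    }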
-template<typename MatrixType, int BlockRows, int BlockCols, int _PacketAccess, int _DirectAccessStatus>
-struct ei_traits<Block<MatrixType, BlockRows, BlockCols, _PacketAccess, _DirectAccessStatus> >
-{
- typedef typename ei_traits<MatrixType>::Scalar Scalar;
- typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
- typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
- enum{
- RowsAtCompileTime = ei_traits<MatrixType>::RowsAtCompileTime == 1 ? 1 : BlockRows,
- ColsAtCompileTime = ei_traits<MatrixType>::ColsAtCompileTime == 1 ? 1 : BlockCols,
- MaxRowsAtCompileTime = RowsAtCompileTime == 1 ? 1
- : (BlockRows==Dynamic ? int(ei_traits<MatrixType>::MaxRowsAtCompileTime) : BlockRows),
- MaxColsAtCompileTime = ColsAtCompileTime == 1 ? 1
- : (BlockCols==Dynamic ? int(ei_traits<MatrixType>::MaxColsAtCompileTime) : BlockCols),
- RowMajor = int(ei_traits<MatrixType>::Flags)&RowMajorBit,
- InnerSize = RowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
- InnerMaxSize = RowMajor ? int(MaxColsAtCompileTime) : int(MaxRowsAtCompileTime),
- MaskPacketAccessBit = (InnerMaxSize == Dynamic || (InnerSize >= ei_packet_traits<Scalar>::size))
- ? PacketAccessBit : 0,
- FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
- Flags = (ei_traits<MatrixType>::Flags & (HereditaryBits | MaskPacketAccessBit | DirectAccessBit)) | FlagsLinearAccessBit,
- CoeffReadCost = ei_traits<MatrixType>::CoeffReadCost,
- PacketAccess = _PacketAccess
- };
- typedef typename ei_meta_if<int(PacketAccess)==ForceAligned,
- Block<MatrixType, BlockRows, BlockCols, _PacketAccess, _DirectAccessStatus>&,
- Block<MatrixType, BlockRows, BlockCols, ForceAligned, _DirectAccessStatus> >::ret AlignedDerivedType;
-};
-
-template<typename MatrixType, int BlockRows, int BlockCols, int PacketAccess, int _DirectAccessStatus> class Block
- : public MatrixBase<Block<MatrixType, BlockRows, BlockCols, PacketAccess, _DirectAccessStatus> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
-
- class InnerIterator;
-
- /** Column or Row constructor
- */
- inline Block(const MatrixType& matrix, int i)
- : m_matrix(matrix),
- // It is a row if and only if BlockRows==1 and BlockCols==MatrixType::ColsAtCompileTime,
- // and it is a column if and only if BlockRows==MatrixType::RowsAtCompileTime and BlockCols==1,
- // all other cases are invalid.
-      // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway.
- m_startRow( (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0),
- m_startCol( (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
-    m_blockRows(matrix.rows()), // if it is a row, then m_blockRows has a fixed size of 1, so there is no problem in overwriting it
- m_blockCols(matrix.cols()) // same for m_blockCols
- {
- ei_assert( (i>=0) && (
- ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i<matrix.rows())
- ||((BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) && i<matrix.cols())));
- }
-
- /** Fixed-size constructor
- */
- inline Block(const MatrixType& matrix, int startRow, int startCol)
- : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
- m_blockRows(matrix.rows()), m_blockCols(matrix.cols())
- {
- EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
- ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows()
- && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols());
- }
-
- /** Dynamic-size constructor
- */
- inline Block(const MatrixType& matrix,
- int startRow, int startCol,
- int blockRows, int blockCols)
- : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
- m_blockRows(blockRows), m_blockCols(blockCols)
- {
- ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
- && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
- ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows()
- && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols());
- }
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
-
- inline int rows() const { return m_blockRows.value(); }
- inline int cols() const { return m_blockCols.value(); }
-
- inline Scalar& coeffRef(int row, int col)
- {
- return m_matrix.const_cast_derived()
- .coeffRef(row + m_startRow.value(), col + m_startCol.value());
- }
-
- inline const Scalar coeff(int row, int col) const
- {
- return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
- }
-
- inline Scalar& coeffRef(int index)
- {
- return m_matrix.const_cast_derived()
- .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
- }
-
- inline const Scalar coeff(int index) const
- {
- return m_matrix
- .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
- }
-
- template<int LoadMode>
- inline PacketScalar packet(int row, int col) const
- {
- return m_matrix.template packet<Unaligned>
- (row + m_startRow.value(), col + m_startCol.value());
- }
-
- template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
- {
- m_matrix.const_cast_derived().template writePacket<Unaligned>
- (row + m_startRow.value(), col + m_startCol.value(), x);
- }
-
- template<int LoadMode>
- inline PacketScalar packet(int index) const
- {
- return m_matrix.template packet<Unaligned>
- (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
- }
-
- template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
- {
- m_matrix.const_cast_derived().template writePacket<Unaligned>
- (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), x);
- }
-
- protected:
-
- const typename MatrixType::Nested m_matrix;
- const ei_int_if_dynamic<MatrixType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
- const ei_int_if_dynamic<MatrixType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
- const ei_int_if_dynamic<RowsAtCompileTime> m_blockRows;
- const ei_int_if_dynamic<ColsAtCompileTime> m_blockCols;
-};
-
-/** \internal */
-template<typename MatrixType, int BlockRows, int BlockCols, int PacketAccess>
-class Block<MatrixType,BlockRows,BlockCols,PacketAccess,HasDirectAccess>
- : public MapBase<Block<MatrixType, BlockRows, BlockCols,PacketAccess,HasDirectAccess> >
-{
- public:
-
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Block, MapBase<Block>)
-
- class InnerIterator;
- typedef typename ei_traits<Block>::AlignedDerivedType AlignedDerivedType;
- friend class Block<MatrixType,BlockRows,BlockCols,PacketAccess==AsRequested?ForceAligned:AsRequested,HasDirectAccess>;
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
-
- AlignedDerivedType _convertToForceAligned()
- {
- return Block<MatrixType,BlockRows,BlockCols,ForceAligned,HasDirectAccess>
- (m_matrix, Base::m_data, Base::m_rows.value(), Base::m_cols.value());
- }
-
- /** Column or Row constructor
- */
- inline Block(const MatrixType& matrix, int i)
- : Base(&matrix.const_cast_derived().coeffRef(
- (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0,
- (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
- BlockRows==1 ? 1 : matrix.rows(),
- BlockCols==1 ? 1 : matrix.cols()),
- m_matrix(matrix)
- {
- ei_assert( (i>=0) && (
- ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i<matrix.rows())
- ||((BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) && i<matrix.cols())));
- }
-
- /** Fixed-size constructor
- */
- inline Block(const MatrixType& matrix, int startRow, int startCol)
- : Base(&matrix.const_cast_derived().coeffRef(startRow,startCol)), m_matrix(matrix)
- {
- ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows()
- && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols());
- }
-
- /** Dynamic-size constructor
- */
- inline Block(const MatrixType& matrix,
- int startRow, int startCol,
- int blockRows, int blockCols)
- : Base(&matrix.const_cast_derived().coeffRef(startRow,startCol), blockRows, blockCols),
- m_matrix(matrix)
- {
- ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
- && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
- ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows()
- && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols());
- }
-
- inline int stride(void) const { return m_matrix.stride(); }
-
- protected:
-
- /** \internal used by allowAligned() */
- inline Block(const MatrixType& matrix, const Scalar* data, int blockRows, int blockCols)
- : Base(data, blockRows, blockCols), m_matrix(matrix)
- {}
-
- const typename MatrixType::Nested m_matrix;
-};
-
-/** \returns a dynamic-size expression of a block in *this.
- *
- * \param startRow the first row in the block
- * \param startCol the first column in the block
- * \param blockRows the number of rows in the block
- * \param blockCols the number of columns in the block
- *
- * \addexample BlockIntIntIntInt \label How to reference a sub-matrix (dynamic-size)
- *
- * Example: \include MatrixBase_block_int_int_int_int.cpp
- * Output: \verbinclude MatrixBase_block_int_int_int_int.out
- *
- * \note Even though the returned expression has dynamic size, in the case
- * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
- * which means that evaluating it does not cause a dynamic memory allocation.
- *
- * \sa class Block, block(int,int)
- */
-template<typename Derived>
-inline typename BlockReturnType<Derived>::Type MatrixBase<Derived>
- ::block(int startRow, int startCol, int blockRows, int blockCols)
-{
- return typename BlockReturnType<Derived>::Type(derived(), startRow, startCol, blockRows, blockCols);
-}
-
-/** This is the const version of block(int,int,int,int). */
-template<typename Derived>
-inline const typename BlockReturnType<Derived>::Type MatrixBase<Derived>
- ::block(int startRow, int startCol, int blockRows, int blockCols) const
-{
- return typename BlockReturnType<Derived>::Type(derived(), startRow, startCol, blockRows, blockCols);
-}
-
-/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
- *
- * \only_for_vectors
- *
- * \addexample SegmentIntInt \label How to reference a sub-vector (dynamic size)
- *
- * \param start the first coefficient in the segment
- * \param size the number of coefficients in the segment
- *
- * Example: \include MatrixBase_segment_int_int.cpp
- * Output: \verbinclude MatrixBase_segment_int_int.out
- *
- * \note Even though the returned expression has dynamic size, in the case
- * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
- * which means that evaluating it does not cause a dynamic memory allocation.
- *
- * \sa class Block, segment(int)
- */
-template<typename Derived>
-inline typename BlockReturnType<Derived>::SubVectorType MatrixBase<Derived>
- ::segment(int start, int size)
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return typename BlockReturnType<Derived>::SubVectorType(derived(), RowsAtCompileTime == 1 ? 0 : start,
- ColsAtCompileTime == 1 ? 0 : start,
- RowsAtCompileTime == 1 ? 1 : size,
- ColsAtCompileTime == 1 ? 1 : size);
-}
-
-/** This is the const version of segment(int,int).*/
-template<typename Derived>
-inline const typename BlockReturnType<Derived>::SubVectorType
-MatrixBase<Derived>::segment(int start, int size) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return typename BlockReturnType<Derived>::SubVectorType(derived(), RowsAtCompileTime == 1 ? 0 : start,
- ColsAtCompileTime == 1 ? 0 : start,
- RowsAtCompileTime == 1 ? 1 : size,
- ColsAtCompileTime == 1 ? 1 : size);
-}
-
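A compact usage sketch for the vector-block accessors defined in the remainder of this file (assuming Eigen 2):

    #include <Eigen/Core>
    using namespace Eigen;

    int main()
    {
      VectorXf v(6);
      v.setZero();
      v.segment(1, 3).setConstant(2.f);  // dynamic-size: coefficients 1..3
      v.segment<2>(4).setOnes();         // fixed-size variant defined below
      v.start(2).setConstant(-1.f);      // first two coefficients
      v.end(2).setConstant(3.f);         // last two coefficients
      return 0;
    }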
-/** \returns a dynamic-size expression of the first coefficients of *this.
- *
- * \only_for_vectors
- *
- * \param size the number of coefficients in the block
- *
- * \addexample BlockInt \label How to reference a sub-vector (fixed-size)
- *
- * Example: \include MatrixBase_start_int.cpp
- * Output: \verbinclude MatrixBase_start_int.out
- *
- * \note Even though the returned expression has dynamic size, in the case
- * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
- * which means that evaluating it does not cause a dynamic memory allocation.
- *
- * \sa class Block, block(int,int)
- */
-template<typename Derived>
-inline typename BlockReturnType<Derived,Dynamic>::SubVectorType
-MatrixBase<Derived>::start(int size)
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived,
- RowsAtCompileTime == 1 ? 1 : Dynamic,
- ColsAtCompileTime == 1 ? 1 : Dynamic>
- (derived(), 0, 0,
- RowsAtCompileTime == 1 ? 1 : size,
- ColsAtCompileTime == 1 ? 1 : size);
-}
-
-/** This is the const version of start(int).*/
-template<typename Derived>
-inline const typename BlockReturnType<Derived,Dynamic>::SubVectorType
-MatrixBase<Derived>::start(int size) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived,
- RowsAtCompileTime == 1 ? 1 : Dynamic,
- ColsAtCompileTime == 1 ? 1 : Dynamic>
- (derived(), 0, 0,
- RowsAtCompileTime == 1 ? 1 : size,
- ColsAtCompileTime == 1 ? 1 : size);
-}
-
-/** \returns a dynamic-size expression of the last coefficients of *this.
- *
- * \only_for_vectors
- *
- * \param size the number of coefficients in the block
- *
- * \addexample BlockEnd \label How to reference the end of a vector (fixed-size)
- *
- * Example: \include MatrixBase_end_int.cpp
- * Output: \verbinclude MatrixBase_end_int.out
- *
- * \note Even though the returned expression has dynamic size, in the case
- * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
- * which means that evaluating it does not cause a dynamic memory allocation.
- *
- * \sa class Block, block(int,int)
- */
-template<typename Derived>
-inline typename BlockReturnType<Derived,Dynamic>::SubVectorType
-MatrixBase<Derived>::end(int size)
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived,
- RowsAtCompileTime == 1 ? 1 : Dynamic,
- ColsAtCompileTime == 1 ? 1 : Dynamic>
- (derived(),
- RowsAtCompileTime == 1 ? 0 : rows() - size,
- ColsAtCompileTime == 1 ? 0 : cols() - size,
- RowsAtCompileTime == 1 ? 1 : size,
- ColsAtCompileTime == 1 ? 1 : size);
-}
-
-/** This is the const version of end(int).*/
-template<typename Derived>
-inline const typename BlockReturnType<Derived,Dynamic>::SubVectorType
-MatrixBase<Derived>::end(int size) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived,
- RowsAtCompileTime == 1 ? 1 : Dynamic,
- ColsAtCompileTime == 1 ? 1 : Dynamic>
- (derived(),
- RowsAtCompileTime == 1 ? 0 : rows() - size,
- ColsAtCompileTime == 1 ? 0 : cols() - size,
- RowsAtCompileTime == 1 ? 1 : size,
- ColsAtCompileTime == 1 ? 1 : size);
-}
-
-/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
- *
- * \only_for_vectors
- *
- * The template parameter \a Size is the number of coefficients in the block
- *
- * \param start the index of the first element of the sub-vector
- *
- * Example: \include MatrixBase_template_int_segment.cpp
- * Output: \verbinclude MatrixBase_template_int_segment.out
- *
- * \sa class Block
- */
-template<typename Derived>
-template<int Size>
-inline typename BlockReturnType<Derived,Size>::SubVectorType
-MatrixBase<Derived>::segment(int start)
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived, (RowsAtCompileTime == 1 ? 1 : Size),
- (ColsAtCompileTime == 1 ? 1 : Size)>
- (derived(), RowsAtCompileTime == 1 ? 0 : start,
- ColsAtCompileTime == 1 ? 0 : start);
-}
-
-/** This is the const version of segment<int>(int).*/
-template<typename Derived>
-template<int Size>
-inline const typename BlockReturnType<Derived,Size>::SubVectorType
-MatrixBase<Derived>::segment(int start) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived, (RowsAtCompileTime == 1 ? 1 : Size),
- (ColsAtCompileTime == 1 ? 1 : Size)>
- (derived(), RowsAtCompileTime == 1 ? 0 : start,
- ColsAtCompileTime == 1 ? 0 : start);
-}
-
-/** \returns a fixed-size expression of the first coefficients of *this.
- *
- * \only_for_vectors
- *
- * The template parameter \a Size is the number of coefficients in the block
- *
- * \addexample BlockStart \label How to reference the start of a vector (fixed-size)
- *
- * Example: \include MatrixBase_template_int_start.cpp
- * Output: \verbinclude MatrixBase_template_int_start.out
- *
- * \sa class Block
- */
-template<typename Derived>
-template<int Size>
-inline typename BlockReturnType<Derived,Size>::SubVectorType
-MatrixBase<Derived>::start()
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived, (RowsAtCompileTime == 1 ? 1 : Size),
- (ColsAtCompileTime == 1 ? 1 : Size)>(derived(), 0, 0);
-}
-
-/** This is the const version of start<int>().*/
-template<typename Derived>
-template<int Size>
-inline const typename BlockReturnType<Derived,Size>::SubVectorType
-MatrixBase<Derived>::start() const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived, (RowsAtCompileTime == 1 ? 1 : Size),
- (ColsAtCompileTime == 1 ? 1 : Size)>(derived(), 0, 0);
-}
-
-/** \returns a fixed-size expression of the last coefficients of *this.
- *
- * \only_for_vectors
- *
- * The template parameter \a Size is the number of coefficients in the block
- *
- * Example: \include MatrixBase_template_int_end.cpp
- * Output: \verbinclude MatrixBase_template_int_end.out
- *
- * \sa class Block
- */
-template<typename Derived>
-template<int Size>
-inline typename BlockReturnType<Derived,Size>::SubVectorType
-MatrixBase<Derived>::end()
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived, RowsAtCompileTime == 1 ? 1 : Size,
- ColsAtCompileTime == 1 ? 1 : Size>
- (derived(),
- RowsAtCompileTime == 1 ? 0 : rows() - Size,
- ColsAtCompileTime == 1 ? 0 : cols() - Size);
-}
-
-/** This is the const version of end<int>().*/
-template<typename Derived>
-template<int Size>
-inline const typename BlockReturnType<Derived,Size>::SubVectorType
-MatrixBase<Derived>::end() const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return Block<Derived, RowsAtCompileTime == 1 ? 1 : Size,
- ColsAtCompileTime == 1 ? 1 : Size>
- (derived(),
- RowsAtCompileTime == 1 ? 0 : rows() - Size,
- ColsAtCompileTime == 1 ? 0 : cols() - Size);
-}
-
-/** \returns a dynamic-size expression of a corner of *this.
- *
- * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight,
- * \a Eigen::BottomLeft, \a Eigen::BottomRight.
- * \param cRows the number of rows in the corner
- * \param cCols the number of columns in the corner
- *
- * \addexample BlockCornerDynamicSize \label How to reference a sub-corner of a matrix
- *
- * Example: \include MatrixBase_corner_enum_int_int.cpp
- * Output: \verbinclude MatrixBase_corner_enum_int_int.out
- *
- * \note Even though the returned expression has dynamic size, in the case
- * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
- * which means that evaluating it does not cause a dynamic memory allocation.
- *
- * \sa class Block, block(int,int,int,int)
- */
-template<typename Derived>
-inline typename BlockReturnType<Derived>::Type MatrixBase<Derived>
- ::corner(CornerType type, int cRows, int cCols)
-{
- switch(type)
- {
- default:
- ei_assert(false && "Bad corner type.");
- case TopLeft:
- return typename BlockReturnType<Derived>::Type(derived(), 0, 0, cRows, cCols);
- case TopRight:
- return typename BlockReturnType<Derived>::Type(derived(), 0, cols() - cCols, cRows, cCols);
- case BottomLeft:
- return typename BlockReturnType<Derived>::Type(derived(), rows() - cRows, 0, cRows, cCols);
- case BottomRight:
- return typename BlockReturnType<Derived>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
- }
-}
-
-/** This is the const version of corner(CornerType, int, int).*/
-template<typename Derived>
-inline const typename BlockReturnType<Derived>::Type
-MatrixBase<Derived>::corner(CornerType type, int cRows, int cCols) const
-{
- switch(type)
- {
- default:
- ei_assert(false && "Bad corner type.");
- case TopLeft:
- return typename BlockReturnType<Derived>::Type(derived(), 0, 0, cRows, cCols);
- case TopRight:
- return typename BlockReturnType<Derived>::Type(derived(), 0, cols() - cCols, cRows, cCols);
- case BottomLeft:
- return typename BlockReturnType<Derived>::Type(derived(), rows() - cRows, 0, cRows, cCols);
- case BottomRight:
- return typename BlockReturnType<Derived>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
- }
-}
-
-/** \returns a fixed-size expression of a corner of *this.
- *
- * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight,
- * \a Eigen::BottomLeft, \a Eigen::BottomRight.
- *
- * The template parameters CRows and CCols are the number of rows and columns in the corner.
- *
- * Example: \include MatrixBase_template_int_int_corner_enum.cpp
- * Output: \verbinclude MatrixBase_template_int_int_corner_enum.out
- *
- * \sa class Block, block(int,int,int,int)
- */
-template<typename Derived>
-template<int CRows, int CCols>
-inline typename BlockReturnType<Derived, CRows, CCols>::Type
-MatrixBase<Derived>::corner(CornerType type)
-{
- switch(type)
- {
- default:
- ei_assert(false && "Bad corner type.");
- case TopLeft:
- return Block<Derived, CRows, CCols>(derived(), 0, 0);
- case TopRight:
- return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
- case BottomLeft:
- return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
- case BottomRight:
- return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
- }
-}
-
-/** This is the const version of corner<int, int>(CornerType).*/
-template<typename Derived>
-template<int CRows, int CCols>
-inline const typename BlockReturnType<Derived, CRows, CCols>::Type
-MatrixBase<Derived>::corner(CornerType type) const
-{
- switch(type)
- {
- default:
- ei_assert(false && "Bad corner type.");
- case TopLeft:
- return Block<Derived, CRows, CCols>(derived(), 0, 0);
- case TopRight:
- return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
- case BottomLeft:
- return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
- case BottomRight:
- return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
- }
-}
-
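A usage sketch covering both corner() overloads defined here (assuming Eigen 2):

    #include <Eigen/Core>
    using namespace Eigen;

    int main()
    {
      Matrix4f m;
      m.setZero();
      m.corner(TopRight, 2, 3).setOnes();           // dynamic-size corner
      m.corner<2, 2>(BottomLeft).setConstant(7.f);  // fixed-size corner
      return 0;
    }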
-/** \returns a fixed-size expression of a block in *this.
- *
- * The template parameters \a BlockRows and \a BlockCols are the number of
- * rows and columns in the block.
- *
- * \param startRow the first row in the block
- * \param startCol the first column in the block
- *
- * \addexample BlockSubMatrixFixedSize \label How to reference a sub-matrix (fixed-size)
- *
- * Example: \include MatrixBase_block_int_int.cpp
- * Output: \verbinclude MatrixBase_block_int_int.out
- *
- * \note since block is a templated member, the keyword template has to be used
- * if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode
- *
- * \sa class Block, block(int,int,int,int)
- */
-template<typename Derived>
-template<int BlockRows, int BlockCols>
-inline typename BlockReturnType<Derived, BlockRows, BlockCols>::Type
-MatrixBase<Derived>::block(int startRow, int startCol)
-{
- return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
-}
-
-/** This is the const version of block<>(int, int). */
-template<typename Derived>
-template<int BlockRows, int BlockCols>
-inline const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type
-MatrixBase<Derived>::block(int startRow, int startCol) const
-{
- return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
-}
-
-/** \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0.
- *
- * \addexample BlockColumn \label How to reference a single column of a matrix
- *
- * Example: \include MatrixBase_col.cpp
- * Output: \verbinclude MatrixBase_col.out
- *
- * \sa row(), class Block */
-template<typename Derived>
-inline typename MatrixBase<Derived>::ColXpr
-MatrixBase<Derived>::col(int i)
-{
- return ColXpr(derived(), i);
-}
-
-/** This is the const version of col(). */
-template<typename Derived>
-inline const typename MatrixBase<Derived>::ColXpr
-MatrixBase<Derived>::col(int i) const
-{
- return ColXpr(derived(), i);
-}
-
-/** \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0.
- *
- * \addexample BlockRow \label How to reference a single row of a matrix
- *
- * Example: \include MatrixBase_row.cpp
- * Output: \verbinclude MatrixBase_row.out
- *
- * \sa col(), class Block */
-template<typename Derived>
-inline typename MatrixBase<Derived>::RowXpr
-MatrixBase<Derived>::row(int i)
-{
- return RowXpr(derived(), i);
-}
-
-/** This is the const version of row(). */
-template<typename Derived>
-inline const typename MatrixBase<Derived>::RowXpr
-MatrixBase<Derived>::row(int i) const
-{
- return RowXpr(derived(), i);
-}
-
-#endif // EIGEN_BLOCK_H
diff --git a/extern/Eigen2/Eigen/src/Core/CacheFriendlyProduct.h b/extern/Eigen2/Eigen/src/Core/CacheFriendlyProduct.h
deleted file mode 100644
index b1362b0a80c..00000000000
--- a/extern/Eigen2/Eigen/src/Core/CacheFriendlyProduct.h
+++ /dev/null
@@ -1,753 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_CACHE_FRIENDLY_PRODUCT_H
-#define EIGEN_CACHE_FRIENDLY_PRODUCT_H
-
-template <int L2MemorySize,typename Scalar>
-struct ei_L2_block_traits {
- enum {width = 8 * ei_meta_sqrt<L2MemorySize/(64*sizeof(Scalar))>::ret };
-};
-
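A worked instance of this width formula, assuming EIGEN_TUNE_FOR_CPU_CACHE_SIZE is 1 MiB (1048576 bytes) and Scalar is float (4 bytes):

    width = 8 * ei_meta_sqrt<1048576 / (64 * 4)>::ret
          = 8 * ei_meta_sqrt<4096>::ret
          = 8 * 64
          = 512

so each L2-sized block spans at most 512 rows, columns, or depth entries.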
-#ifndef EIGEN_EXTERN_INSTANTIATIONS
-
-template<typename Scalar>
-static void ei_cache_friendly_product(
- int _rows, int _cols, int depth,
- bool _lhsRowMajor, const Scalar* _lhs, int _lhsStride,
- bool _rhsRowMajor, const Scalar* _rhs, int _rhsStride,
- bool resRowMajor, Scalar* res, int resStride)
-{
- const Scalar* EIGEN_RESTRICT lhs;
- const Scalar* EIGEN_RESTRICT rhs;
- int lhsStride, rhsStride, rows, cols;
- bool lhsRowMajor;
-
- if (resRowMajor)
- {
- lhs = _rhs;
- rhs = _lhs;
- lhsStride = _rhsStride;
- rhsStride = _lhsStride;
- cols = _rows;
- rows = _cols;
- lhsRowMajor = !_rhsRowMajor;
- ei_assert(_lhsRowMajor);
- }
- else
- {
- lhs = _lhs;
- rhs = _rhs;
- lhsStride = _lhsStride;
- rhsStride = _rhsStride;
- rows = _rows;
- cols = _cols;
- lhsRowMajor = _lhsRowMajor;
- ei_assert(!_rhsRowMajor);
- }
-
- typedef typename ei_packet_traits<Scalar>::type PacketType;
-
- enum {
- PacketSize = sizeof(PacketType)/sizeof(Scalar),
- #if (defined __i386__)
- // i386 architecture provides only 8 xmm registers,
- // so let's reduce the max number of rows processed at once.
- MaxBlockRows = 4,
- MaxBlockRows_ClampingMask = 0xFFFFFC,
- #else
- MaxBlockRows = 8,
- MaxBlockRows_ClampingMask = 0xFFFFF8,
- #endif
- // maximal size of the blocks fitted in L2 cache
- MaxL2BlockSize = ei_L2_block_traits<EIGEN_TUNE_FOR_CPU_CACHE_SIZE,Scalar>::width
- };
-
- const bool resIsAligned = (PacketSize==1) || (((resStride%PacketSize) == 0) && (size_t(res)%16==0));
-
- const int remainingSize = depth % PacketSize;
- const int size = depth - remainingSize; // third dimension of the product clamped to packet boundaries
- const int l2BlockRows = MaxL2BlockSize > rows ? rows : MaxL2BlockSize;
- const int l2BlockCols = MaxL2BlockSize > cols ? cols : MaxL2BlockSize;
- const int l2BlockSize = MaxL2BlockSize > size ? size : MaxL2BlockSize;
- const int l2BlockSizeAligned = (1 + std::max(l2BlockSize,l2BlockCols)/PacketSize)*PacketSize;
- const bool needRhsCopy = (PacketSize>1) && ((rhsStride%PacketSize!=0) || (size_t(rhs)%16!=0));
- Scalar* EIGEN_RESTRICT block = 0;
- const int allocBlockSize = l2BlockRows*size;
- block = ei_aligned_stack_new(Scalar, allocBlockSize);
- Scalar* EIGEN_RESTRICT rhsCopy
- = ei_aligned_stack_new(Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
-
- // loops on each L2 cache friendly blocks of the result
- for(int l2i=0; l2i<rows; l2i+=l2BlockRows)
- {
- const int l2blockRowEnd = std::min(l2i+l2BlockRows, rows);
- const int l2blockRowEndBW = l2blockRowEnd & MaxBlockRows_ClampingMask; // end of the rows aligned to bw
- const int l2blockRemainingRows = l2blockRowEnd - l2blockRowEndBW; // number of remaining rows
- //const int l2blockRowEndBWPlusOne = l2blockRowEndBW + (l2blockRemainingRows?0:MaxBlockRows);
-
- // build a cache friendly blocky matrix
- int count = 0;
-
- // copy l2blocksize rows of m_lhs to blocks of ps x bw
- for(int l2k=0; l2k<size; l2k+=l2BlockSize)
- {
- const int l2blockSizeEnd = std::min(l2k+l2BlockSize, size);
-
- for (int i = l2i; i<l2blockRowEndBW/*PlusOne*/; i+=MaxBlockRows)
- {
- // TODO merge the "if l2blockRemainingRows" using something like:
- // const int blockRows = std::min(i+MaxBlockRows, rows) - i;
-
- for (int k=l2k; k<l2blockSizeEnd; k+=PacketSize)
- {
- // TODO write these loops using meta unrolling
- // negligible for large matrices but useful for small ones
- if (lhsRowMajor)
- {
- for (int w=0; w<MaxBlockRows; ++w)
- for (int s=0; s<PacketSize; ++s)
- block[count++] = lhs[(i+w)*lhsStride + (k+s)];
- }
- else
- {
- for (int w=0; w<MaxBlockRows; ++w)
- for (int s=0; s<PacketSize; ++s)
- block[count++] = lhs[(i+w) + (k+s)*lhsStride];
- }
- }
- }
- if (l2blockRemainingRows>0)
- {
- for (int k=l2k; k<l2blockSizeEnd; k+=PacketSize)
- {
- if (lhsRowMajor)
- {
- for (int w=0; w<l2blockRemainingRows; ++w)
- for (int s=0; s<PacketSize; ++s)
- block[count++] = lhs[(l2blockRowEndBW+w)*lhsStride + (k+s)];
- }
- else
- {
- for (int w=0; w<l2blockRemainingRows; ++w)
- for (int s=0; s<PacketSize; ++s)
- block[count++] = lhs[(l2blockRowEndBW+w) + (k+s)*lhsStride];
- }
- }
- }
- }
-
- for(int l2j=0; l2j<cols; l2j+=l2BlockCols)
- {
- int l2blockColEnd = std::min(l2j+l2BlockCols, cols);
-
- for(int l2k=0; l2k<size; l2k+=l2BlockSize)
- {
-        // accumulate bw rows of lhs times a single column of rhs into a bw x 1 block of res
- int l2blockSizeEnd = std::min(l2k+l2BlockSize, size);
-
- // if not aligned, copy the rhs block
- if (needRhsCopy)
- for(int l1j=l2j; l1j<l2blockColEnd; l1j+=1)
- {
- ei_internal_assert(l2BlockSizeAligned*(l1j-l2j)+(l2blockSizeEnd-l2k) < l2BlockSizeAligned*l2BlockSizeAligned);
- memcpy(rhsCopy+l2BlockSizeAligned*(l1j-l2j),&(rhs[l1j*rhsStride+l2k]),(l2blockSizeEnd-l2k)*sizeof(Scalar));
- }
-
-        // for each bw x 1 block of the result
- for(int l1i=l2i; l1i<l2blockRowEndBW; l1i+=MaxBlockRows)
- {
- int offsetblock = l2k * (l2blockRowEnd-l2i) + (l1i-l2i)*(l2blockSizeEnd-l2k) - l2k*MaxBlockRows;
- const Scalar* EIGEN_RESTRICT localB = &block[offsetblock];
-
- for(int l1j=l2j; l1j<l2blockColEnd; l1j+=1)
- {
- const Scalar* EIGEN_RESTRICT rhsColumn;
- if (needRhsCopy)
- rhsColumn = &(rhsCopy[l2BlockSizeAligned*(l1j-l2j)-l2k]);
- else
- rhsColumn = &(rhs[l1j*rhsStride]);
-
- PacketType dst[MaxBlockRows];
- dst[3] = dst[2] = dst[1] = dst[0] = ei_pset1(Scalar(0.));
- if (MaxBlockRows==8)
- dst[7] = dst[6] = dst[5] = dst[4] = dst[0];
-
- PacketType tmp;
-
- for(int k=l2k; k<l2blockSizeEnd; k+=PacketSize)
- {
- tmp = ei_ploadu(&rhsColumn[k]);
- PacketType A0, A1, A2, A3, A4, A5;
- A0 = ei_pload(localB + k*MaxBlockRows);
- A1 = ei_pload(localB + k*MaxBlockRows+1*PacketSize);
- A2 = ei_pload(localB + k*MaxBlockRows+2*PacketSize);
- A3 = ei_pload(localB + k*MaxBlockRows+3*PacketSize);
- if (MaxBlockRows==8) A4 = ei_pload(localB + k*MaxBlockRows+4*PacketSize);
- if (MaxBlockRows==8) A5 = ei_pload(localB + k*MaxBlockRows+5*PacketSize);
- dst[0] = ei_pmadd(tmp, A0, dst[0]);
- if (MaxBlockRows==8) A0 = ei_pload(localB + k*MaxBlockRows+6*PacketSize);
- dst[1] = ei_pmadd(tmp, A1, dst[1]);
- if (MaxBlockRows==8) A1 = ei_pload(localB + k*MaxBlockRows+7*PacketSize);
- dst[2] = ei_pmadd(tmp, A2, dst[2]);
- dst[3] = ei_pmadd(tmp, A3, dst[3]);
- if (MaxBlockRows==8)
- {
- dst[4] = ei_pmadd(tmp, A4, dst[4]);
- dst[5] = ei_pmadd(tmp, A5, dst[5]);
- dst[6] = ei_pmadd(tmp, A0, dst[6]);
- dst[7] = ei_pmadd(tmp, A1, dst[7]);
- }
- }
-
- Scalar* EIGEN_RESTRICT localRes = &(res[l1i + l1j*resStride]);
-
- if (PacketSize>1 && resIsAligned)
- {
- // the result is aligned: let's do packet reduction
- ei_pstore(&(localRes[0]), ei_padd(ei_pload(&(localRes[0])), ei_preduxp(&dst[0])));
- if (PacketSize==2)
- ei_pstore(&(localRes[2]), ei_padd(ei_pload(&(localRes[2])), ei_preduxp(&(dst[2]))));
- if (MaxBlockRows==8)
- {
- ei_pstore(&(localRes[4]), ei_padd(ei_pload(&(localRes[4])), ei_preduxp(&(dst[4]))));
- if (PacketSize==2)
- ei_pstore(&(localRes[6]), ei_padd(ei_pload(&(localRes[6])), ei_preduxp(&(dst[6]))));
- }
- }
- else
- {
- // not aligned => per coeff packet reduction
- localRes[0] += ei_predux(dst[0]);
- localRes[1] += ei_predux(dst[1]);
- localRes[2] += ei_predux(dst[2]);
- localRes[3] += ei_predux(dst[3]);
- if (MaxBlockRows==8)
- {
- localRes[4] += ei_predux(dst[4]);
- localRes[5] += ei_predux(dst[5]);
- localRes[6] += ei_predux(dst[6]);
- localRes[7] += ei_predux(dst[7]);
- }
- }
- }
- }
- if (l2blockRemainingRows>0)
- {
- int offsetblock = l2k * (l2blockRowEnd-l2i) + (l2blockRowEndBW-l2i)*(l2blockSizeEnd-l2k) - l2k*l2blockRemainingRows;
- const Scalar* localB = &block[offsetblock];
-
- for(int l1j=l2j; l1j<l2blockColEnd; l1j+=1)
- {
- const Scalar* EIGEN_RESTRICT rhsColumn;
- if (needRhsCopy)
- rhsColumn = &(rhsCopy[l2BlockSizeAligned*(l1j-l2j)-l2k]);
- else
- rhsColumn = &(rhs[l1j*rhsStride]);
-
- PacketType dst[MaxBlockRows];
- dst[3] = dst[2] = dst[1] = dst[0] = ei_pset1(Scalar(0.));
- if (MaxBlockRows==8)
- dst[7] = dst[6] = dst[5] = dst[4] = dst[0];
-
- // let's declare a few other temporary registers
- PacketType tmp;
-
- for(int k=l2k; k<l2blockSizeEnd; k+=PacketSize)
- {
- tmp = ei_pload(&rhsColumn[k]);
-
- dst[0] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows ])), dst[0]);
- if (l2blockRemainingRows>=2) dst[1] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+ PacketSize])), dst[1]);
- if (l2blockRemainingRows>=3) dst[2] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+2*PacketSize])), dst[2]);
- if (l2blockRemainingRows>=4) dst[3] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+3*PacketSize])), dst[3]);
- if (MaxBlockRows==8)
- {
- if (l2blockRemainingRows>=5) dst[4] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+4*PacketSize])), dst[4]);
- if (l2blockRemainingRows>=6) dst[5] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+5*PacketSize])), dst[5]);
- if (l2blockRemainingRows>=7) dst[6] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+6*PacketSize])), dst[6]);
- if (l2blockRemainingRows>=8) dst[7] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+7*PacketSize])), dst[7]);
- }
- }
-
- Scalar* EIGEN_RESTRICT localRes = &(res[l2blockRowEndBW + l1j*resStride]);
-
-          // process the remaining rows one at a time
- localRes[0] += ei_predux(dst[0]);
- if (l2blockRemainingRows>=2) localRes[1] += ei_predux(dst[1]);
- if (l2blockRemainingRows>=3) localRes[2] += ei_predux(dst[2]);
- if (l2blockRemainingRows>=4) localRes[3] += ei_predux(dst[3]);
- if (MaxBlockRows==8)
- {
- if (l2blockRemainingRows>=5) localRes[4] += ei_predux(dst[4]);
- if (l2blockRemainingRows>=6) localRes[5] += ei_predux(dst[5]);
- if (l2blockRemainingRows>=7) localRes[6] += ei_predux(dst[6]);
- if (l2blockRemainingRows>=8) localRes[7] += ei_predux(dst[7]);
- }
-
- }
- }
- }
- }
- }
- if (PacketSize>1 && remainingSize)
- {
- if (lhsRowMajor)
- {
- for (int j=0; j<cols; ++j)
- for (int i=0; i<rows; ++i)
- {
- Scalar tmp = lhs[i*lhsStride+size] * rhs[j*rhsStride+size];
-          // FIXME this loop gets vectorized by the compiler!
- for (int k=1; k<remainingSize; ++k)
- tmp += lhs[i*lhsStride+size+k] * rhs[j*rhsStride+size+k];
- res[i+j*resStride] += tmp;
- }
- }
- else
- {
- for (int j=0; j<cols; ++j)
- for (int i=0; i<rows; ++i)
- {
- Scalar tmp = lhs[i+size*lhsStride] * rhs[j*rhsStride+size];
- for (int k=1; k<remainingSize; ++k)
- tmp += lhs[i+(size+k)*lhsStride] * rhs[j*rhsStride+size+k];
- res[i+j*resStride] += tmp;
- }
- }
- }
-
- ei_aligned_stack_delete(Scalar, block, allocBlockSize);
- ei_aligned_stack_delete(Scalar, rhsCopy, l2BlockSizeAligned*l2BlockSizeAligned);
-}
-
-#endif // EIGEN_EXTERN_INSTANTIATIONS
-
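For orientation while reading the kernel above, here is a scalar reference of what it computes once the operands have been normalized (res and rhs column-major, lhs either way). naive_product is an illustrative name, not part of Eigen:

    // Computes res += lhs * rhs with explicit strides, like the kernel above,
    // but one coefficient at a time and with no blocking.
    void naive_product(int rows, int cols, int depth,
                       bool lhsRowMajor, const float* lhs, int lhsStride,
                       const float* rhs, int rhsStride,
                       float* res, int resStride)
    {
      for (int j = 0; j < cols; ++j)
        for (int i = 0; i < rows; ++i)
        {
          float acc = 0.f;
          for (int k = 0; k < depth; ++k)
            acc += (lhsRowMajor ? lhs[i * lhsStride + k] : lhs[i + k * lhsStride])
                 * rhs[j * rhsStride + k];
          res[i + j * resStride] += acc;  // the kernel also accumulates into res
        }
    }

    int main()
    {
      const float lhs[4] = {1, 2, 3, 4};  // 2x2, column-major
      const float rhs[4] = {1, 0, 0, 1};  // 2x2 identity, column-major
      float res[4] = {0, 0, 0, 0};
      naive_product(2, 2, 2, false, lhs, 2, rhs, 2, res, 2);  // res == lhs
      return 0;
    }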
-/* Optimized col-major matrix * vector product:
- * This algorithm processes 4 columns at once, which both reduces the number of
- * loads/stores of the result by a factor of 4 and reduces the instruction
- * dependencies. Moreover, we know that all bands have the
- * same alignment pattern.
- * TODO: since rhs gets evaluated only once, no need to evaluate it
- */
-template<typename Scalar, typename RhsType>
-static EIGEN_DONT_INLINE void ei_cache_friendly_product_colmajor_times_vector(
- int size,
- const Scalar* lhs, int lhsStride,
- const RhsType& rhs,
- Scalar* res)
-{
- #ifdef _EIGEN_ACCUMULATE_PACKETS
- #error _EIGEN_ACCUMULATE_PACKETS has already been defined
- #endif
- #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \
- ei_pstore(&res[j], \
- ei_padd(ei_pload(&res[j]), \
- ei_padd( \
- ei_padd(ei_pmul(ptmp0,EIGEN_CAT(ei_ploa , A0)(&lhs0[j])), \
- ei_pmul(ptmp1,EIGEN_CAT(ei_ploa , A13)(&lhs1[j]))), \
- ei_padd(ei_pmul(ptmp2,EIGEN_CAT(ei_ploa , A2)(&lhs2[j])), \
- ei_pmul(ptmp3,EIGEN_CAT(ei_ploa , A13)(&lhs3[j]))) )))
-
- typedef typename ei_packet_traits<Scalar>::type Packet;
- const int PacketSize = sizeof(Packet)/sizeof(Scalar);
-
- enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
- const int columnsAtOnce = 4;
- const int peels = 2;
- const int PacketAlignedMask = PacketSize-1;
- const int PeelAlignedMask = PacketSize*peels-1;
-
- // How many coeffs of the result do we have to skip to be aligned.
-  // Here we assume the data are at least aligned on the base scalar type, which is mandatory anyway.
- const int alignedStart = ei_alignmentOffset(res,size);
- const int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
- const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
-
- const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
- int alignmentPattern = alignmentStep==0 ? AllAligned
- : alignmentStep==(PacketSize/2) ? EvenAligned
- : FirstAligned;
-
- // we cannot assume the first element is aligned because of sub-matrices
- const int lhsAlignmentOffset = ei_alignmentOffset(lhs,size);
-
-  // find how many columns we have to skip to be aligned with the result (if possible)
- int skipColumns = 0;
- if (PacketSize>1)
- {
- ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size<PacketSize);
-
- while (skipColumns<PacketSize &&
- alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%PacketSize))
- ++skipColumns;
- if (skipColumns==PacketSize)
- {
- // nothing can be aligned, no need to skip any column
- alignmentPattern = NoneAligned;
- skipColumns = 0;
- }
- else
- {
- skipColumns = std::min(skipColumns,rhs.size());
-      // note that the skipped columns are processed later.
- }
-
- ei_internal_assert((alignmentPattern==NoneAligned) || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(Packet))==0);
- }
-
- int offset1 = (FirstAligned && alignmentStep==1?3:1);
- int offset3 = (FirstAligned && alignmentStep==1?1:3);
-
- int columnBound = ((rhs.size()-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
- for (int i=skipColumns; i<columnBound; i+=columnsAtOnce)
- {
- Packet ptmp0 = ei_pset1(rhs[i]), ptmp1 = ei_pset1(rhs[i+offset1]),
- ptmp2 = ei_pset1(rhs[i+2]), ptmp3 = ei_pset1(rhs[i+offset3]);
-
-    // this helps a lot in generating better binary code
- const Scalar *lhs0 = lhs + i*lhsStride, *lhs1 = lhs + (i+offset1)*lhsStride,
- *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
-
- if (PacketSize>1)
- {
- /* explicit vectorization */
- // process initial unaligned coeffs
- for (int j=0; j<alignedStart; ++j)
- res[j] += ei_pfirst(ptmp0)*lhs0[j] + ei_pfirst(ptmp1)*lhs1[j] + ei_pfirst(ptmp2)*lhs2[j] + ei_pfirst(ptmp3)*lhs3[j];
-
- if (alignedSize>alignedStart)
- {
- switch(alignmentPattern)
- {
- case AllAligned:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
- _EIGEN_ACCUMULATE_PACKETS(d,d,d);
- break;
- case EvenAligned:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
- _EIGEN_ACCUMULATE_PACKETS(d,du,d);
- break;
- case FirstAligned:
- if(peels>1)
- {
- Packet A00, A01, A02, A03, A10, A11, A12, A13;
-
- A01 = ei_pload(&lhs1[alignedStart-1]);
- A02 = ei_pload(&lhs2[alignedStart-2]);
- A03 = ei_pload(&lhs3[alignedStart-3]);
-
- for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize)
- {
- A11 = ei_pload(&lhs1[j-1+PacketSize]); ei_palign<1>(A01,A11);
- A12 = ei_pload(&lhs2[j-2+PacketSize]); ei_palign<2>(A02,A12);
- A13 = ei_pload(&lhs3[j-3+PacketSize]); ei_palign<3>(A03,A13);
-
- A00 = ei_pload (&lhs0[j]);
- A10 = ei_pload (&lhs0[j+PacketSize]);
- A00 = ei_pmadd(ptmp0, A00, ei_pload(&res[j]));
- A10 = ei_pmadd(ptmp0, A10, ei_pload(&res[j+PacketSize]));
-
- A00 = ei_pmadd(ptmp1, A01, A00);
- A01 = ei_pload(&lhs1[j-1+2*PacketSize]); ei_palign<1>(A11,A01);
- A00 = ei_pmadd(ptmp2, A02, A00);
- A02 = ei_pload(&lhs2[j-2+2*PacketSize]); ei_palign<2>(A12,A02);
- A00 = ei_pmadd(ptmp3, A03, A00);
- ei_pstore(&res[j],A00);
- A03 = ei_pload(&lhs3[j-3+2*PacketSize]); ei_palign<3>(A13,A03);
- A10 = ei_pmadd(ptmp1, A11, A10);
- A10 = ei_pmadd(ptmp2, A12, A10);
- A10 = ei_pmadd(ptmp3, A13, A10);
- ei_pstore(&res[j+PacketSize],A10);
- }
- }
- for (int j = peeledSize; j<alignedSize; j+=PacketSize)
- _EIGEN_ACCUMULATE_PACKETS(d,du,du);
- break;
- default:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
- _EIGEN_ACCUMULATE_PACKETS(du,du,du);
- break;
- }
- }
- } // end explicit vectorization
-
- /* process remaining coeffs (or all if there is no explicit vectorization) */
- for (int j=alignedSize; j<size; ++j)
- res[j] += ei_pfirst(ptmp0)*lhs0[j] + ei_pfirst(ptmp1)*lhs1[j] + ei_pfirst(ptmp2)*lhs2[j] + ei_pfirst(ptmp3)*lhs3[j];
- }
-
- // process remaining first and last columns (at most columnsAtOnce-1)
- int end = rhs.size();
- int start = columnBound;
- do
- {
- for (int i=start; i<end; ++i)
- {
- Packet ptmp0 = ei_pset1(rhs[i]);
- const Scalar* lhs0 = lhs + i*lhsStride;
-
- if (PacketSize>1)
- {
- /* explicit vectorization */
- // process first unaligned result's coeffs
- for (int j=0; j<alignedStart; ++j)
- res[j] += ei_pfirst(ptmp0) * lhs0[j];
-
- // process aligned result's coeffs
- if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
- for (int j = alignedStart;j<alignedSize;j+=PacketSize)
- ei_pstore(&res[j], ei_pmadd(ptmp0,ei_pload(&lhs0[j]),ei_pload(&res[j])));
- else
- for (int j = alignedStart;j<alignedSize;j+=PacketSize)
- ei_pstore(&res[j], ei_pmadd(ptmp0,ei_ploadu(&lhs0[j]),ei_pload(&res[j])));
- }
-
- // process remaining scalars (or all if no explicit vectorization)
- for (int j=alignedSize; j<size; ++j)
- res[j] += ei_pfirst(ptmp0) * lhs0[j];
- }
- if (skipColumns)
- {
- start = 0;
- end = skipColumns;
- skipColumns = 0;
- }
- else
- break;
- } while(PacketSize>1);
- #undef _EIGEN_ACCUMULATE_PACKETS
-}
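
The kernel above reads more clearly in scalar form. Below is a standalone sketch, in plain C++ with a hypothetical helper name and with all of the SIMD, peeling, and alignment machinery stripped out, of the 4-columns-at-once idea the header comment describes: each res[j] is loaded and stored once per group of four columns instead of once per column.

// Scalar sketch of the 4-columns-at-once accumulation used above
// (no SIMD, no alignment handling; illustrative only).
static void colmajor_times_vector_sketch(int rows, int cols,
                                         const float* lhs, int lhsStride,
                                         const float* rhs, float* res)
{
  int i = 0;
  for (; i + 4 <= cols; i += 4)
  {
    // broadcast four rhs coefficients, walk four columns in lock-step
    const float b0 = rhs[i],   b1 = rhs[i+1],
                b2 = rhs[i+2], b3 = rhs[i+3];
    const float *c0 = lhs +  i   *lhsStride, *c1 = lhs + (i+1)*lhsStride,
                *c2 = lhs + (i+2)*lhsStride, *c3 = lhs + (i+3)*lhsStride;
    for (int j = 0; j < rows; ++j)
      res[j] += b0*c0[j] + b1*c1[j] + b2*c2[j] + b3*c3[j];
  }
  for (; i < cols; ++i)  // leftover columns, one at a time
    for (int j = 0; j < rows; ++j)
      res[j] += rhs[i] * lhs[i*lhsStride + j];
}

The real kernel additionally reorders the four columns (offset1/offset3) and dispatches on the alignment pattern so that most packet loads can be aligned.
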
-
-// TODO add peeling to mask unaligned load/stores
-template<typename Scalar, typename ResType>
-static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector(
- const Scalar* lhs, int lhsStride,
- const Scalar* rhs, int rhsSize,
- ResType& res)
-{
- #ifdef _EIGEN_ACCUMULATE_PACKETS
- #error _EIGEN_ACCUMULATE_PACKETS has already been defined
- #endif
-
- #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\
- Packet b = ei_pload(&rhs[j]); \
- ptmp0 = ei_pmadd(b, EIGEN_CAT(ei_ploa,A0) (&lhs0[j]), ptmp0); \
- ptmp1 = ei_pmadd(b, EIGEN_CAT(ei_ploa,A13)(&lhs1[j]), ptmp1); \
- ptmp2 = ei_pmadd(b, EIGEN_CAT(ei_ploa,A2) (&lhs2[j]), ptmp2); \
- ptmp3 = ei_pmadd(b, EIGEN_CAT(ei_ploa,A13)(&lhs3[j]), ptmp3); }
-
- typedef typename ei_packet_traits<Scalar>::type Packet;
- const int PacketSize = sizeof(Packet)/sizeof(Scalar);
-
- enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
- const int rowsAtOnce = 4;
- const int peels = 2;
- const int PacketAlignedMask = PacketSize-1;
- const int PeelAlignedMask = PacketSize*peels-1;
- const int size = rhsSize;
-
-  // How many coeffs of the result do we have to skip to be aligned?
-  // Here we assume the data is at least aligned on the base scalar type; that much is mandatory anyway.
- const int alignedStart = ei_alignmentOffset(rhs, size);
- const int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
- const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
-
- const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
- int alignmentPattern = alignmentStep==0 ? AllAligned
- : alignmentStep==(PacketSize/2) ? EvenAligned
- : FirstAligned;
-
- // we cannot assume the first element is aligned because of sub-matrices
- const int lhsAlignmentOffset = ei_alignmentOffset(lhs,size);
-
-  // find out how many rows we have to skip to be aligned with rhs (if possible)
- int skipRows = 0;
- if (PacketSize>1)
- {
- ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size<PacketSize);
-
- while (skipRows<PacketSize &&
- alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%PacketSize))
- ++skipRows;
- if (skipRows==PacketSize)
- {
-      // nothing can be aligned, no need to skip any row
- alignmentPattern = NoneAligned;
- skipRows = 0;
- }
- else
- {
- skipRows = std::min(skipRows,res.size());
-      // note that the skipped rows are processed later.
- }
- ei_internal_assert((alignmentPattern==NoneAligned) || PacketSize==1
- || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(Packet))==0);
- }
-
- int offset1 = (FirstAligned && alignmentStep==1?3:1);
- int offset3 = (FirstAligned && alignmentStep==1?1:3);
-
- int rowBound = ((res.size()-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
- for (int i=skipRows; i<rowBound; i+=rowsAtOnce)
- {
- Scalar tmp0 = Scalar(0), tmp1 = Scalar(0), tmp2 = Scalar(0), tmp3 = Scalar(0);
-
-    // this helps the compiler generate good binary code
- const Scalar *lhs0 = lhs + i*lhsStride, *lhs1 = lhs + (i+offset1)*lhsStride,
- *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
-
- if (PacketSize>1)
- {
- /* explicit vectorization */
- Packet ptmp0 = ei_pset1(Scalar(0)), ptmp1 = ei_pset1(Scalar(0)), ptmp2 = ei_pset1(Scalar(0)), ptmp3 = ei_pset1(Scalar(0));
-
- // process initial unaligned coeffs
-    // FIXME this loop gets vectorized by the compiler!
- for (int j=0; j<alignedStart; ++j)
- {
- Scalar b = rhs[j];
- tmp0 += b*lhs0[j]; tmp1 += b*lhs1[j]; tmp2 += b*lhs2[j]; tmp3 += b*lhs3[j];
- }
-
- if (alignedSize>alignedStart)
- {
- switch(alignmentPattern)
- {
- case AllAligned:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
- _EIGEN_ACCUMULATE_PACKETS(d,d,d);
- break;
- case EvenAligned:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
- _EIGEN_ACCUMULATE_PACKETS(d,du,d);
- break;
- case FirstAligned:
- if (peels>1)
- {
-            /* Here we process 4 rows with two peeled iterations to hide
-             * the overhead of unaligned loads. Moreover, unaligned loads are handled
-             * using special shift/move operations between the two aligned packets
-             * overlapping the desired unaligned packet. This is *much* more efficient
-             * than basic unaligned loads.
-             */
- Packet A01, A02, A03, b, A11, A12, A13;
- A01 = ei_pload(&lhs1[alignedStart-1]);
- A02 = ei_pload(&lhs2[alignedStart-2]);
- A03 = ei_pload(&lhs3[alignedStart-3]);
-
- for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize)
- {
- b = ei_pload(&rhs[j]);
- A11 = ei_pload(&lhs1[j-1+PacketSize]); ei_palign<1>(A01,A11);
- A12 = ei_pload(&lhs2[j-2+PacketSize]); ei_palign<2>(A02,A12);
- A13 = ei_pload(&lhs3[j-3+PacketSize]); ei_palign<3>(A03,A13);
-
- ptmp0 = ei_pmadd(b, ei_pload (&lhs0[j]), ptmp0);
- ptmp1 = ei_pmadd(b, A01, ptmp1);
- A01 = ei_pload(&lhs1[j-1+2*PacketSize]); ei_palign<1>(A11,A01);
- ptmp2 = ei_pmadd(b, A02, ptmp2);
- A02 = ei_pload(&lhs2[j-2+2*PacketSize]); ei_palign<2>(A12,A02);
- ptmp3 = ei_pmadd(b, A03, ptmp3);
- A03 = ei_pload(&lhs3[j-3+2*PacketSize]); ei_palign<3>(A13,A03);
-
- b = ei_pload(&rhs[j+PacketSize]);
- ptmp0 = ei_pmadd(b, ei_pload (&lhs0[j+PacketSize]), ptmp0);
- ptmp1 = ei_pmadd(b, A11, ptmp1);
- ptmp2 = ei_pmadd(b, A12, ptmp2);
- ptmp3 = ei_pmadd(b, A13, ptmp3);
- }
- }
- for (int j = peeledSize; j<alignedSize; j+=PacketSize)
- _EIGEN_ACCUMULATE_PACKETS(d,du,du);
- break;
- default:
- for (int j = alignedStart; j<alignedSize; j+=PacketSize)
- _EIGEN_ACCUMULATE_PACKETS(du,du,du);
- break;
- }
- tmp0 += ei_predux(ptmp0);
- tmp1 += ei_predux(ptmp1);
- tmp2 += ei_predux(ptmp2);
- tmp3 += ei_predux(ptmp3);
- }
- } // end explicit vectorization
-
- // process remaining coeffs (or all if no explicit vectorization)
-    // FIXME this loop gets vectorized by the compiler!
- for (int j=alignedSize; j<size; ++j)
- {
- Scalar b = rhs[j];
- tmp0 += b*lhs0[j]; tmp1 += b*lhs1[j]; tmp2 += b*lhs2[j]; tmp3 += b*lhs3[j];
- }
- res[i] += tmp0; res[i+offset1] += tmp1; res[i+2] += tmp2; res[i+offset3] += tmp3;
- }
-
-  // process remaining first and last rows (at most rowsAtOnce-1)
- int end = res.size();
- int start = rowBound;
- do
- {
- for (int i=start; i<end; ++i)
- {
- Scalar tmp0 = Scalar(0);
- Packet ptmp0 = ei_pset1(tmp0);
- const Scalar* lhs0 = lhs + i*lhsStride;
- // process first unaligned result's coeffs
-      // FIXME this loop gets vectorized by the compiler!
- for (int j=0; j<alignedStart; ++j)
- tmp0 += rhs[j] * lhs0[j];
-
- if (alignedSize>alignedStart)
- {
- // process aligned rhs coeffs
- if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
- for (int j = alignedStart;j<alignedSize;j+=PacketSize)
- ptmp0 = ei_pmadd(ei_pload(&rhs[j]), ei_pload(&lhs0[j]), ptmp0);
- else
- for (int j = alignedStart;j<alignedSize;j+=PacketSize)
- ptmp0 = ei_pmadd(ei_pload(&rhs[j]), ei_ploadu(&lhs0[j]), ptmp0);
- tmp0 += ei_predux(ptmp0);
- }
-
- // process remaining scalars
-      // FIXME this loop gets vectorized by the compiler!
- for (int j=alignedSize; j<size; ++j)
- tmp0 += rhs[j] * lhs0[j];
- res[i] += tmp0;
- }
- if (skipRows)
- {
- start = 0;
- end = skipRows;
- skipRows = 0;
- }
- else
- break;
- } while(PacketSize>1);
-
- #undef _EIGEN_ACCUMULATE_PACKETS
-}
-
-#endif // EIGEN_CACHE_FRIENDLY_PRODUCT_H
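
Before moving on to the next file: the row-major kernel just deleted follows the same shape, and a scalar sketch makes its accumulator structure obvious. The four independent partial sums below stand in for the packet accumulators that ei_predux() horizontally reduces at the end of the vectorized path (plain C++, hypothetical helper name, no SIMD):

// Scalar sketch of the 4-rows-at-once pattern of
// ei_cache_friendly_product_rowmajor_times_vector (illustrative only).
static void rowmajor_times_vector_sketch(int rows, int cols,
                                         const float* lhs, int lhsStride,
                                         const float* rhs, float* res)
{
  int i = 0;
  for (; i + 4 <= rows; i += 4)
  {
    float t0 = 0.f, t1 = 0.f, t2 = 0.f, t3 = 0.f;  // independent chains
    const float *r0 = lhs +  i   *lhsStride, *r1 = lhs + (i+1)*lhsStride,
                *r2 = lhs + (i+2)*lhsStride, *r3 = lhs + (i+3)*lhsStride;
    for (int j = 0; j < cols; ++j)
    {
      const float b = rhs[j];
      t0 += b*r0[j]; t1 += b*r1[j]; t2 += b*r2[j]; t3 += b*r3[j];
    }
    res[i] += t0; res[i+1] += t1; res[i+2] += t2; res[i+3] += t3;
  }
  for (; i < rows; ++i)  // leftover rows
  {
    float t = 0.f;
    for (int j = 0; j < cols; ++j)
      t += rhs[j] * lhs[i*lhsStride + j];
    res[i] += t;
  }
}
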
diff --git a/extern/Eigen2/Eigen/src/Core/Coeffs.h b/extern/Eigen2/Eigen/src/Core/Coeffs.h
deleted file mode 100644
index 23a84228b24..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Coeffs.h
+++ /dev/null
@@ -1,384 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_COEFFS_H
-#define EIGEN_COEFFS_H
-
-/** Short version: don't use this function, use
- * \link operator()(int,int) const \endlink instead.
- *
- * Long version: this function is similar to
- * \link operator()(int,int) const \endlink, but without the assertion.
- * Use this for limiting the performance cost of debugging code when doing
- * repeated coefficient access. Only use this when it is guaranteed that the
- * parameters \a row and \a col are in range.
- *
- * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
- * function equivalent to \link operator()(int,int) const \endlink.
- *
- * \sa operator()(int,int) const, coeffRef(int,int), coeff(int) const
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::coeff(int row, int col) const
-{
- ei_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- return derived().coeff(row, col);
-}
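
A small usage sketch of the distinction documented above, against the Eigen2-era API (the helper name traceLikeSum is ours): coeff(i,j) elides the range assertion that operator()(i,j) performs, so it belongs only in loops whose bounds already guarantee the indices are valid.

#include <Eigen/Core>
#include <algorithm>

// Sum the main-diagonal entries: the loop bound proves the indices are in
// range, so the assertion-free coeff(i,j) is safe here.
float traceLikeSum(const Eigen::MatrixXf& m)
{
  float sum = 0.f;
  const int n = std::min(m.rows(), m.cols());
  for (int k = 0; k < n; ++k)
    sum += m.coeff(k, k);
  return sum;
}
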
-
-/** \returns the coefficient at the given row and column.
- *
- * \sa operator()(int,int), operator[](int) const
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::operator()(int row, int col) const
-{
- ei_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- return derived().coeff(row, col);
-}
-
-/** Short version: don't use this function, use
- * \link operator()(int,int) \endlink instead.
- *
- * Long version: this function is similar to
- * \link operator()(int,int) \endlink, but without the assertion.
- * Use this for limiting the performance cost of debugging code when doing
- * repeated coefficient access. Only use this when it is guaranteed that the
- * parameters \a row and \a col are in range.
- *
- * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
- * function equivalent to \link operator()(int,int) \endlink.
- *
- * \sa operator()(int,int), coeff(int, int) const, coeffRef(int)
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::coeffRef(int row, int col)
-{
- ei_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- return derived().coeffRef(row, col);
-}
-
-/** \returns a reference to the coefficient at the given row and column.
- *
- * \sa operator()(int,int) const, operator[](int)
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::operator()(int row, int col)
-{
- ei_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- return derived().coeffRef(row, col);
-}
-
-/** Short version: don't use this function, use
- * \link operator[](int) const \endlink instead.
- *
- * Long version: this function is similar to
- * \link operator[](int) const \endlink, but without the assertion.
- * Use this for limiting the performance cost of debugging code when doing
- * repeated coefficient access. Only use this when it is guaranteed that the
- * parameter \a index is in range.
- *
- * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
- * function equivalent to \link operator[](int) const \endlink.
- *
- * \sa operator[](int) const, coeffRef(int), coeff(int,int) const
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::coeff(int index) const
-{
- ei_internal_assert(index >= 0 && index < size());
- return derived().coeff(index);
-}
-
-/** \returns the coefficient at the given index.
- *
- * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
- *
- * \sa operator[](int), operator()(int,int) const, x() const, y() const,
- * z() const, w() const
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::operator[](int index) const
-{
- ei_assert(index >= 0 && index < size());
- return derived().coeff(index);
-}
-
-/** \returns the coefficient at the given index.
- *
- * This is synonymous to operator[](int) const.
- *
- * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
- *
- * \sa operator[](int), operator()(int,int) const, x() const, y() const,
- * z() const, w() const
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::operator()(int index) const
-{
- ei_assert(index >= 0 && index < size());
- return derived().coeff(index);
-}
-
-/** Short version: don't use this function, use
- * \link operator[](int) \endlink instead.
- *
- * Long version: this function is similar to
- * \link operator[](int) \endlink, but without the assertion.
- * Use this for limiting the performance cost of debugging code when doing
- * repeated coefficient access. Only use this when it is guaranteed that the
- * parameter \a index is in range.
- *
- * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
- * function equivalent to \link operator[](int) \endlink.
- *
- * \sa operator[](int), coeff(int) const, coeffRef(int,int)
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::coeffRef(int index)
-{
- ei_internal_assert(index >= 0 && index < size());
- return derived().coeffRef(index);
-}
-
-/** \returns a reference to the coefficient at the given index.
- *
- * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
- *
- * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w()
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::operator[](int index)
-{
- ei_assert(index >= 0 && index < size());
- return derived().coeffRef(index);
-}
-
-/** \returns a reference to the coefficient at given index.
- *
- * This is synonymous to operator[](int).
- *
- * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
- *
- * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w()
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::operator()(int index)
-{
- ei_assert(index >= 0 && index < size());
- return derived().coeffRef(index);
-}
-
-/** equivalent to operator[](0). */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::x() const { return (*this)[0]; }
-
-/** equivalent to operator[](1). */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::y() const { return (*this)[1]; }
-
-/** equivalent to operator[](2). */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::z() const { return (*this)[2]; }
-
-/** equivalent to operator[](3). */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
- ::w() const { return (*this)[3]; }
-
-/** equivalent to operator[](0). */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::x() { return (*this)[0]; }
-
-/** equivalent to operator[](1). */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::y() { return (*this)[1]; }
-
-/** equivalent to operator[](2). */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::z() { return (*this)[2]; }
-
-/** equivalent to operator[](3). */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
- ::w() { return (*this)[3]; }
-
-/** \returns the packet of coefficients starting at the given row and column. It is your responsibility
- * to ensure that a packet really starts there. This method is only available on expressions having the
- * PacketAccessBit.
- *
- * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
- * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
- * starting at an address which is a multiple of the packet size.
- */
-template<typename Derived>
-template<int LoadMode>
-EIGEN_STRONG_INLINE typename ei_packet_traits<typename ei_traits<Derived>::Scalar>::type
-MatrixBase<Derived>::packet(int row, int col) const
-{
- ei_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- return derived().template packet<LoadMode>(row,col);
-}
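
On x86, this Aligned/Unaligned choice bottoms out in two different SSE load instructions; that is exactly why the product kernels earlier in this diff dispatch _EIGEN_ACCUMULATE_PACKETS on an alignment pattern. A minimal intrinsics sketch of the trade-off (our own helper, assuming SSE and float packets):

#include <xmmintrin.h>

// _mm_load_ps requires a 16-byte aligned address and is cheaper;
// _mm_loadu_ps accepts any address. On SSE, ei_pload/ei_ploadu for float
// packets map to these two intrinsics.
static __m128 load_packet(const float* p, bool aligned)
{
  return aligned ? _mm_load_ps(p) : _mm_loadu_ps(p);
}
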
-
-/** Stores the given packet of coefficients, at the given row and column of this expression. It is your responsibility
- * to ensure that a packet really starts there. This method is only available on expressions having the
- * PacketAccessBit.
- *
- * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
- * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
- * starting at an address which is a multiple of the packet size.
- */
-template<typename Derived>
-template<int StoreMode>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::writePacket
-(int row, int col, const typename ei_packet_traits<typename ei_traits<Derived>::Scalar>::type& x)
-{
- ei_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- derived().template writePacket<StoreMode>(row,col,x);
-}
-
-/** \returns the packet of coefficients starting at the given index. It is your responsibility
- * to ensure that a packet really starts there. This method is only available on expressions having the
- * PacketAccessBit and the LinearAccessBit.
- *
- * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
- * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
- * starting at an address which is a multiple of the packet size.
- */
-template<typename Derived>
-template<int LoadMode>
-EIGEN_STRONG_INLINE typename ei_packet_traits<typename ei_traits<Derived>::Scalar>::type
-MatrixBase<Derived>::packet(int index) const
-{
- ei_internal_assert(index >= 0 && index < size());
- return derived().template packet<LoadMode>(index);
-}
-
-/** Stores the given packet of coefficients, at the given index in this expression. It is your responsibility
- * to ensure that a packet really starts there. This method is only available on expressions having the
- * PacketAccessBit and the LinearAccessBit.
- *
- * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
- * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
- * starting at an address which is a multiple of the packet size.
- */
-template<typename Derived>
-template<int StoreMode>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::writePacket
-(int index, const typename ei_packet_traits<typename ei_traits<Derived>::Scalar>::type& x)
-{
- ei_internal_assert(index >= 0 && index < size());
- derived().template writePacket<StoreMode>(index,x);
-}
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-
-/** \internal Copies the coefficient at position (row,col) of other into *this.
- *
- * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
- * with usual assignments.
- *
- * Outside of this internal usage, this method is probably of no use. It is hidden from the public API documentation.
- */
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::copyCoeff(int row, int col, const MatrixBase<OtherDerived>& other)
-{
- ei_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- derived().coeffRef(row, col) = other.derived().coeff(row, col);
-}
-
-/** \internal Copies the coefficient at the given index of other into *this.
- *
- * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
- * with usual assignments.
- *
- * Outside of this internal usage, this method is probably of no use. It is hidden from the public API documentation.
- */
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::copyCoeff(int index, const MatrixBase<OtherDerived>& other)
-{
- ei_internal_assert(index >= 0 && index < size());
- derived().coeffRef(index) = other.derived().coeff(index);
-}
-
-/** \internal Copies the packet at position (row,col) of other into *this.
- *
- * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
- * with usual assignments.
- *
- * Outside of this internal usage, this method is probably of no use. It is hidden from the public API documentation.
- */
-template<typename Derived>
-template<typename OtherDerived, int StoreMode, int LoadMode>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::copyPacket(int row, int col, const MatrixBase<OtherDerived>& other)
-{
- ei_internal_assert(row >= 0 && row < rows()
- && col >= 0 && col < cols());
- derived().template writePacket<StoreMode>(row, col,
- other.derived().template packet<LoadMode>(row, col));
-}
-
-/** \internal Copies the packet at the given index of other into *this.
- *
- * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
- * with usual assignments.
- *
- * Outside of this internal usage, this method is probably of no use. It is hidden from the public API documentation.
- */
-template<typename Derived>
-template<typename OtherDerived, int StoreMode, int LoadMode>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::copyPacket(int index, const MatrixBase<OtherDerived>& other)
-{
- ei_internal_assert(index >= 0 && index < size());
- derived().template writePacket<StoreMode>(index,
- other.derived().template packet<LoadMode>(index));
-}
-
-#endif
-
-#endif // EIGEN_COEFFS_H
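
The copyCoeff/copyPacket primitives above exist so that the assignment loops (in Assign.h, elsewhere in this diff) can be written once and reused by SwapWrapper. A hypothetical sketch of such a loop, written purely against the two primitives (simplified: linear access, aligned packets, made-up function name):

#include <Eigen/Core>

// Hypothetical linear assignment kernel: a packet-wide pass via copyPacket,
// then a scalar tail via copyCoeff. Swap can reuse the same loop by
// overriding these two primitives in a wrapper type.
template<typename Dst, typename Src>
void assign_linear_sketch(Dst& dst, const Src& src, int packetSize)
{
  const int size = dst.size();
  const int vectorizableSize = size - size % packetSize;
  for (int i = 0; i < vectorizableSize; i += packetSize)
    dst.template copyPacket<Src, Eigen::Aligned, Eigen::Aligned>(i, src);
  for (int i = vectorizableSize; i < size; ++i)
    dst.copyCoeff(i, src);
}
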
diff --git a/extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h b/extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h
deleted file mode 100644
index c4223e2204e..00000000000
--- a/extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h
+++ /dev/null
@@ -1,304 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_CWISE_BINARY_OP_H
-#define EIGEN_CWISE_BINARY_OP_H
-
-/** \class CwiseBinaryOp
- *
- * \brief Generic expression of a coefficient-wise operator between two matrices or vectors
- *
- * \param BinaryOp template functor implementing the operator
- * \param Lhs the type of the left-hand side
- * \param Rhs the type of the right-hand side
- *
- * This class represents an expression of a generic binary operator of two matrices or vectors.
- * It is the return type of the operator+, operator-, and the Cwise methods, and most
- * of the time this is the only way it is used.
- *
- * However, if you want to write a function returning such an expression, you
- * will need to use this class.
- *
- * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
- */
-template<typename BinaryOp, typename Lhs, typename Rhs>
-struct ei_traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
-{
- // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
- // we still want to handle the case when the result type is different.
- typedef typename ei_result_of<
- BinaryOp(
- typename Lhs::Scalar,
- typename Rhs::Scalar
- )
- >::type Scalar;
- typedef typename Lhs::Nested LhsNested;
- typedef typename Rhs::Nested RhsNested;
- typedef typename ei_unref<LhsNested>::type _LhsNested;
- typedef typename ei_unref<RhsNested>::type _RhsNested;
- enum {
- LhsCoeffReadCost = _LhsNested::CoeffReadCost,
- RhsCoeffReadCost = _RhsNested::CoeffReadCost,
- LhsFlags = _LhsNested::Flags,
- RhsFlags = _RhsNested::Flags,
- RowsAtCompileTime = Lhs::RowsAtCompileTime,
- ColsAtCompileTime = Lhs::ColsAtCompileTime,
- MaxRowsAtCompileTime = Lhs::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = Lhs::MaxColsAtCompileTime,
- Flags = (int(LhsFlags) | int(RhsFlags)) & (
- HereditaryBits
- | (int(LhsFlags) & int(RhsFlags) & (LinearAccessBit | AlignedBit))
- | (ei_functor_traits<BinaryOp>::PacketAccess && ((int(LhsFlags) & RowMajorBit)==(int(RhsFlags) & RowMajorBit))
- ? (int(LhsFlags) & int(RhsFlags) & PacketAccessBit) : 0)),
- CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + ei_functor_traits<BinaryOp>::Cost
- };
-};
-
-template<typename BinaryOp, typename Lhs, typename Rhs>
-class CwiseBinaryOp : ei_no_assignment_operator,
- public MatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
- typedef typename ei_traits<CwiseBinaryOp>::LhsNested LhsNested;
- typedef typename ei_traits<CwiseBinaryOp>::RhsNested RhsNested;
-
- EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp())
- : m_lhs(lhs), m_rhs(rhs), m_functor(func)
- {
- // we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor
- // that would take two operands of different types. If there were such an example, then this check should be
- // moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as
- // currently they take only one typename Scalar template parameter.
-    // It is tempting to always allow mixing different types, but remember that this is often impossible in the vectorized paths.
-    // So allowing mixed types would give very unexpected errors when enabling vectorization, e.g. when the user tries to
-    // add a float matrix and a double matrix together.
- EIGEN_STATIC_ASSERT((ei_functor_allows_mixing_real_and_complex<BinaryOp>::ret
- ? int(ei_is_same_type<typename Lhs::RealScalar, typename Rhs::RealScalar>::ret)
- : int(ei_is_same_type<typename Lhs::Scalar, typename Rhs::Scalar>::ret)),
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
- // require the sizes to match
- EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
- ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
- }
-
- EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_lhs.cols(); }
-
- EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
- {
- return m_functor(m_lhs.coeff(row, col), m_rhs.coeff(row, col));
- }
-
- template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
- {
- return m_functor.packetOp(m_lhs.template packet<LoadMode>(row, col), m_rhs.template packet<LoadMode>(row, col));
- }
-
- EIGEN_STRONG_INLINE const Scalar coeff(int index) const
- {
- return m_functor(m_lhs.coeff(index), m_rhs.coeff(index));
- }
-
- template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int index) const
- {
- return m_functor.packetOp(m_lhs.template packet<LoadMode>(index), m_rhs.template packet<LoadMode>(index));
- }
-
- protected:
- const LhsNested m_lhs;
- const RhsNested m_rhs;
- const BinaryOp m_functor;
-};
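
The class above is a textbook expression template: it stores its two (possibly nested) operands plus a functor and computes coefficients on demand, so a chain like a+b-c builds a small expression tree instead of temporaries. A stripped-down, standalone illustration of the pattern, with hypothetical Sum/Vec types that have nothing to do with Eigen's real machinery:

#include <cstddef>
#include <vector>

// Lazy sum expression: no work in the constructor; coefficients are
// computed only when coeff() is called.
template<typename L, typename R>
struct Sum
{
  Sum(const L& l, const R& r) : lhs(l), rhs(r) {}
  const L& lhs;
  const R& rhs;
  double coeff(std::size_t i) const { return lhs.coeff(i) + rhs.coeff(i); }
  std::size_t size() const { return lhs.size(); }
};

struct Vec
{
  std::vector<double> data;
  double coeff(std::size_t i) const { return data[i]; }
  std::size_t size() const { return data.size(); }
  template<typename E>
  Vec& operator=(const E& e)  // the single loop: evaluation happens here
  {
    data.resize(e.size());
    for (std::size_t i = 0; i < e.size(); ++i)
      data[i] = e.coeff(i);
    return *this;
  }
};

Sum<Vec,Vec> operator+(const Vec& a, const Vec& b)
{ return Sum<Vec,Vec>(a, b); }

With this, z = x + y allocates nothing for the sum itself and runs one fused loop in Vec::operator=; adding overloads for expression operands yields the nested trees whose cost and flags the ei_traits<CwiseBinaryOp<...> > specialization above computes.
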
-
-/** \returns an expression of the difference of \c *this and \a other
- *
- * \note If you want to subtract a given scalar from all coefficients, see Cwise::operator-().
- *
- * \sa class CwiseBinaryOp, MatrixBase::operator-=(), Cwise::operator-()
- */
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<ei_scalar_difference_op<typename ei_traits<Derived>::Scalar>,
- Derived, OtherDerived>
-MatrixBase<Derived>::operator-(const MatrixBase<OtherDerived> &other) const
-{
- return CwiseBinaryOp<ei_scalar_difference_op<Scalar>,
- Derived, OtherDerived>(derived(), other.derived());
-}
-
-/** replaces \c *this by \c *this - \a other.
- *
- * \returns a reference to \c *this
- */
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
-MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
-{
- return *this = *this - other;
-}
-
-/** \relates MatrixBase
- *
- * \returns an expression of the sum of \c *this and \a other
- *
- * \note If you want to add a given scalar to all coefficients, see Cwise::operator+().
- *
- * \sa class CwiseBinaryOp, MatrixBase::operator+=(), Cwise::operator+()
- */
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
-MatrixBase<Derived>::operator+(const MatrixBase<OtherDerived> &other) const
-{
- return CwiseBinaryOp<ei_scalar_sum_op<Scalar>, Derived, OtherDerived>(derived(), other.derived());
-}
-
-/** replaces \c *this by \c *this + \a other.
- *
- * \returns a reference to \c *this
- */
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
-MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
-{
- return *this = *this + other;
-}
-
-/** \returns an expression of the Schur product (coefficient-wise product) of *this and \a other
- *
- * Example: \include Cwise_product.cpp
- * Output: \verbinclude Cwise_product.out
- *
- * \sa class CwiseBinaryOp, operator/(), square()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE
-Cwise<ExpressionType>::operator*(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
-}
-
-/** \returns an expression of the coefficient-wise quotient of *this and \a other
- *
- * Example: \include Cwise_quotient.cpp
- * Output: \verbinclude Cwise_quotient.out
- *
- * \sa class CwiseBinaryOp, operator*(), inverse()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
-Cwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived());
-}
-
-/** Replaces this expression by its coefficient-wise product with \a other.
- *
- * Example: \include Cwise_times_equal.cpp
- * Output: \verbinclude Cwise_times_equal.out
- *
- * \sa operator*(), operator/=()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline ExpressionType& Cwise<ExpressionType>::operator*=(const MatrixBase<OtherDerived> &other)
-{
- return m_matrix.const_cast_derived() = *this * other;
-}
-
-/** Replaces this expression by its coefficient-wise quotient by \a other.
- *
- * Example: \include Cwise_slash_equal.cpp
- * Output: \verbinclude Cwise_slash_equal.out
- *
- * \sa operator/(), operator*=()
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline ExpressionType& Cwise<ExpressionType>::operator/=(const MatrixBase<OtherDerived> &other)
-{
- return m_matrix.const_cast_derived() = *this / other;
-}
-
-/** \returns an expression of the coefficient-wise min of *this and \a other
- *
- * Example: \include Cwise_min.cpp
- * Output: \verbinclude Cwise_min.out
- *
- * \sa class CwiseBinaryOp
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)
-Cwise<ExpressionType>::min(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)(_expression(), other.derived());
-}
-
-/** \returns an expression of the coefficient-wise max of *this and \a other
- *
- * Example: \include Cwise_max.cpp
- * Output: \verbinclude Cwise_max.out
- *
- * \sa class CwiseBinaryOp
- */
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)
-Cwise<ExpressionType>::max(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)(_expression(), other.derived());
-}
-
-/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other
- *
- * The template parameter \a CustomBinaryOp is the type of the functor
- * of the custom operator (see class CwiseBinaryOp for an example)
- *
- * \addexample CustomCwiseBinaryFunctors \label How to use custom coeff wise binary functors
- *
- * Here is an example illustrating the use of custom functors:
- * \include class_CwiseBinaryOp.cpp
- * Output: \verbinclude class_CwiseBinaryOp.out
- *
- * \sa class CwiseBinaryOp, MatrixBase::operator+, MatrixBase::operator-, Cwise::operator*, Cwise::operator/
- */
-template<typename Derived>
-template<typename CustomBinaryOp, typename OtherDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
-MatrixBase<Derived>::binaryExpr(const MatrixBase<OtherDerived> &other, const CustomBinaryOp& func) const
-{
- return CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>(derived(), other.derived(), func);
-}
-
-#endif // EIGEN_CWISE_BINARY_OP_H
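
The class_CwiseBinaryOp.cpp listing referenced above is not included in this diff, so here is a hedged stand-in showing what binaryExpr() expects from a custom functor under Eigen2: a call operator, plus a result_type typedef so that the ei_result_of machinery can deduce the result scalar. ClampOp is our own illustrative name, not an Eigen functor:

#include <Eigen/Core>
#include <iostream>

// Coefficient-wise clamp of a into [-b, b]; illustrative functor only.
template<typename Scalar>
struct ClampOp
{
  typedef Scalar result_type;
  Scalar operator()(const Scalar& a, const Scalar& b) const
  { return a < -b ? -b : (a > b ? b : a); }
};

int main()
{
  Eigen::Matrix2f a, bounds;
  a      << -3.0f,  0.5f,
              2.0f, -0.1f;
  bounds <<  1.0f,  1.0f,
              1.0f,  1.0f;
  std::cout << a.binaryExpr(bounds, ClampOp<float>()) << std::endl;
  return 0;
}
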
diff --git a/extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h b/extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h
deleted file mode 100644
index 076d568e023..00000000000
--- a/extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h
+++ /dev/null
@@ -1,229 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_CWISE_UNARY_OP_H
-#define EIGEN_CWISE_UNARY_OP_H
-
-/** \class CwiseUnaryOp
- *
- * \brief Generic expression of a coefficient-wise unary operator of a matrix or a vector
- *
- * \param UnaryOp template functor implementing the operator
- * \param MatrixType the type of the matrix we are applying the unary operator
- *
- * This class represents an expression of a generic unary operator of a matrix or a vector.
- * It is the return type of the unary operator- applied to a matrix or a vector, and most
- * of the time this is the only way it is used.
- *
- * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
- */
-template<typename UnaryOp, typename MatrixType>
-struct ei_traits<CwiseUnaryOp<UnaryOp, MatrixType> >
- : ei_traits<MatrixType>
-{
- typedef typename ei_result_of<
- UnaryOp(typename MatrixType::Scalar)
- >::type Scalar;
- typedef typename MatrixType::Nested MatrixTypeNested;
- typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
- enum {
- Flags = (_MatrixTypeNested::Flags & (
- HereditaryBits | LinearAccessBit | AlignedBit
- | (ei_functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0))),
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost + ei_functor_traits<UnaryOp>::Cost
- };
-};
-
-template<typename UnaryOp, typename MatrixType>
-class CwiseUnaryOp : ei_no_assignment_operator,
- public MatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
-
- inline CwiseUnaryOp(const MatrixType& mat, const UnaryOp& func = UnaryOp())
- : m_matrix(mat), m_functor(func) {}
-
- EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); }
-
- EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
- {
- return m_functor(m_matrix.coeff(row, col));
- }
-
- template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
- {
- return m_functor.packetOp(m_matrix.template packet<LoadMode>(row, col));
- }
-
- EIGEN_STRONG_INLINE const Scalar coeff(int index) const
- {
- return m_functor(m_matrix.coeff(index));
- }
-
- template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int index) const
- {
- return m_functor.packetOp(m_matrix.template packet<LoadMode>(index));
- }
-
- protected:
- const typename MatrixType::Nested m_matrix;
- const UnaryOp m_functor;
-};
-
-/** \returns an expression of a custom coefficient-wise unary operator \a func of *this
- *
- * The template parameter \a CustomUnaryOp is the type of the functor
- * of the custom unary operator.
- *
- * \addexample CustomCwiseUnaryFunctors \label How to use custom coeff wise unary functors
- *
- * Example:
- * \include class_CwiseUnaryOp.cpp
- * Output: \verbinclude class_CwiseUnaryOp.out
- *
- * \sa class CwiseUnaryOp, class CwiseBinaryOp, MatrixBase::operator-, Cwise::abs
- */
-template<typename Derived>
-template<typename CustomUnaryOp>
-EIGEN_STRONG_INLINE const CwiseUnaryOp<CustomUnaryOp, Derived>
-MatrixBase<Derived>::unaryExpr(const CustomUnaryOp& func) const
-{
- return CwiseUnaryOp<CustomUnaryOp, Derived>(derived(), func);
-}
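
And the unary analogue, again with an illustrative functor of our own (SoftSignOp, squashing each coefficient into the interval (-1,1)):

#include <Eigen/Core>
#include <iostream>

// x / (1 + |x|), applied coefficient-wise via unaryExpr().
template<typename Scalar>
struct SoftSignOp
{
  typedef Scalar result_type;
  Scalar operator()(const Scalar& x) const
  { return x / (Scalar(1) + (x < Scalar(0) ? -x : x)); }
};

int main()
{
  Eigen::Matrix2f m;
  m << -4.0f, -1.0f,
        1.0f,  4.0f;
  std::cout << m.unaryExpr(SoftSignOp<float>()) << std::endl;
  return 0;
}
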
-
-/** \returns an expression of the opposite of \c *this
- */
-template<typename Derived>
-EIGEN_STRONG_INLINE const CwiseUnaryOp<ei_scalar_opposite_op<typename ei_traits<Derived>::Scalar>,Derived>
-MatrixBase<Derived>::operator-() const
-{
- return derived();
-}
-
-/** \returns an expression of the coefficient-wise absolute value of \c *this
- *
- * Example: \include Cwise_abs.cpp
- * Output: \verbinclude Cwise_abs.out
- *
- * \sa abs2()
- */
-template<typename ExpressionType>
-EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op)
-Cwise<ExpressionType>::abs() const
-{
- return _expression();
-}
-
-/** \returns an expression of the coefficient-wise squared absolute value of \c *this
- *
- * Example: \include Cwise_abs2.cpp
- * Output: \verbinclude Cwise_abs2.out
- *
- * \sa abs(), square()
- */
-template<typename ExpressionType>
-EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op)
-Cwise<ExpressionType>::abs2() const
-{
- return _expression();
-}
-
-/** \returns an expression of the complex conjugate of \c *this.
- *
- * \sa adjoint() */
-template<typename Derived>
-EIGEN_STRONG_INLINE typename MatrixBase<Derived>::ConjugateReturnType
-MatrixBase<Derived>::conjugate() const
-{
- return ConjugateReturnType(derived());
-}
-
-/** \returns an expression of the real part of \c *this.
- *
- * \sa imag() */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::RealReturnType
-MatrixBase<Derived>::real() const { return derived(); }
-
-/** \returns an expression of the imaginary part of \c *this.
- *
- * \sa real() */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ImagReturnType
-MatrixBase<Derived>::imag() const { return derived(); }
-
-/** \returns an expression of *this with the \a Scalar type cast to
- * \a NewType.
- *
- * The template parameter \a NewType is the type we are casting the scalars to.
- *
- * \sa class CwiseUnaryOp
- */
-template<typename Derived>
-template<typename NewType>
-EIGEN_STRONG_INLINE const CwiseUnaryOp<ei_scalar_cast_op<typename ei_traits<Derived>::Scalar, NewType>, Derived>
-MatrixBase<Derived>::cast() const
-{
- return derived();
-}
-
-/** \relates MatrixBase */
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ScalarMultipleReturnType
-MatrixBase<Derived>::operator*(const Scalar& scalar) const
-{
- return CwiseUnaryOp<ei_scalar_multiple_op<Scalar>, Derived>
- (derived(), ei_scalar_multiple_op<Scalar>(scalar));
-}
-
-/** \relates MatrixBase */
-template<typename Derived>
-EIGEN_STRONG_INLINE const CwiseUnaryOp<ei_scalar_quotient1_op<typename ei_traits<Derived>::Scalar>, Derived>
-MatrixBase<Derived>::operator/(const Scalar& scalar) const
-{
- return CwiseUnaryOp<ei_scalar_quotient1_op<Scalar>, Derived>
- (derived(), ei_scalar_quotient1_op<Scalar>(scalar));
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-MatrixBase<Derived>::operator*=(const Scalar& other)
-{
- return *this = *this * other;
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-MatrixBase<Derived>::operator/=(const Scalar& other)
-{
- return *this = *this / other;
-}
-
-#endif // EIGEN_CWISE_UNARY_OP_H
diff --git a/extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h b/extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h
deleted file mode 100644
index 767fe5fb7c0..00000000000
--- a/extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h
+++ /dev/null
@@ -1,124 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_DIAGONALCOEFFS_H
-#define EIGEN_DIAGONALCOEFFS_H
-
-/** \class DiagonalCoeffs
- *
- * \brief Expression of the main diagonal of a matrix
- *
- * \param MatrixType the type of the object in which we are taking the main diagonal
- *
- * The matrix is not required to be square.
- *
- * This class represents an expression of the main diagonal of a matrix.
- * It is the return type of MatrixBase::diagonal() and most of the time this is
- * the only way it is used.
- *
- * \sa MatrixBase::diagonal()
- */
-template<typename MatrixType>
-struct ei_traits<DiagonalCoeffs<MatrixType> >
-{
- typedef typename MatrixType::Scalar Scalar;
- typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
- typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
- enum {
- RowsAtCompileTime = int(MatrixType::SizeAtCompileTime) == Dynamic ? Dynamic
- : EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime,
- MatrixType::ColsAtCompileTime),
- ColsAtCompileTime = 1,
- MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
- : EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime,
- MatrixType::MaxColsAtCompileTime),
- MaxColsAtCompileTime = 1,
- Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit),
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost
- };
-};
-
-template<typename MatrixType> class DiagonalCoeffs
- : public MatrixBase<DiagonalCoeffs<MatrixType> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(DiagonalCoeffs)
-
- inline DiagonalCoeffs(const MatrixType& matrix) : m_matrix(matrix) {}
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(DiagonalCoeffs)
-
- inline int rows() const { return std::min(m_matrix.rows(), m_matrix.cols()); }
- inline int cols() const { return 1; }
-
- inline Scalar& coeffRef(int row, int)
- {
- return m_matrix.const_cast_derived().coeffRef(row, row);
- }
-
- inline const Scalar coeff(int row, int) const
- {
- return m_matrix.coeff(row, row);
- }
-
- inline Scalar& coeffRef(int index)
- {
- return m_matrix.const_cast_derived().coeffRef(index, index);
- }
-
- inline const Scalar coeff(int index) const
- {
- return m_matrix.coeff(index, index);
- }
-
- protected:
-
- const typename MatrixType::Nested m_matrix;
-};
-
-/** \returns an expression of the main diagonal of the matrix \c *this
- *
- * \c *this is not required to be square.
- *
- * Example: \include MatrixBase_diagonal.cpp
- * Output: \verbinclude MatrixBase_diagonal.out
- *
- * \sa class DiagonalCoeffs */
-template<typename Derived>
-inline DiagonalCoeffs<Derived>
-MatrixBase<Derived>::diagonal()
-{
- return DiagonalCoeffs<Derived>(derived());
-}
-
-/** This is the const version of diagonal(). */
-template<typename Derived>
-inline const DiagonalCoeffs<Derived>
-MatrixBase<Derived>::diagonal() const
-{
- return DiagonalCoeffs<Derived>(derived());
-}
-
-#endif // EIGEN_DIAGONALCOEFFS_H
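
A brief usage sketch of the class just removed (Eigen2-era API): diagonal() is valid on non-square matrices, returns a vector expression of length min(rows, cols), and is writable through the non-const overload.

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::Matrix<float, 2, 3> m;
  m << 1, 2, 3,
       4, 5, 6;
  std::cout << m.diagonal().transpose() << std::endl;  // 1 5
  m.diagonal().setZero();  // writes through to m's (0,0) and (1,1)
  std::cout << m << std::endl;
  return 0;
}
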
diff --git a/extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h b/extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h
deleted file mode 100644
index 01f01fdf259..00000000000
--- a/extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_DIAGONALMATRIX_H
-#define EIGEN_DIAGONALMATRIX_H
-
-/** \class DiagonalMatrix
- * \nonstableyet
- *
- * \brief Expression of a diagonal matrix
- *
- * \param CoeffsVectorType the type of the vector of diagonal coefficients
- *
- * This class is an expression of a diagonal matrix with a given vector of diagonal
- * coefficients. It is the return type of MatrixBase::asDiagonal() and most of the
- * time this is the only way it is used.
- *
- * \sa MatrixBase::asDiagonal()
- */
-template<typename CoeffsVectorType>
-struct ei_traits<DiagonalMatrix<CoeffsVectorType> >
-{
- typedef typename CoeffsVectorType::Scalar Scalar;
- typedef typename ei_nested<CoeffsVectorType>::type CoeffsVectorTypeNested;
- typedef typename ei_unref<CoeffsVectorTypeNested>::type _CoeffsVectorTypeNested;
- enum {
- RowsAtCompileTime = CoeffsVectorType::SizeAtCompileTime,
- ColsAtCompileTime = CoeffsVectorType::SizeAtCompileTime,
- MaxRowsAtCompileTime = CoeffsVectorType::MaxSizeAtCompileTime,
- MaxColsAtCompileTime = CoeffsVectorType::MaxSizeAtCompileTime,
- Flags = (_CoeffsVectorTypeNested::Flags & HereditaryBits) | Diagonal,
- CoeffReadCost = _CoeffsVectorTypeNested::CoeffReadCost
- };
-};
-
-template<typename CoeffsVectorType>
-class DiagonalMatrix : ei_no_assignment_operator,
- public MatrixBase<DiagonalMatrix<CoeffsVectorType> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(DiagonalMatrix)
- typedef CoeffsVectorType _CoeffsVectorType;
-
- // needed to evaluate a DiagonalMatrix<Xpr> to a DiagonalMatrix<NestByValue<Vector> >
- template<typename OtherCoeffsVectorType>
- inline DiagonalMatrix(const DiagonalMatrix<OtherCoeffsVectorType>& other) : m_coeffs(other.diagonal())
- {
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(CoeffsVectorType);
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherCoeffsVectorType);
- ei_assert(m_coeffs.size() > 0);
- }
-
- inline DiagonalMatrix(const CoeffsVectorType& coeffs) : m_coeffs(coeffs)
- {
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(CoeffsVectorType);
- ei_assert(coeffs.size() > 0);
- }
-
- inline int rows() const { return m_coeffs.size(); }
- inline int cols() const { return m_coeffs.size(); }
-
- inline const Scalar coeff(int row, int col) const
- {
- return row == col ? m_coeffs.coeff(row) : static_cast<Scalar>(0);
- }
-
- inline const CoeffsVectorType& diagonal() const { return m_coeffs; }
-
- protected:
- const typename CoeffsVectorType::Nested m_coeffs;
-};
-
-/** \nonstableyet
- * \returns an expression of a diagonal matrix with *this as vector of diagonal coefficients
- *
- * \only_for_vectors
- *
- * \addexample AsDiagonalExample \label How to build a diagonal matrix from a vector
- *
- * Example: \include MatrixBase_asDiagonal.cpp
- * Output: \verbinclude MatrixBase_asDiagonal.out
- *
- * \sa class DiagonalMatrix, isDiagonal()
- **/
-template<typename Derived>
-inline const DiagonalMatrix<Derived>
-MatrixBase<Derived>::asDiagonal() const
-{
- return derived();
-}
-
-/** \nonstableyet
- * \returns true if *this is approximately equal to a diagonal matrix,
- * within the precision given by \a prec.
- *
- * Example: \include MatrixBase_isDiagonal.cpp
- * Output: \verbinclude MatrixBase_isDiagonal.out
- *
- * \sa asDiagonal()
- */
-template<typename Derived>
-bool MatrixBase<Derived>::isDiagonal
-(RealScalar prec) const
-{
- if(cols() != rows()) return false;
- RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
- for(int j = 0; j < cols(); ++j)
- {
- RealScalar absOnDiagonal = ei_abs(coeff(j,j));
- if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
- }
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < j; ++i)
- {
- if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
- if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
- }
- return true;
-}
-
-#endif // EIGEN_DIAGONALMATRIX_H
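
For completeness, a usage sketch for this file too (Eigen2-era API, relying on the default precision argument of isDiagonal()): asDiagonal() wraps a vector as a diagonal-matrix expression without materializing the off-diagonal zeros until it is assigned somewhere.

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::Vector3f v(1.0f, 2.0f, 3.0f);
  Eigen::Matrix3f d = v.asDiagonal();  // evaluation happens here
  std::cout << d << std::endl;
  std::cout << d.isDiagonal() << std::endl;  // 1
  return 0;
}
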
diff --git a/extern/Eigen2/Eigen/src/Core/DiagonalProduct.h b/extern/Eigen2/Eigen/src/Core/DiagonalProduct.h
deleted file mode 100644
index f33a26f98b0..00000000000
--- a/extern/Eigen2/Eigen/src/Core/DiagonalProduct.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_DIAGONALPRODUCT_H
-#define EIGEN_DIAGONALPRODUCT_H
-
-/** \internal Specialization of ei_nested for DiagonalMatrix.
- * Unlike ei_nested, if the argument is a DiagonalMatrix and it must be evaluated,
- * then it is evaluated to a DiagonalMatrix having its own argument evaluated.
- */
-template<typename T, int N> struct ei_nested_diagonal : ei_nested<T,N> {};
-template<typename T, int N> struct ei_nested_diagonal<DiagonalMatrix<T>,N >
- : ei_nested<DiagonalMatrix<T>, N, DiagonalMatrix<NestByValue<typename ei_plain_matrix_type<T>::type> > >
-{};
-
-// specialization of ProductReturnType
-template<typename Lhs, typename Rhs>
-struct ProductReturnType<Lhs,Rhs,DiagonalProduct>
-{
- typedef typename ei_nested_diagonal<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
- typedef typename ei_nested_diagonal<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
-
- typedef Product<LhsNested, RhsNested, DiagonalProduct> Type;
-};
-
-template<typename LhsNested, typename RhsNested>
-struct ei_traits<Product<LhsNested, RhsNested, DiagonalProduct> >
-{
- // clean the nested types:
- typedef typename ei_cleantype<LhsNested>::type _LhsNested;
- typedef typename ei_cleantype<RhsNested>::type _RhsNested;
- typedef typename _LhsNested::Scalar Scalar;
-
- enum {
- LhsFlags = _LhsNested::Flags,
- RhsFlags = _RhsNested::Flags,
- RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
- ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
- MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
-
- LhsIsDiagonal = (_LhsNested::Flags&Diagonal)==Diagonal,
- RhsIsDiagonal = (_RhsNested::Flags&Diagonal)==Diagonal,
-
- CanVectorizeRhs = (!RhsIsDiagonal) && (RhsFlags & RowMajorBit) && (RhsFlags & PacketAccessBit)
- && (ColsAtCompileTime % ei_packet_traits<Scalar>::size == 0),
-
- CanVectorizeLhs = (!LhsIsDiagonal) && (!(LhsFlags & RowMajorBit)) && (LhsFlags & PacketAccessBit)
- && (RowsAtCompileTime % ei_packet_traits<Scalar>::size == 0),
-
- RemovedBits = ~((RhsFlags & RowMajorBit) && (!CanVectorizeLhs) ? 0 : RowMajorBit),
-
- Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
- | (((CanVectorizeLhs&&RhsIsDiagonal) || (CanVectorizeRhs&&LhsIsDiagonal)) ? PacketAccessBit : 0),
-
- CoeffReadCost = NumTraits<Scalar>::MulCost + _LhsNested::CoeffReadCost + _RhsNested::CoeffReadCost
- };
-};
-
-template<typename LhsNested, typename RhsNested> class Product<LhsNested, RhsNested, DiagonalProduct> : ei_no_assignment_operator,
- public MatrixBase<Product<LhsNested, RhsNested, DiagonalProduct> >
-{
- typedef typename ei_traits<Product>::_LhsNested _LhsNested;
- typedef typename ei_traits<Product>::_RhsNested _RhsNested;
-
- enum {
- RhsIsDiagonal = (_RhsNested::Flags&Diagonal)==Diagonal
- };
-
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
-
- template<typename Lhs, typename Rhs>
- inline Product(const Lhs& lhs, const Rhs& rhs)
- : m_lhs(lhs), m_rhs(rhs)
- {
- ei_assert(lhs.cols() == rhs.rows());
- }
-
- inline int rows() const { return m_lhs.rows(); }
- inline int cols() const { return m_rhs.cols(); }
-
- const Scalar coeff(int row, int col) const
- {
- const int unique = RhsIsDiagonal ? col : row;
- return m_lhs.coeff(row, unique) * m_rhs.coeff(unique, col);
- }
-
- template<int LoadMode>
- const PacketScalar packet(int row, int col) const
- {
- if (RhsIsDiagonal)
- {
- return ei_pmul(m_lhs.template packet<LoadMode>(row, col), ei_pset1(m_rhs.coeff(col, col)));
- }
- else
- {
- return ei_pmul(ei_pset1(m_lhs.coeff(row, row)), m_rhs.template packet<LoadMode>(row, col));
- }
- }
-
- protected:
- const LhsNested m_lhs;
- const RhsNested m_rhs;
-};
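-
-// Illustrative sketch of the expressions this specialization implements
-// (hypothetical values):
-//   Vector3f d(1.f, 2.f, 3.f);
-//   Matrix3f m = Matrix3f::Identity();
-//   Matrix3f r = d.asDiagonal() * m;   // scales row i of m by d(i), i.e.
-//                                      // coeff(row,col) = d(row) * m(row,col)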
-
-#endif // EIGEN_DIAGONALPRODUCT_H
diff --git a/extern/Eigen2/Eigen/src/Core/Dot.h b/extern/Eigen2/Eigen/src/Core/Dot.h
deleted file mode 100644
index 5838af70d4a..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Dot.h
+++ /dev/null
@@ -1,361 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_DOT_H
-#define EIGEN_DOT_H
-
-/***************************************************************************
-* Part 1 : the logic deciding a strategy for vectorization and unrolling
-***************************************************************************/
-
-template<typename Derived1, typename Derived2>
-struct ei_dot_traits
-{
-public:
- enum {
- Vectorization = (int(Derived1::Flags)&int(Derived2::Flags)&ActualPacketAccessBit)
- && (int(Derived1::Flags)&int(Derived2::Flags)&LinearAccessBit)
- ? LinearVectorization
- : NoVectorization
- };
-
-private:
- typedef typename Derived1::Scalar Scalar;
- enum {
- PacketSize = ei_packet_traits<Scalar>::size,
- Cost = Derived1::SizeAtCompileTime * (Derived1::CoeffReadCost + Derived2::CoeffReadCost + NumTraits<Scalar>::MulCost)
- + (Derived1::SizeAtCompileTime-1) * NumTraits<Scalar>::AddCost,
- UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize))
- };
-
-public:
- enum {
- Unrolling = Cost <= UnrollingLimit
- ? CompleteUnrolling
- : NoUnrolling
- };
-};
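-
-// A worked instance of the decision above (assuming float coefficients with
-// CoeffReadCost == MulCost == AddCost == 1, SSE packets of size 4, and
-// EIGEN_UNROLLING_LIMIT at its default): for two Vector4f operands,
-// Cost = 4*(1+1+1) + 3*1 = 15, and with LinearVectorization the limit is
-// EIGEN_UNROLLING_LIMIT * 4, so the dot product is completely unrolled.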
-
-/***************************************************************************
-* Part 2 : unrollers
-***************************************************************************/
-
-/*** no vectorization ***/
-
-template<typename Derived1, typename Derived2, int Start, int Length>
-struct ei_dot_novec_unroller
-{
- enum {
- HalfLength = Length/2
- };
-
- typedef typename Derived1::Scalar Scalar;
-
- inline static Scalar run(const Derived1& v1, const Derived2& v2)
- {
- return ei_dot_novec_unroller<Derived1, Derived2, Start, HalfLength>::run(v1, v2)
- + ei_dot_novec_unroller<Derived1, Derived2, Start+HalfLength, Length-HalfLength>::run(v1, v2);
- }
-};
-
-template<typename Derived1, typename Derived2, int Start>
-struct ei_dot_novec_unroller<Derived1, Derived2, Start, 1>
-{
- typedef typename Derived1::Scalar Scalar;
-
- inline static Scalar run(const Derived1& v1, const Derived2& v2)
- {
- return v1.coeff(Start) * ei_conj(v2.coeff(Start));
- }
-};
-
-/*** vectorization ***/
-
-template<typename Derived1, typename Derived2, int Index, int Stop,
- bool LastPacket = (Stop-Index == ei_packet_traits<typename Derived1::Scalar>::size)>
-struct ei_dot_vec_unroller
-{
- typedef typename Derived1::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
-
- enum {
- row1 = Derived1::RowsAtCompileTime == 1 ? 0 : Index,
- col1 = Derived1::RowsAtCompileTime == 1 ? Index : 0,
- row2 = Derived2::RowsAtCompileTime == 1 ? 0 : Index,
- col2 = Derived2::RowsAtCompileTime == 1 ? Index : 0
- };
-
- inline static PacketScalar run(const Derived1& v1, const Derived2& v2)
- {
- return ei_pmadd(
- v1.template packet<Aligned>(row1, col1),
- v2.template packet<Aligned>(row2, col2),
- ei_dot_vec_unroller<Derived1, Derived2, Index+ei_packet_traits<Scalar>::size, Stop>::run(v1, v2)
- );
- }
-};
-
-template<typename Derived1, typename Derived2, int Index, int Stop>
-struct ei_dot_vec_unroller<Derived1, Derived2, Index, Stop, true>
-{
- enum {
- row1 = Derived1::RowsAtCompileTime == 1 ? 0 : Index,
- col1 = Derived1::RowsAtCompileTime == 1 ? Index : 0,
- row2 = Derived2::RowsAtCompileTime == 1 ? 0 : Index,
- col2 = Derived2::RowsAtCompileTime == 1 ? Index : 0,
- alignment1 = (Derived1::Flags & AlignedBit) ? Aligned : Unaligned,
- alignment2 = (Derived2::Flags & AlignedBit) ? Aligned : Unaligned
- };
-
- typedef typename Derived1::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
-
- inline static PacketScalar run(const Derived1& v1, const Derived2& v2)
- {
- return ei_pmul(v1.template packet<alignment1>(row1, col1), v2.template packet<alignment2>(row2, col2));
- }
-};
-
-/***************************************************************************
-* Part 3 : implementation of all cases
-***************************************************************************/
-
-template<typename Derived1, typename Derived2,
- int Vectorization = ei_dot_traits<Derived1, Derived2>::Vectorization,
- int Unrolling = ei_dot_traits<Derived1, Derived2>::Unrolling
->
-struct ei_dot_impl;
-
-template<typename Derived1, typename Derived2>
-struct ei_dot_impl<Derived1, Derived2, NoVectorization, NoUnrolling>
-{
- typedef typename Derived1::Scalar Scalar;
- static Scalar run(const Derived1& v1, const Derived2& v2)
- {
- ei_assert(v1.size()>0 && "you are using an uninitialized vector");
- Scalar res;
- res = v1.coeff(0) * ei_conj(v2.coeff(0));
- for(int i = 1; i < v1.size(); ++i)
- res += v1.coeff(i) * ei_conj(v2.coeff(i));
- return res;
- }
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_dot_impl<Derived1, Derived2, NoVectorization, CompleteUnrolling>
- : public ei_dot_novec_unroller<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
-{};
-
-template<typename Derived1, typename Derived2>
-struct ei_dot_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
-{
- typedef typename Derived1::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
-
- static Scalar run(const Derived1& v1, const Derived2& v2)
- {
- const int size = v1.size();
- const int packetSize = ei_packet_traits<Scalar>::size;
- const int alignedSize = (size/packetSize)*packetSize;
- enum {
- alignment1 = (Derived1::Flags & AlignedBit) ? Aligned : Unaligned,
- alignment2 = (Derived2::Flags & AlignedBit) ? Aligned : Unaligned
- };
- Scalar res;
-
- // do the vectorizable part of the dot product
- if(size >= packetSize)
- {
- PacketScalar packet_res = ei_pmul(
- v1.template packet<alignment1>(0),
- v2.template packet<alignment2>(0)
- );
- for(int index = packetSize; index<alignedSize; index += packetSize)
- {
- packet_res = ei_pmadd(
- v1.template packet<alignment1>(index),
- v2.template packet<alignment2>(index),
- packet_res
- );
- }
- res = ei_predux(packet_res);
-
- // now we must do the rest without vectorization.
- if(alignedSize == size) return res;
- }
- else // too small to vectorize anything.
- // since this is dynamic-size, it is inefficient anyway for such small sizes; don't try to optimize.
- {
- res = Scalar(0);
- }
-
- // do the remainder of the vector
- for(int index = alignedSize; index < size; ++index)
- {
- res += v1.coeff(index) * ei_conj(v2.coeff(index)); // ei_conj: same conjugation convention as the scalar path
- }
-
- return res;
- }
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_dot_impl<Derived1, Derived2, LinearVectorization, CompleteUnrolling>
-{
- typedef typename Derived1::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
- enum {
- PacketSize = ei_packet_traits<Scalar>::size,
- Size = Derived1::SizeAtCompileTime,
- VectorizationSize = (Size / PacketSize) * PacketSize
- };
- static Scalar run(const Derived1& v1, const Derived2& v2)
- {
- Scalar res = ei_predux(ei_dot_vec_unroller<Derived1, Derived2, 0, VectorizationSize>::run(v1, v2));
- if (VectorizationSize != Size)
- res += ei_dot_novec_unroller<Derived1, Derived2, VectorizationSize, Size-VectorizationSize>::run(v1, v2);
- return res;
- }
-};
-
-/***************************************************************************
-* Part 4 : implementation of MatrixBase methods
-***************************************************************************/
-
-/** \returns the dot product of *this with other.
- *
- * \only_for_vectors
- *
- * \note If the scalar type is complex, this function returns the Hermitian
- * (sesquilinear) dot product, which is linear in the first variable and
- * conjugate-linear in the second variable.
- *
- * \sa squaredNorm(), norm()
- */
-template<typename Derived>
-template<typename OtherDerived>
-typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
- EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
- EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
- ei_assert(size() == other.size());
-
- return ei_dot_impl<Derived, OtherDerived>::run(derived(), other.derived());
-}
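-
-// Illustrative sketch of the conjugation convention (hypothetical values):
-//   Vector2cf a, b;
-//   a << std::complex<float>(1,2), std::complex<float>(0,1);
-//   b << std::complex<float>(3,0), std::complex<float>(1,1);
-//   std::complex<float> s = a.dot(b);  // == a(0)*conj(b(0)) + a(1)*conj(b(1))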
-
-/** \returns the squared \em l2 norm of *this, i.e., for vectors, the dot product of *this with itself.
- *
- * \sa dot(), norm()
- */
-template<typename Derived>
-inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
-{
- return ei_real((*this).cwise().abs2().sum());
-}
-
-/** \returns the \em l2 norm of *this, i.e., for vectors, the square root of the dot product of *this with itself.
- *
- * \sa dot(), squaredNorm()
- */
-template<typename Derived>
-inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
-{
- return ei_sqrt(squaredNorm());
-}
-
-/** \returns an expression of the quotient of *this by its own norm.
- *
- * \only_for_vectors
- *
- * \sa norm(), normalize()
- */
-template<typename Derived>
-inline const typename MatrixBase<Derived>::PlainMatrixType
-MatrixBase<Derived>::normalized() const
-{
- typedef typename ei_nested<Derived>::type Nested;
- typedef typename ei_unref<Nested>::type _Nested;
- _Nested n(derived());
- return n / n.norm();
-}
-
-/** Normalizes the vector, i.e. divides it by its own norm.
- *
- * \only_for_vectors
- *
- * \sa norm(), normalized()
- */
-template<typename Derived>
-inline void MatrixBase<Derived>::normalize()
-{
- *this /= norm();
-}
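-
-// Illustrative sketch relating norm(), normalized() and normalize()
-// (hypothetical values):
-//   Vector3f v(3.f, 0.f, 4.f);
-//   v.squaredNorm();               // 25
-//   v.norm();                      // 5
-//   Vector3f u = v.normalized();   // (0.6, 0, 0.8); v is left unchanged
-//   v.normalize();                 // v itself becomes (0.6, 0, 0.8)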
-
-/** \returns true if *this is approximately orthogonal to \a other,
- * within the precision given by \a prec.
- *
- * Example: \include MatrixBase_isOrthogonal.cpp
- * Output: \verbinclude MatrixBase_isOrthogonal.out
- */
-template<typename Derived>
-template<typename OtherDerived>
-bool MatrixBase<Derived>::isOrthogonal
-(const MatrixBase<OtherDerived>& other, RealScalar prec) const
-{
- typename ei_nested<Derived,2>::type nested(derived());
- typename ei_nested<OtherDerived,2>::type otherNested(other.derived());
- return ei_abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
-}
-
-/** \returns true if *this is approximately a unitary matrix,
- * within the precision given by \a prec. In the case where the \a Scalar
- * type is real, a unitary matrix is an orthogonal matrix, whence the name.
- *
- * \note This can be used to check whether a family of vectors forms an orthonormal basis.
- * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
- * orthonormal basis.
- *
- * Example: \include MatrixBase_isUnitary.cpp
- * Output: \verbinclude MatrixBase_isUnitary.out
- */
-template<typename Derived>
-bool MatrixBase<Derived>::isUnitary(RealScalar prec) const
-{
- typename Derived::Nested nested(derived());
- for(int i = 0; i < cols(); ++i)
- {
- if(!ei_isApprox(nested.col(i).squaredNorm(), static_cast<Scalar>(1), prec))
- return false;
- for(int j = 0; j < i; ++j)
- if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec))
- return false;
- }
- return true;
-}
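-
-// Illustrative sketch (hypothetical values): any rotation matrix is unitary,
-// and its columns are pairwise orthogonal:
-//   Matrix3d R = Matrix3d::Identity();
-//   R.isUnitary();                      // true
-//   R.col(0).isOrthogonal(R.col(1));    // true
-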
-#endif // EIGEN_DOT_H
diff --git a/extern/Eigen2/Eigen/src/Core/Functors.h b/extern/Eigen2/Eigen/src/Core/Functors.h
deleted file mode 100644
index 969cad78d8f..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Functors.h
+++ /dev/null
@@ -1,378 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_FUNCTORS_H
-#define EIGEN_FUNCTORS_H
-
-// associative functors:
-
-/** \internal
- * \brief Template functor to compute the sum of two scalars
- *
- * \sa class CwiseBinaryOp, MatrixBase::operator+, class PartialRedux, MatrixBase::sum()
- */
-template<typename Scalar> struct ei_scalar_sum_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
- template<typename PacketScalar>
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
- { return ei_padd(a,b); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_sum_op<Scalar> > {
- enum {
- Cost = NumTraits<Scalar>::AddCost,
- PacketAccess = ei_packet_traits<Scalar>::size>1
- };
-};
-
-/** \internal
- * \brief Template functor to compute the product of two scalars
- *
- * \sa class CwiseBinaryOp, Cwise::operator*(), class PartialRedux, MatrixBase::redux()
- */
-template<typename Scalar> struct ei_scalar_product_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a * b; }
- template<typename PacketScalar>
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
- { return ei_pmul(a,b); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_product_op<Scalar> > {
- enum {
- Cost = NumTraits<Scalar>::MulCost,
- PacketAccess = ei_packet_traits<Scalar>::size>1
- };
-};
-
-/** \internal
- * \brief Template functor to compute the min of two scalars
- *
- * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class PartialRedux, MatrixBase::minCoeff()
- */
-template<typename Scalar> struct ei_scalar_min_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return std::min(a, b); }
- template<typename PacketScalar>
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
- { return ei_pmin(a,b); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_min_op<Scalar> > {
- enum {
- Cost = NumTraits<Scalar>::AddCost,
- PacketAccess = ei_packet_traits<Scalar>::size>1
- };
-};
-
-/** \internal
- * \brief Template functor to compute the max of two scalars
- *
- * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class PartialRedux, MatrixBase::maxCoeff()
- */
-template<typename Scalar> struct ei_scalar_max_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return std::max(a, b); }
- template<typename PacketScalar>
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
- { return ei_pmax(a,b); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_max_op<Scalar> > {
- enum {
- Cost = NumTraits<Scalar>::AddCost,
- PacketAccess = ei_packet_traits<Scalar>::size>1
- };
-};
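-
-// Illustrative sketch of how these associative functors are consumed
-// (redux() is the public Eigen2 reduction entry point; `m` is a hypothetical
-// MatrixXf):
-//   float s = m.redux(ei_scalar_sum_op<float>());   // equivalent to m.sum()
-//   float M = m.redux(ei_scalar_max_op<float>());   // equivalent to m.maxCoeff()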
-
-
-// other binary functors:
-
-/** \internal
- * \brief Template functor to compute the difference of two scalars
- *
- * \sa class CwiseBinaryOp, MatrixBase::operator-
- */
-template<typename Scalar> struct ei_scalar_difference_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; }
- template<typename PacketScalar>
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
- { return ei_psub(a,b); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_difference_op<Scalar> > {
- enum {
- Cost = NumTraits<Scalar>::AddCost,
- PacketAccess = ei_packet_traits<Scalar>::size>1
- };
-};
-
-/** \internal
- * \brief Template functor to compute the quotient of two scalars
- *
- * \sa class CwiseBinaryOp, Cwise::operator/()
- */
-template<typename Scalar> struct ei_scalar_quotient_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; }
- template<typename PacketScalar>
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
- { return ei_pdiv(a,b); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_quotient_op<Scalar> > {
- enum {
- Cost = 2 * NumTraits<Scalar>::MulCost,
- PacketAccess = ei_packet_traits<Scalar>::size>1
- #if (defined EIGEN_VECTORIZE_SSE)
- && NumTraits<Scalar>::HasFloatingPoint
- #endif
- };
-};
-
-// unary functors:
-
-/** \internal
- * \brief Template functor to compute the opposite of a scalar
- *
- * \sa class CwiseUnaryOp, MatrixBase::operator-
- */
-template<typename Scalar> struct ei_scalar_opposite_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_opposite_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false }; };
-
-/** \internal
- * \brief Template functor to compute the absolute value of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::abs
- */
-template<typename Scalar> struct ei_scalar_abs_op EIGEN_EMPTY_STRUCT {
- typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return ei_abs(a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_abs_op<Scalar> >
-{
- enum {
- Cost = NumTraits<Scalar>::AddCost,
- PacketAccess = false // this could actually be vectorized with SSSE3.
- };
-};
-
-/** \internal
- * \brief Template functor to compute the squared absolute value of a scalar
- *
- * \sa class CwiseUnaryOp, Cwise::abs2
- */
-template<typename Scalar> struct ei_scalar_abs2_op EIGEN_EMPTY_STRUCT {
- typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return ei_abs2(a); }
- template<typename PacketScalar>
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const
- { return ei_pmul(a,a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_abs2_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = int(ei_packet_traits<Scalar>::size)>1 }; };
-
-/** \internal
- * \brief Template functor to compute the conjugate of a complex value
- *
- * \sa class CwiseUnaryOp, MatrixBase::conjugate()
- */
-template<typename Scalar> struct ei_scalar_conjugate_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return ei_conj(a); }
- template<typename PacketScalar>
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const { return a; }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_conjugate_op<Scalar> >
-{
- enum {
- Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
- PacketAccess = int(ei_packet_traits<Scalar>::size)>1
- };
-};
-
-/** \internal
- * \brief Template functor to cast a scalar to another type
- *
- * \sa class CwiseUnaryOp, MatrixBase::cast()
- */
-template<typename Scalar, typename NewType>
-struct ei_scalar_cast_op EIGEN_EMPTY_STRUCT {
- typedef NewType result_type;
- EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return static_cast<NewType>(a); }
-};
-template<typename Scalar, typename NewType>
-struct ei_functor_traits<ei_scalar_cast_op<Scalar,NewType> >
-{ enum { Cost = ei_is_same_type<Scalar, NewType>::ret ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };
-
-/** \internal
- * \brief Template functor to extract the real part of a complex
- *
- * \sa class CwiseUnaryOp, MatrixBase::real()
- */
-template<typename Scalar>
-struct ei_scalar_real_op EIGEN_EMPTY_STRUCT {
- typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return ei_real(a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_real_op<Scalar> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-/** \internal
- * \brief Template functor to extract the imaginary part of a complex
- *
- * \sa class CwiseUnaryOp, MatrixBase::imag()
- */
-template<typename Scalar>
-struct ei_scalar_imag_op EIGEN_EMPTY_STRUCT {
- typedef typename NumTraits<Scalar>::Real result_type;
- EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return ei_imag(a); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_imag_op<Scalar> >
-{ enum { Cost = 0, PacketAccess = false }; };
-
-/** \internal
- * \brief Template functor to multiply a scalar by a fixed other one
- *
- * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/
- */
-/* NOTE: why is doing the ei_pset1() in packetOp an optimization?
- * indeed it seems better to declare m_other as a PacketScalar and do the ei_pset1() once
- * in the constructor. However, in practice:
- * - GCC does not like m_other as a PacketScalar and generates a load every time it needs it
- * - on the other hand, GCC is able to move the ei_pset1() out of the loop :)
- * - simpler code ;)
- * (ICC and gcc 4.4 seem to perform well in both cases; the issue is visible with y = a*x + b*y)
- */
-template<typename Scalar>
-struct ei_scalar_multiple_op {
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
- // FIXME default copy constructors seem bugged with std::complex<>
- EIGEN_STRONG_INLINE ei_scalar_multiple_op(const ei_scalar_multiple_op& other) : m_other(other.m_other) { }
- EIGEN_STRONG_INLINE ei_scalar_multiple_op(const Scalar& other) : m_other(other) { }
- EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const
- { return ei_pmul(a, ei_pset1(m_other)); }
- const Scalar m_other;
-private:
- ei_scalar_multiple_op& operator=(const ei_scalar_multiple_op&);
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_multiple_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = ei_packet_traits<Scalar>::size>1 }; };
-
-template<typename Scalar, bool HasFloatingPoint>
-struct ei_scalar_quotient1_impl {
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
- // FIXME default copy constructors seem bugged with std::complex<>
- EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const ei_scalar_quotient1_impl& other) : m_other(other.m_other) { }
- EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const Scalar& other) : m_other(static_cast<Scalar>(1) / other) {}
- EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
- EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const
- { return ei_pmul(a, ei_pset1(m_other)); }
- const Scalar m_other;
-private:
- ei_scalar_quotient1_impl& operator=(const ei_scalar_quotient1_impl&);
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_quotient1_impl<Scalar,true> >
-{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = ei_packet_traits<Scalar>::size>1 }; };
-
-template<typename Scalar>
-struct ei_scalar_quotient1_impl<Scalar,false> {
- // FIXME default copy constructors seem bugged with std::complex<>
- EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const ei_scalar_quotient1_impl& other) : m_other(other.m_other) { }
- EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const Scalar& other) : m_other(other) {}
- EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
- const Scalar m_other;
-private:
- ei_scalar_quotient1_impl& operator=(const ei_scalar_quotient1_impl&);
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_quotient1_impl<Scalar,false> >
-{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
-
-/** \internal
- * \brief Template functor to divide a scalar by a fixed other one
- *
- * This functor is used to implement the quotient of a matrix by
- * a scalar where the scalar type is not necessarily a floating point type.
- *
- * \sa class CwiseUnaryOp, MatrixBase::operator/
- */
-template<typename Scalar>
-struct ei_scalar_quotient1_op : ei_scalar_quotient1_impl<Scalar, NumTraits<Scalar>::HasFloatingPoint > {
- EIGEN_STRONG_INLINE ei_scalar_quotient1_op(const Scalar& other)
- : ei_scalar_quotient1_impl<Scalar, NumTraits<Scalar>::HasFloatingPoint >(other) {}
-private:
- ei_scalar_quotient1_op& operator=(const ei_scalar_quotient1_op&);
-};
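-
-// Illustrative sketch of the dispatch above (hypothetical values): for a
-// floating-point Scalar the reciprocal is precomputed once, so
-//   VectorXf v = VectorXf::Ones(8);
-//   v / 4.f;   // each coefficient is multiplied by the stored 0.25f
-// whereas for an integer Scalar each coefficient is actually divided.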
-
-// nullary functors
-
-template<typename Scalar>
-struct ei_scalar_constant_op {
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
- EIGEN_STRONG_INLINE ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { }
- EIGEN_STRONG_INLINE ei_scalar_constant_op(const Scalar& other) : m_other(other) { }
- EIGEN_STRONG_INLINE const Scalar operator() (int, int = 0) const { return m_other; }
- EIGEN_STRONG_INLINE const PacketScalar packetOp() const { return ei_pset1(m_other); }
- const Scalar m_other;
-private:
- ei_scalar_constant_op& operator=(const ei_scalar_constant_op&);
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_constant_op<Scalar> >
-{ enum { Cost = 1, PacketAccess = ei_packet_traits<Scalar>::size>1, IsRepeatable = true }; };
-
-template<typename Scalar> struct ei_scalar_identity_op EIGEN_EMPTY_STRUCT {
- EIGEN_STRONG_INLINE ei_scalar_identity_op(void) {}
- EIGEN_STRONG_INLINE const Scalar operator() (int row, int col) const { return row==col ? Scalar(1) : Scalar(0); }
-};
-template<typename Scalar>
-struct ei_functor_traits<ei_scalar_identity_op<Scalar> >
-{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
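-
-// Illustrative sketch of the nullary functors in use (these are the functors
-// behind the Constant()/Identity() factories):
-//   MatrixXf::Constant(2, 2, 3.f);  // ei_scalar_constant_op<float>, vectorizable
-//   Matrix3f::Identity();           // ei_scalar_identity_op<float>, no packet access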
-
-// allows adding new functors and specializations of ei_functor_traits from outside Eigen.
-// this macro is really needed because ei_functor_traits must be specialized after it is declared but before it is used...
-#ifdef EIGEN_FUNCTORS_PLUGIN
-#include EIGEN_FUNCTORS_PLUGIN
-#endif
-
-// all functors allow linear access, except ei_scalar_identity_op. So we define here a quick meta
-// indicating whether a functor allows linear access: it always answers 'yes', except for
-// ei_scalar_identity_op.
-template<typename Functor> struct ei_functor_has_linear_access { enum { ret = 1 }; };
-template<typename Scalar> struct ei_functor_has_linear_access<ei_scalar_identity_op<Scalar> > { enum { ret = 0 }; };
-
-// in CwiseBinaryOp, we require the Lhs and Rhs to have the same scalar type, except for multiplication
-// where we only require them to have the same _real_ scalar type so one may multiply, say, float by complex<float>.
-template<typename Functor> struct ei_functor_allows_mixing_real_and_complex { enum { ret = 0 }; };
-template<typename Scalar> struct ei_functor_allows_mixing_real_and_complex<ei_scalar_product_op<Scalar> > { enum { ret = 1 }; };
-
-#endif // EIGEN_FUNCTORS_H
diff --git a/extern/Eigen2/Eigen/src/Core/Fuzzy.h b/extern/Eigen2/Eigen/src/Core/Fuzzy.h
deleted file mode 100644
index 1285542966c..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Fuzzy.h
+++ /dev/null
@@ -1,234 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_FUZZY_H
-#define EIGEN_FUZZY_H
-
-#ifndef EIGEN_LEGACY_COMPARES
-
-/** \returns \c true if \c *this is approximately equal to \a other, within the precision
- * determined by \a prec.
- *
- * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
- * are considered to be approximately equal within precision \f$ p \f$ if
- * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
- * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka the Frobenius
- * norm, i.e. the \em l2 norm of the matrix viewed as a vector of coefficients).
- *
- * \note Because this comparison is multiplicative, one can't use this function
- * to check whether \c *this is approximately equal to the zero matrix or vector.
- * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
- * or vector. If you want to test whether \c *this is zero, use ei_isMuchSmallerThan(const
- * RealScalar&, RealScalar) instead.
- *
- * \sa ei_isMuchSmallerThan(const RealScalar&, RealScalar) const
- */
-template<typename Derived>
-template<typename OtherDerived>
-bool MatrixBase<Derived>::isApprox(
- const MatrixBase<OtherDerived>& other,
- typename NumTraits<Scalar>::Real prec
-) const
-{
- const typename ei_nested<Derived,2>::type nested(derived());
- const typename ei_nested<OtherDerived,2>::type otherNested(other.derived());
- return (nested - otherNested).cwise().abs2().sum() <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
-}
-
-/** \returns \c true if the norm of \c *this is much smaller than \a other,
- * within the precision determined by \a prec.
- *
- * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
- * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
- * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
- *
- * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,
- * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm
- * of a reference matrix of same dimensions.
- *
- * \sa isApprox(), isMuchSmallerThan(const MatrixBase<OtherDerived>&, RealScalar) const
- */
-template<typename Derived>
-bool MatrixBase<Derived>::isMuchSmallerThan(
- const typename NumTraits<Scalar>::Real& other,
- typename NumTraits<Scalar>::Real prec
-) const
-{
- return cwise().abs2().sum() <= prec * prec * other * other;
-}
-
-/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
- * within the precision determined by \a prec.
- *
- * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
- * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
- * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
- * For matrices, the comparison is done using the Hilbert-Schmidt norm.
- *
- * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
- */
-template<typename Derived>
-template<typename OtherDerived>
-bool MatrixBase<Derived>::isMuchSmallerThan(
- const MatrixBase<OtherDerived>& other,
- typename NumTraits<Scalar>::Real prec
-) const
-{
- return this->cwise().abs2().sum() <= prec * prec * other.cwise().abs2().sum();
-}
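-
-// Illustrative sketch of the three fuzzy compares (hypothetical values,
-// default double precision assumed):
-//   Vector2d v(1.0, 1.0);
-//   Vector2d w = v + Vector2d::Constant(1e-14);
-//   v.isApprox(w);                 // true at the default precision
-//   v.isMuchSmallerThan(1.0);      // false: ||v|| is not << 1
-//   (v - w).isMuchSmallerThan(v);  // true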
-
-#else
-
-template<typename Derived, typename OtherDerived=Derived, bool IsVector=Derived::IsVectorAtCompileTime>
-struct ei_fuzzy_selector;
-
-/** \returns \c true if \c *this is approximately equal to \a other, within the precision
- * determined by \a prec.
- *
- * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
- * are considered to be approximately equal within precision \f$ p \f$ if
- * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
- * For matrices, the comparison is done on all columns.
- *
- * \note Because this comparison is multiplicative, one can't use this function
- * to check whether \c *this is approximately equal to the zero matrix or vector.
- * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
- * or vector. If you want to test whether \c *this is zero, use ei_isMuchSmallerThan(const
- * RealScalar&, RealScalar) instead.
- *
- * \sa ei_isMuchSmallerThan(const RealScalar&, RealScalar) const
- */
-template<typename Derived>
-template<typename OtherDerived>
-bool MatrixBase<Derived>::isApprox(
- const MatrixBase<OtherDerived>& other,
- typename NumTraits<Scalar>::Real prec
-) const
-{
- return ei_fuzzy_selector<Derived,OtherDerived>::isApprox(derived(), other.derived(), prec);
-}
-
-/** \returns \c true if the norm of \c *this is much smaller than \a other,
- * within the precision determined by \a prec.
- *
- * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
- * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
- * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
- * For matrices, the comparison is done on all columns.
- *
- * \sa isApprox(), isMuchSmallerThan(const MatrixBase<OtherDerived>&, RealScalar) const
- */
-template<typename Derived>
-bool MatrixBase<Derived>::isMuchSmallerThan(
- const typename NumTraits<Scalar>::Real& other,
- typename NumTraits<Scalar>::Real prec
-) const
-{
- return ei_fuzzy_selector<Derived>::isMuchSmallerThan(derived(), other, prec);
-}
-
-/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
- * within the precision determined by \a prec.
- *
- * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
- * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
- * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
- * For matrices, the comparison is done on all columns.
- *
- * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
- */
-template<typename Derived>
-template<typename OtherDerived>
-bool MatrixBase<Derived>::isMuchSmallerThan(
- const MatrixBase<OtherDerived>& other,
- typename NumTraits<Scalar>::Real prec
-) const
-{
- return ei_fuzzy_selector<Derived,OtherDerived>::isMuchSmallerThan(derived(), other.derived(), prec);
-}
-
-
-template<typename Derived, typename OtherDerived>
-struct ei_fuzzy_selector<Derived,OtherDerived,true>
-{
- typedef typename Derived::RealScalar RealScalar;
- static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec)
- {
- EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
- ei_assert(self.size() == other.size());
- return((self - other).squaredNorm() <= std::min(self.squaredNorm(), other.squaredNorm()) * prec * prec);
- }
- static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec)
- {
- return(self.squaredNorm() <= ei_abs2(other * prec));
- }
- static bool isMuchSmallerThan(const Derived& self, const OtherDerived& other, RealScalar prec)
- {
- EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
- ei_assert(self.size() == other.size());
- return(self.squaredNorm() <= other.squaredNorm() * prec * prec);
- }
-};
-
-template<typename Derived, typename OtherDerived>
-struct ei_fuzzy_selector<Derived,OtherDerived,false>
-{
- typedef typename Derived::RealScalar RealScalar;
- static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec)
- {
- EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
- ei_assert(self.rows() == other.rows() && self.cols() == other.cols());
- typename Derived::Nested nested(self);
- typename OtherDerived::Nested otherNested(other);
- for(int i = 0; i < self.cols(); ++i)
- if((nested.col(i) - otherNested.col(i)).squaredNorm()
- > std::min(nested.col(i).squaredNorm(), otherNested.col(i).squaredNorm()) * prec * prec)
- return false;
- return true;
- }
- static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec)
- {
- typename Derived::Nested nested(self);
- for(int i = 0; i < self.cols(); ++i)
- if(nested.col(i).squaredNorm() > ei_abs2(other * prec))
- return false;
- return true;
- }
- static bool isMuchSmallerThan(const Derived& self, const OtherDerived& other, RealScalar prec)
- {
- EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
- ei_assert(self.rows() == other.rows() && self.cols() == other.cols());
- typename Derived::Nested nested(self);
- typename OtherDerived::Nested otherNested(other);
- for(int i = 0; i < self.cols(); ++i)
- if(nested.col(i).squaredNorm() > otherNested.col(i).squaredNorm() * prec * prec)
- return false;
- return true;
- }
-};
-
-#endif
-
-#endif // EIGEN_FUZZY_H
diff --git a/extern/Eigen2/Eigen/src/Core/GenericPacketMath.h b/extern/Eigen2/Eigen/src/Core/GenericPacketMath.h
deleted file mode 100644
index b0eee29f70f..00000000000
--- a/extern/Eigen2/Eigen/src/Core/GenericPacketMath.h
+++ /dev/null
@@ -1,150 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_GENERIC_PACKET_MATH_H
-#define EIGEN_GENERIC_PACKET_MATH_H
-
-/** \internal
- * \file GenericPacketMath.h
- *
- * Default implementations for types not supported by vectorization.
- * In practice these functions are provided to make the writing of
- * generic vectorized code easier.
- */
-
-/** \internal \returns a + b (coeff-wise) */
-template<typename Packet> inline Packet
-ei_padd(const Packet& a,
- const Packet& b) { return a+b; }
-
-/** \internal \returns a - b (coeff-wise) */
-template<typename Packet> inline Packet
-ei_psub(const Packet& a,
- const Packet& b) { return a-b; }
-
-/** \internal \returns a * b (coeff-wise) */
-template<typename Packet> inline Packet
-ei_pmul(const Packet& a,
- const Packet& b) { return a*b; }
-
-/** \internal \returns a / b (coeff-wise) */
-template<typename Packet> inline Packet
-ei_pdiv(const Packet& a,
- const Packet& b) { return a/b; }
-
-/** \internal \returns the min of \a a and \a b (coeff-wise) */
-template<typename Packet> inline Packet
-ei_pmin(const Packet& a,
- const Packet& b) { return std::min(a, b); }
-
-/** \internal \returns the max of \a a and \a b (coeff-wise) */
-template<typename Packet> inline Packet
-ei_pmax(const Packet& a,
- const Packet& b) { return std::max(a, b); }
-
-/** \internal \returns a packet version of \a *from; \a from must be 16-byte aligned */
-template<typename Scalar> inline typename ei_packet_traits<Scalar>::type
-ei_pload(const Scalar* from) { return *from; }
-
-/** \internal \returns a packet version of \a *from, (un-aligned load) */
-template<typename Scalar> inline typename ei_packet_traits<Scalar>::type
-ei_ploadu(const Scalar* from) { return *from; }
-
-/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */
-template<typename Scalar> inline typename ei_packet_traits<Scalar>::type
-ei_pset1(const Scalar& a) { return a; }
-
-/** \internal copy the packet \a from to \a *to; \a to must be 16-byte aligned */
-template<typename Scalar, typename Packet> inline void ei_pstore(Scalar* to, const Packet& from)
-{ (*to) = from; }
-
-/** \internal copy the packet \a from to \a *to, (un-aligned store) */
-template<typename Scalar, typename Packet> inline void ei_pstoreu(Scalar* to, const Packet& from)
-{ (*to) = from; }
-
-/** \internal \returns the first element of a packet */
-template<typename Packet> inline typename ei_unpacket_traits<Packet>::type ei_pfirst(const Packet& a)
-{ return a; }
-
-/** \internal \returns a packet where element i contains the sum of the elements of \a vecs[i] */
-template<typename Packet> inline Packet
-ei_preduxp(const Packet* vecs) { return vecs[0]; }
-
-/** \internal \returns the sum of the elements of \a a*/
-template<typename Packet> inline typename ei_unpacket_traits<Packet>::type ei_predux(const Packet& a)
-{ return a; }
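-
-// Illustrative sketch of generic vectorized code written with these primitives
-// (hypothetical helper; assumes 16-byte-aligned arrays whose size n is a
-// multiple of the packet size):
-// template<typename Scalar>
-// inline void example_axpy(const Scalar* x, Scalar* y, Scalar a, int n)
-// {
-//   const typename ei_packet_traits<Scalar>::type pa = ei_pset1(a);
-//   for(int i = 0; i < n; i += ei_packet_traits<Scalar>::size)
-//     ei_pstore(y+i, ei_pmadd(pa, ei_pload(x+i), ei_pload(y+i)));  // y += a*x
-// }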
-
-
-/***************************************************************************
-* The following functions might not have to be overwritten for vectorized types
-***************************************************************************/
-
-/** \internal \returns a * b + c (coeff-wise) */
-template<typename Packet> inline Packet
-ei_pmadd(const Packet& a,
- const Packet& b,
- const Packet& c)
-{ return ei_padd(ei_pmul(a, b),c); }
-
-/** \internal \returns a packet version of \a *from.
- * If LoadMode equals Aligned, \a from must be 16-byte aligned */
-template<typename Scalar, int LoadMode>
-inline typename ei_packet_traits<Scalar>::type ei_ploadt(const Scalar* from)
-{
- if(LoadMode == Aligned)
- return ei_pload(from);
- else
- return ei_ploadu(from);
-}
-
-/** \internal copy the packet \a from to \a *to.
- * If StoreMode equals Aligned, \a to must be 16-byte aligned */
-template<typename Scalar, typename Packet, int StoreMode>
-inline void ei_pstoret(Scalar* to, const Packet& from)
-{
- if(StoreMode == Aligned)
- ei_pstore(to, from);
- else
- ei_pstoreu(to, from);
-}
-
-/** \internal default implementation of ei_palign() allowing partial specialization */
-template<int Offset,typename PacketType>
-struct ei_palign_impl
-{
- // by default data are aligned, so there is nothing to be done :)
- inline static void run(PacketType&, const PacketType&) {}
-};
-
-/** \internal update \a first using the concatenation of the \a Offset last elements
- * of \a first and packet_size minus \a Offset first elements of \a second */
-template<int Offset,typename PacketType>
-inline void ei_palign(PacketType& first, const PacketType& second)
-{
- ei_palign_impl<Offset,PacketType>::run(first,second);
-}
-
-#endif // EIGEN_GENERIC_PACKET_MATH_H
-
diff --git a/extern/Eigen2/Eigen/src/Core/Map.h b/extern/Eigen2/Eigen/src/Core/Map.h
deleted file mode 100644
index 5f44a87e685..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Map.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_MAP_H
-#define EIGEN_MAP_H
-
-/** \class Map
- *
- * \brief A matrix or vector expression mapping an existing array of data.
- *
- * \param MatrixType the equivalent matrix type of the mapped data
- * \param _PacketAccess allows enforcing aligned loads and stores if set to ForceAligned.
- * The default is AsRequested. This parameter is internally used by Eigen
- * in expressions such as \code Map<...>(...) += other; \endcode and most
- * of the time this is the only way it is used.
- *
- * This class represents a matrix or vector expression mapping an existing array of data.
- * It can be used to let Eigen interface, without any overhead, with non-Eigen data structures,
- * such as plain C arrays or structures from other libraries.
- *
- * This class is the return type of Matrix::Map() but can also be used directly.
- *
- * \sa Matrix::Map()
- */
-template<typename MatrixType, int _PacketAccess>
-struct ei_traits<Map<MatrixType, _PacketAccess> > : public ei_traits<MatrixType>
-{
- enum {
- PacketAccess = _PacketAccess,
- Flags = ei_traits<MatrixType>::Flags & ~AlignedBit
- };
- typedef typename ei_meta_if<int(PacketAccess)==ForceAligned,
- Map<MatrixType, _PacketAccess>&,
- Map<MatrixType, ForceAligned> >::ret AlignedDerivedType;
-};
-
-template<typename MatrixType, int PacketAccess> class Map
- : public MapBase<Map<MatrixType, PacketAccess> >
-{
- public:
-
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Map, MapBase<Map>)
- typedef typename ei_traits<Map>::AlignedDerivedType AlignedDerivedType;
-
- inline int stride() const { return this->innerSize(); }
-
- AlignedDerivedType _convertToForceAligned()
- {
- return Map<MatrixType,ForceAligned>(Base::m_data, Base::m_rows.value(), Base::m_cols.value());
- }
-
- inline Map(const Scalar* data) : Base(data) {}
-
- inline Map(const Scalar* data, int size) : Base(data, size) {}
-
- inline Map(const Scalar* data, int rows, int cols) : Base(data, rows, cols) {}
-
- inline void resize(int rows, int cols)
- {
- EIGEN_ONLY_USED_FOR_DEBUG(rows);
- EIGEN_ONLY_USED_FOR_DEBUG(cols);
- ei_assert(rows == this->rows());
- ei_assert(cols == this->cols());
- }
-
- inline void resize(int size)
- {
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(MatrixType)
- EIGEN_ONLY_USED_FOR_DEBUG(size);
- ei_assert(size == this->size());
- }
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
-};
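-
-// Illustrative usage sketch (hypothetical data): Map gives an Eigen view over
-// memory Eigen does not own, with no copy:
-//   float data[6] = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f };
-//   Map<MatrixXf> m(data, 2, 3);   // 2x3 column-major view; writes hit `data`
-//   Map<Vector3f> v(data);         // fixed-size view over the first 3 floats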
-
-/** Constructor copying an existing array of data.
- * Only for fixed-size matrices and vectors.
- * \param data The array of data to copy
- *
- * \sa Matrix::Map(const Scalar *)
- */
-template<typename _Scalar, int _Rows, int _Cols, int _StorageOrder, int _MaxRows, int _MaxCols>
-inline Matrix<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols>
- ::Matrix(const Scalar *data)
-{
- _set_noalias(Eigen::Map<Matrix>(data));
-}
-
-#endif // EIGEN_MAP_H
diff --git a/extern/Eigen2/Eigen/src/Core/MapBase.h b/extern/Eigen2/Eigen/src/Core/MapBase.h
deleted file mode 100644
index c923bc34034..00000000000
--- a/extern/Eigen2/Eigen/src/Core/MapBase.h
+++ /dev/null
@@ -1,202 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_MAPBASE_H
-#define EIGEN_MAPBASE_H
-
-/** \class MapBase
- *
- * \brief Base class for Map and Block expressions with direct access
- *
- * Expression classes inheriting MapBase must define the constant \c PacketAccess,
- * and type \c AlignedDerivedType in their respective ei_traits<> specialization structure.
- * The value of \c PacketAccess can be either:
- * - \b ForceAligned which enforces both aligned loads and stores
- * - \b AsRequested which is the default behavior
- * The type \c AlignedDerivedType should correspond to the equivalent expression type
- * with \c PacketAccess being \c ForceAligned.
- *
- * \sa class Map, class Block
- */
-template<typename Derived> class MapBase
- : public MatrixBase<Derived>
-{
- public:
-
- typedef MatrixBase<Derived> Base;
- enum {
- IsRowMajor = (int(ei_traits<Derived>::Flags) & RowMajorBit) ? 1 : 0,
- PacketAccess = ei_traits<Derived>::PacketAccess,
- RowsAtCompileTime = ei_traits<Derived>::RowsAtCompileTime,
- ColsAtCompileTime = ei_traits<Derived>::ColsAtCompileTime,
- SizeAtCompileTime = Base::SizeAtCompileTime
- };
-
- typedef typename ei_traits<Derived>::AlignedDerivedType AlignedDerivedType;
- typedef typename ei_traits<Derived>::Scalar Scalar;
- typedef typename Base::PacketScalar PacketScalar;
- using Base::derived;
-
- inline int rows() const { return m_rows.value(); }
- inline int cols() const { return m_cols.value(); }
-
- inline int stride() const { return derived().stride(); }
- inline const Scalar* data() const { return m_data; }
-
- template<bool IsForceAligned,typename Dummy> struct force_aligned_impl {
- AlignedDerivedType static run(MapBase& a) { return a.derived(); }
- };
-
- template<typename Dummy> struct force_aligned_impl<false,Dummy> {
- AlignedDerivedType static run(MapBase& a) { return a.derived()._convertToForceAligned(); }
- };
-
- /** \returns an expression equivalent to \c *this but having the \c PacketAccess constant
- * set to \c ForceAligned. Must be reimplemented by the derived class. */
- AlignedDerivedType forceAligned()
- {
- return force_aligned_impl<int(PacketAccess)==int(ForceAligned),Derived>::run(*this);
- }
-
- inline const Scalar& coeff(int row, int col) const
- {
- if(IsRowMajor)
- return m_data[col + row * stride()];
- else // column-major
- return m_data[row + col * stride()];
- }
-
- inline Scalar& coeffRef(int row, int col)
- {
- if(IsRowMajor)
- return const_cast<Scalar*>(m_data)[col + row * stride()];
- else // column-major
- return const_cast<Scalar*>(m_data)[row + col * stride()];
- }
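-    // A worked example of the addressing above: for a 3x4 column-major map
-    // (stride() == 3), coeff(1, 2) reads m_data[1 + 2*3] == m_data[7], while
-    // for a 3x4 row-major map (stride() == 4) it reads m_data[2 + 1*4] == m_data[6].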
-
- inline const Scalar coeff(int index) const
- {
- ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
- if ( ((RowsAtCompileTime == 1) == IsRowMajor) )
- return m_data[index];
- else
- return m_data[index*stride()];
- }
-
- inline Scalar& coeffRef(int index)
- {
- ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
- if ( ((RowsAtCompileTime == 1) == IsRowMajor) )
- return const_cast<Scalar*>(m_data)[index];
- else
- return const_cast<Scalar*>(m_data)[index*stride()];
- }
-
- template<int LoadMode>
- inline PacketScalar packet(int row, int col) const
- {
- return ei_ploadt<Scalar, int(PacketAccess) == ForceAligned ? Aligned : LoadMode>
- (m_data + (IsRowMajor ? col + row * stride()
- : row + col * stride()));
- }
-
- template<int LoadMode>
- inline PacketScalar packet(int index) const
- {
- return ei_ploadt<Scalar, int(PacketAccess) == ForceAligned ? Aligned : LoadMode>(m_data + index);
- }
-
- template<int StoreMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
- {
- ei_pstoret<Scalar, PacketScalar, int(PacketAccess) == ForceAligned ? Aligned : StoreMode>
- (const_cast<Scalar*>(m_data) + (IsRowMajor ? col + row * stride()
- : row + col * stride()), x);
- }
-
- template<int StoreMode>
- inline void writePacket(int index, const PacketScalar& x)
- {
- ei_pstoret<Scalar, PacketScalar, int(PacketAccess) == ForceAligned ? Aligned : StoreMode>
- (const_cast<Scalar*>(m_data) + index, x);
- }
-
- inline MapBase(const Scalar* data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
- {
- EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
- }
-
- inline MapBase(const Scalar* data, int size)
- : m_data(data),
- m_rows(RowsAtCompileTime == Dynamic ? size : RowsAtCompileTime),
- m_cols(ColsAtCompileTime == Dynamic ? size : ColsAtCompileTime)
- {
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- ei_assert(size > 0 || data == 0);
- ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
- }
-
- inline MapBase(const Scalar* data, int rows, int cols)
- : m_data(data), m_rows(rows), m_cols(cols)
- {
- ei_assert( (data == 0)
- || ( rows > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
- && cols > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));
- }
-
- Derived& operator=(const MapBase& other)
- {
- return Base::operator=(other);
- }
-
- template<typename OtherDerived>
- Derived& operator=(const MatrixBase<OtherDerived>& other)
- {
- return Base::operator=(other);
- }
-
- using Base::operator*=;
-
- template<typename OtherDerived>
- Derived& operator+=(const MatrixBase<OtherDerived>& other)
- { return derived() = forceAligned() + other; }
-
- template<typename OtherDerived>
- Derived& operator-=(const MatrixBase<OtherDerived>& other)
- { return derived() = forceAligned() - other; }
-
- Derived& operator*=(const Scalar& other)
- { return derived() = forceAligned() * other; }
-
- Derived& operator/=(const Scalar& other)
- { return derived() = forceAligned() / other; }
-
- protected:
- const Scalar* EIGEN_RESTRICT m_data;
- const ei_int_if_dynamic<RowsAtCompileTime> m_rows;
- const ei_int_if_dynamic<ColsAtCompileTime> m_cols;
-};
-
-#endif // EIGEN_MAPBASE_H
diff --git a/extern/Eigen2/Eigen/src/Core/MathFunctions.h b/extern/Eigen2/Eigen/src/Core/MathFunctions.h
deleted file mode 100644
index 1ee64af02c6..00000000000
--- a/extern/Eigen2/Eigen/src/Core/MathFunctions.h
+++ /dev/null
@@ -1,295 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_MATHFUNCTIONS_H
-#define EIGEN_MATHFUNCTIONS_H
-
-template<typename T> inline typename NumTraits<T>::Real precision();
-template<typename T> inline typename NumTraits<T>::Real machine_epsilon();
-template<typename T> inline T ei_random(T a, T b);
-template<typename T> inline T ei_random();
-template<typename T> inline T ei_random_amplitude()
-{
- if(NumTraits<T>::HasFloatingPoint) return static_cast<T>(1);
- else return static_cast<T>(10);
-}
-
-template<typename T> inline T ei_hypot(T x, T y)
-{
-  T _x = ei_abs(x);
-  T _y = ei_abs(y);
-  T p = std::max(_x, _y);
-  if(p == T(0)) return T(0); // both inputs are zero: avoid the 0/0 below
-  T q = std::min(_x, _y);
-  T qp = q/p;
-  return p * ei_sqrt(T(1) + qp*qp);
-}
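-
-// The scaling above avoids the overflow that a naive ei_sqrt(x*x + y*y) would
-// hit for large inputs. For example, with floats:
-//   ei_hypot(3e30f, 4e30f) == 5e30f, whereas 3e30f * 3e30f alone already
-//   overflows to +inf.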
-
-/**************
-*** int ***
-**************/
-
-template<> inline int precision<int>() { return 0; }
-template<> inline int machine_epsilon<int>() { return 0; }
-inline int ei_real(int x) { return x; }
-inline int ei_imag(int) { return 0; }
-inline int ei_conj(int x) { return x; }
-inline int ei_abs(int x) { return abs(x); }
-inline int ei_abs2(int x) { return x*x; }
-inline int ei_sqrt(int) { ei_assert(false); return 0; }
-inline int ei_exp(int) { ei_assert(false); return 0; }
-inline int ei_log(int) { ei_assert(false); return 0; }
-inline int ei_sin(int) { ei_assert(false); return 0; }
-inline int ei_cos(int) { ei_assert(false); return 0; }
-inline int ei_atan2(int, int) { ei_assert(false); return 0; }
-inline int ei_pow(int x, int y) { return int(std::pow(double(x), y)); }
-
-template<> inline int ei_random(int a, int b)
-{
- // We can't just do rand()%n as only the high-order bits are really random
- return a + static_cast<int>((b-a+1) * (rand() / (RAND_MAX + 1.0)));
-}
-template<> inline int ei_random()
-{
- return ei_random<int>(-ei_random_amplitude<int>(), ei_random_amplitude<int>());
-}
-inline bool ei_isMuchSmallerThan(int a, int, int = precision<int>())
-{
- return a == 0;
-}
-inline bool ei_isApprox(int a, int b, int = precision<int>())
-{
- return a == b;
-}
-inline bool ei_isApproxOrLessThan(int a, int b, int = precision<int>())
-{
- return a <= b;
-}
-
-/**************
-*** float ***
-**************/
-
-template<> inline float precision<float>() { return 1e-5f; }
-template<> inline float machine_epsilon<float>() { return 1.192e-07f; }
-inline float ei_real(float x) { return x; }
-inline float ei_imag(float) { return 0.f; }
-inline float ei_conj(float x) { return x; }
-inline float ei_abs(float x) { return std::abs(x); }
-inline float ei_abs2(float x) { return x*x; }
-inline float ei_sqrt(float x) { return std::sqrt(x); }
-inline float ei_exp(float x) { return std::exp(x); }
-inline float ei_log(float x) { return std::log(x); }
-inline float ei_sin(float x) { return std::sin(x); }
-inline float ei_cos(float x) { return std::cos(x); }
-inline float ei_atan2(float y, float x) { return std::atan2(y,x); }
-inline float ei_pow(float x, float y) { return std::pow(x, y); }
-
-template<> inline float ei_random(float a, float b)
-{
-#ifdef EIGEN_NICE_RANDOM
- int i;
- do { i = ei_random<int>(256*int(a),256*int(b));
- } while(i==0);
- return float(i)/256.f;
-#else
- return a + (b-a) * float(std::rand()) / float(RAND_MAX);
-#endif
-}
-template<> inline float ei_random()
-{
- return ei_random<float>(-ei_random_amplitude<float>(), ei_random_amplitude<float>());
-}
-inline bool ei_isMuchSmallerThan(float a, float b, float prec = precision<float>())
-{
- return ei_abs(a) <= ei_abs(b) * prec;
-}
-inline bool ei_isApprox(float a, float b, float prec = precision<float>())
-{
- return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
-}
-inline bool ei_isApproxOrLessThan(float a, float b, float prec = precision<float>())
-{
- return a <= b || ei_isApprox(a, b, prec);
-}
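-
-// Worked examples of the fuzzy comparisons above (the default float precision
-// is 1e-5f):
-//   ei_isApprox(1.f, 1.f + 1e-7f);     // true:  |a-b| <= min(|a|,|b|) * 1e-5f
-//   ei_isApprox(1.f, 1.1f);            // false: 0.1 > 1e-5
-//   ei_isMuchSmallerThan(1e-8f, 1.f);  // true:  1e-8 <= 1 * 1e-5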
-
-/**************
-*** double ***
-**************/
-
-template<> inline double precision<double>() { return 1e-11; }
-template<> inline double machine_epsilon<double>() { return 2.220e-16; }
-
-inline double ei_real(double x) { return x; }
-inline double ei_imag(double) { return 0.; }
-inline double ei_conj(double x) { return x; }
-inline double ei_abs(double x) { return std::abs(x); }
-inline double ei_abs2(double x) { return x*x; }
-inline double ei_sqrt(double x) { return std::sqrt(x); }
-inline double ei_exp(double x) { return std::exp(x); }
-inline double ei_log(double x) { return std::log(x); }
-inline double ei_sin(double x) { return std::sin(x); }
-inline double ei_cos(double x) { return std::cos(x); }
-inline double ei_atan2(double y, double x) { return std::atan2(y,x); }
-inline double ei_pow(double x, double y) { return std::pow(x, y); }
-
-template<> inline double ei_random(double a, double b)
-{
-#ifdef EIGEN_NICE_RANDOM
- int i;
- do { i= ei_random<int>(256*int(a),256*int(b));
- } while(i==0);
- return i/256.;
-#else
- return a + (b-a) * std::rand() / RAND_MAX;
-#endif
-}
-template<> inline double ei_random()
-{
- return ei_random<double>(-ei_random_amplitude<double>(), ei_random_amplitude<double>());
-}
-inline bool ei_isMuchSmallerThan(double a, double b, double prec = precision<double>())
-{
- return ei_abs(a) <= ei_abs(b) * prec;
-}
-inline bool ei_isApprox(double a, double b, double prec = precision<double>())
-{
- return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
-}
-inline bool ei_isApproxOrLessThan(double a, double b, double prec = precision<double>())
-{
- return a <= b || ei_isApprox(a, b, prec);
-}
-
-/*********************
-*** complex<float> ***
-*********************/
-
-template<> inline float precision<std::complex<float> >() { return precision<float>(); }
-template<> inline float machine_epsilon<std::complex<float> >() { return machine_epsilon<float>(); }
-inline float ei_real(const std::complex<float>& x) { return std::real(x); }
-inline float ei_imag(const std::complex<float>& x) { return std::imag(x); }
-inline std::complex<float> ei_conj(const std::complex<float>& x) { return std::conj(x); }
-inline float ei_abs(const std::complex<float>& x) { return std::abs(x); }
-inline float ei_abs2(const std::complex<float>& x) { return std::norm(x); }
-inline std::complex<float> ei_exp(std::complex<float> x) { return std::exp(x); }
-inline std::complex<float> ei_sin(std::complex<float> x) { return std::sin(x); }
-inline std::complex<float> ei_cos(std::complex<float> x) { return std::cos(x); }
-inline std::complex<float> ei_atan2(std::complex<float>, std::complex<float> ) { ei_assert(false); return 0; }
-
-template<> inline std::complex<float> ei_random()
-{
- return std::complex<float>(ei_random<float>(), ei_random<float>());
-}
-inline bool ei_isMuchSmallerThan(const std::complex<float>& a, const std::complex<float>& b, float prec = precision<float>())
-{
- return ei_abs2(a) <= ei_abs2(b) * prec * prec;
-}
-inline bool ei_isMuchSmallerThan(const std::complex<float>& a, float b, float prec = precision<float>())
-{
- return ei_abs2(a) <= ei_abs2(b) * prec * prec;
-}
-inline bool ei_isApprox(const std::complex<float>& a, const std::complex<float>& b, float prec = precision<float>())
-{
- return ei_isApprox(ei_real(a), ei_real(b), prec)
- && ei_isApprox(ei_imag(a), ei_imag(b), prec);
-}
-// ei_isApproxOrLessThan wouldn't make sense for complex numbers
-
-/**********************
-*** complex<double> ***
-**********************/
-
-template<> inline double precision<std::complex<double> >() { return precision<double>(); }
-template<> inline double machine_epsilon<std::complex<double> >() { return machine_epsilon<double>(); }
-inline double ei_real(const std::complex<double>& x) { return std::real(x); }
-inline double ei_imag(const std::complex<double>& x) { return std::imag(x); }
-inline std::complex<double> ei_conj(const std::complex<double>& x) { return std::conj(x); }
-inline double ei_abs(const std::complex<double>& x) { return std::abs(x); }
-inline double ei_abs2(const std::complex<double>& x) { return std::norm(x); }
-inline std::complex<double> ei_exp(std::complex<double> x) { return std::exp(x); }
-inline std::complex<double> ei_sin(std::complex<double> x) { return std::sin(x); }
-inline std::complex<double> ei_cos(std::complex<double> x) { return std::cos(x); }
-inline std::complex<double> ei_atan2(std::complex<double>, std::complex<double>) { ei_assert(false); return 0; }
-
-template<> inline std::complex<double> ei_random()
-{
- return std::complex<double>(ei_random<double>(), ei_random<double>());
-}
-inline bool ei_isMuchSmallerThan(const std::complex<double>& a, const std::complex<double>& b, double prec = precision<double>())
-{
- return ei_abs2(a) <= ei_abs2(b) * prec * prec;
-}
-inline bool ei_isMuchSmallerThan(const std::complex<double>& a, double b, double prec = precision<double>())
-{
- return ei_abs2(a) <= ei_abs2(b) * prec * prec;
-}
-inline bool ei_isApprox(const std::complex<double>& a, const std::complex<double>& b, double prec = precision<double>())
-{
- return ei_isApprox(ei_real(a), ei_real(b), prec)
- && ei_isApprox(ei_imag(a), ei_imag(b), prec);
-}
-// ei_isApproxOrLessThan wouldn't make sense for complex numbers
-
-
-/******************
-*** long double ***
-******************/
-
-template<> inline long double precision<long double>() { return precision<double>(); }
-template<> inline long double machine_epsilon<long double>() { return 1.084e-19l; }
-inline long double ei_real(long double x) { return x; }
-inline long double ei_imag(long double) { return 0.; }
-inline long double ei_conj(long double x) { return x; }
-inline long double ei_abs(long double x) { return std::abs(x); }
-inline long double ei_abs2(long double x) { return x*x; }
-inline long double ei_sqrt(long double x) { return std::sqrt(x); }
-inline long double ei_exp(long double x) { return std::exp(x); }
-inline long double ei_log(long double x) { return std::log(x); }
-inline long double ei_sin(long double x) { return std::sin(x); }
-inline long double ei_cos(long double x) { return std::cos(x); }
-inline long double ei_atan2(long double y, long double x) { return std::atan2(y,x); }
-inline long double ei_pow(long double x, long double y) { return std::pow(x, y); }
-
-template<> inline long double ei_random(long double a, long double b)
-{
- return ei_random<double>(static_cast<double>(a),static_cast<double>(b));
-}
-template<> inline long double ei_random()
-{
- return ei_random<double>(-ei_random_amplitude<double>(), ei_random_amplitude<double>());
-}
-inline bool ei_isMuchSmallerThan(long double a, long double b, long double prec = precision<long double>())
-{
- return ei_abs(a) <= ei_abs(b) * prec;
-}
-inline bool ei_isApprox(long double a, long double b, long double prec = precision<long double>())
-{
- return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
-}
-inline bool ei_isApproxOrLessThan(long double a, long double b, long double prec = precision<long double>())
-{
- return a <= b || ei_isApprox(a, b, prec);
-}
-
-#endif // EIGEN_MATHFUNCTIONS_H
diff --git a/extern/Eigen2/Eigen/src/Core/Matrix.h b/extern/Eigen2/Eigen/src/Core/Matrix.h
deleted file mode 100644
index 22090c777da..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Matrix.h
+++ /dev/null
@@ -1,639 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_MATRIX_H
-#define EIGEN_MATRIX_H
-
-
-/** \class Matrix
- *
- * \brief The matrix class, also used for vectors and row-vectors
- *
- * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen.
- * Vectors are matrices with one column, and row-vectors are matrices with one row.
- *
- * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note").
- *
- * The first three template parameters are required:
- * \param _Scalar Numeric type, e.g. float, double, int
- * \param _Rows Number of rows, or \b Dynamic
- * \param _Cols Number of columns, or \b Dynamic
- *
- * The remaining template parameters are optional -- in most cases you don't have to worry about them.
- * \param _Options A combination of either \b RowMajor or \b ColMajor, and of either
- * \b AutoAlign or \b DontAlign.
- * The former controls storage order, and defaults to column-major. The latter controls alignment, which is required
- * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size.
- * \param _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note").
- * \param _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note").
- *
- * Eigen provides a number of typedefs covering the usual cases. Here are some examples:
- *
- * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix<double, 2, 2>)
- * \li \c Vector4f is a vector of 4 floats (\c Matrix<float, 4, 1>)
- * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix<int, 1, 3>)
- *
- * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix<float, Dynamic, Dynamic>)
- * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix<float, Dynamic, 1>)
- *
- * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs.
- *
- * You can access elements of vectors and matrices using normal subscripting:
- *
- * \code
- * Eigen::VectorXd v(10);
- * v[0] = 0.1;
- * v[1] = 0.2;
- * v(0) = 0.3;
- * v(1) = 0.4;
- *
- * Eigen::MatrixXi m(10, 10);
- * m(0, 1) = 1;
- * m(0, 2) = 2;
- * m(0, 3) = 3;
- * \endcode
- *
- * <i><b>Some notes:</b></i>
- *
- * <dl>
- * <dt><b>\anchor dense Dense versus sparse:</b></dt>
- * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module.
- *
- * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored, in an ordinary contiguous array.
- * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.</dd>
- *
- * <dt><b>\anchor fixedsize Fixed-size versus dynamic-size:</b></dt>
- * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates the array
- * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up
- * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time.
- *
- * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime
- * variables, and the array of coefficients is allocated dynamically on the heap.
- *
- * Note that \em dense matrices, be they Fixed-size or Dynamic-size, <em>do not</em> expand dynamically in the sense of a std::map.
- * If you want this behavior, see the Sparse module.</dd>
- *
- * <dt><b>\anchor maxrows _MaxRows and _MaxCols:</b></dt>
- * <dd>In most cases, one just leaves these parameters to the default values.
- * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases
- * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they cannot
- * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols
- * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic (see the example below).</dd>
- * </dl>
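- *
- * For example (an illustrative sketch):
- * \code
- * Matrix4f m;
- * m.setZero();
- * // b has Dynamic rows and columns, but its maximum size is known at
- * // compile-time, so evaluating the block never allocates on the heap:
- * Matrix<float, Dynamic, Dynamic, AutoAlign, 4, 4> b = m.block(1, 1, 2, 2);
- * \endcode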
- *
- * \see MatrixBase for the majority of the API methods for matrices
- */
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-struct ei_traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
-{
- typedef _Scalar Scalar;
- enum {
- RowsAtCompileTime = _Rows,
- ColsAtCompileTime = _Cols,
- MaxRowsAtCompileTime = _MaxRows,
- MaxColsAtCompileTime = _MaxCols,
- Flags = ei_compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret,
- CoeffReadCost = NumTraits<Scalar>::ReadCost
- };
-};
-
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-class Matrix
- : public MatrixBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
-{
- public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(Matrix)
- enum { Options = _Options };
- friend class Eigen::Map<Matrix, Unaligned>;
- typedef class Eigen::Map<Matrix, Unaligned> UnalignedMapType;
- friend class Eigen::Map<Matrix, Aligned>;
- typedef class Eigen::Map<Matrix, Aligned> AlignedMapType;
-
- protected:
- ei_matrix_storage<Scalar, MaxSizeAtCompileTime, RowsAtCompileTime, ColsAtCompileTime, Options> m_storage;
-
- public:
- enum { NeedsToAlign = (Options&AutoAlign) == AutoAlign
- && SizeAtCompileTime!=Dynamic && ((sizeof(Scalar)*SizeAtCompileTime)%16)==0 };
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
-
- Base& base() { return *static_cast<Base*>(this); }
- const Base& base() const { return *static_cast<const Base*>(this); }
-
- EIGEN_STRONG_INLINE int rows() const { return m_storage.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_storage.cols(); }
-
- EIGEN_STRONG_INLINE int stride(void) const
- {
- if(Flags & RowMajorBit)
- return m_storage.cols();
- else
- return m_storage.rows();
- }
-
- EIGEN_STRONG_INLINE const Scalar& coeff(int row, int col) const
- {
- if(Flags & RowMajorBit)
- return m_storage.data()[col + row * m_storage.cols()];
- else // column-major
- return m_storage.data()[row + col * m_storage.rows()];
- }
-
- EIGEN_STRONG_INLINE const Scalar& coeff(int index) const
- {
- return m_storage.data()[index];
- }
-
- EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col)
- {
- if(Flags & RowMajorBit)
- return m_storage.data()[col + row * m_storage.cols()];
- else // column-major
- return m_storage.data()[row + col * m_storage.rows()];
- }
-
- EIGEN_STRONG_INLINE Scalar& coeffRef(int index)
- {
- return m_storage.data()[index];
- }
-
- template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const
- {
- return ei_ploadt<Scalar, LoadMode>
- (m_storage.data() + (Flags & RowMajorBit
- ? col + row * m_storage.cols()
- : row + col * m_storage.rows()));
- }
-
- template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int index) const
- {
- return ei_ploadt<Scalar, LoadMode>(m_storage.data() + index);
- }
-
- template<int StoreMode>
- EIGEN_STRONG_INLINE void writePacket(int row, int col, const PacketScalar& x)
- {
- ei_pstoret<Scalar, PacketScalar, StoreMode>
- (m_storage.data() + (Flags & RowMajorBit
- ? col + row * m_storage.cols()
- : row + col * m_storage.rows()), x);
- }
-
- template<int StoreMode>
- EIGEN_STRONG_INLINE void writePacket(int index, const PacketScalar& x)
- {
- ei_pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
- }
-
- /** \returns a const pointer to the data array of this matrix */
- EIGEN_STRONG_INLINE const Scalar *data() const
- { return m_storage.data(); }
-
- /** \returns a pointer to the data array of this matrix */
- EIGEN_STRONG_INLINE Scalar *data()
- { return m_storage.data(); }
-
- /** Resizes \c *this to a \a rows x \a cols matrix.
- *
- * Makes sense for dynamic-size matrices only.
- *
- * If the current number of coefficients of \c *this exactly matches the
- * product \a rows * \a cols, then no memory allocation is performed and
- * the current values are left unchanged. In all other cases, including
- * shrinking, the data is reallocated and all previous values are lost.
- *
- * \sa resize(int) for vectors.
- */
- inline void resize(int rows, int cols)
- {
- ei_assert((MaxRowsAtCompileTime == Dynamic || MaxRowsAtCompileTime >= rows)
- && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
- && (MaxColsAtCompileTime == Dynamic || MaxColsAtCompileTime >= cols)
- && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
- m_storage.resize(rows * cols, rows, cols);
- }
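-
-    // For example:
-    //   MatrixXf m(2, 6);
-    //   m.resize(3, 4);  // 3*4 == 2*6 coefficients: no reallocation, values kept
-    //   m.resize(2, 2);  // 4 != 12: reallocates, previous values are lost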
-
- /** Resizes \c *this to a vector of length \a size
- *
- * \sa resize(int,int) for the details.
- */
- inline void resize(int size)
- {
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix)
- if(RowsAtCompileTime == 1)
- m_storage.resize(size, 1, size);
- else
- m_storage.resize(size, size, 1);
- }
-
- /** Copies the value of the expression \a other into \c *this with automatic resizing.
- *
- * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
- * it will be initialized.
- *
- * Note that copying a row-vector into a vector (and conversely) is allowed.
- * The resizing, if any, is then done in the appropriate way so that row-vectors
- * remain row-vectors and vectors remain vectors.
- */
- template<typename OtherDerived>
- EIGEN_STRONG_INLINE Matrix& operator=(const MatrixBase<OtherDerived>& other)
- {
- return _set(other);
- }
-
- /** This is a special case of the templated operator=. Its purpose is to
- * prevent a default operator= from hiding the templated operator=.
- */
- EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other)
- {
- return _set(other);
- }
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Matrix, +=)
- EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Matrix, -=)
- EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Matrix, *=)
- EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Matrix, /=)
-
- /** Default constructor.
- *
- * For fixed-size matrices, does nothing.
- *
- * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix
- * is called a null matrix. This constructor is the unique way to create null matrices: resizing
- * a matrix to 0 is not supported.
- *
- * \sa resize(int,int)
- */
- EIGEN_STRONG_INLINE explicit Matrix() : m_storage()
- {
- _check_template_params();
- }
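-    // e.g. `MatrixXf m;` creates a null matrix: m.rows() == 0 and m.cols() == 0,
-    // and no array is allocated until m is resized or assigned to.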
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- /** \internal */
- Matrix(ei_constructor_without_unaligned_array_assert)
- : m_storage(ei_constructor_without_unaligned_array_assert())
- {}
-#endif
-
- /** Constructs a vector or row-vector with given dimension. \only_for_vectors
- *
- * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
- * it is redundant to pass the dimension here, so it makes more sense to use the default
- * constructor Matrix() instead.
- */
- EIGEN_STRONG_INLINE explicit Matrix(int dim)
- : m_storage(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
- {
- _check_template_params();
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix)
- ei_assert(dim > 0);
- ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
- }
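-    // e.g. `VectorXf v(10);` creates an uninitialized dynamic-size vector of 10 floats.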
-
- /** This constructor has two very different behaviors, depending on the type of *this.
- *
- * \li When Matrix is a fixed-size vector type of size 2, this constructor constructs
- * an initialized vector. The parameters \a x, \a y are copied into the first and second
- * coords of the vector respectively.
- * \li Otherwise, this constructor constructs an uninitialized matrix with \a x rows and
- * \a y columns. This is useful for dynamic-size matrices. For fixed-size matrices,
- * it is redundant to pass these parameters, so one should use the default constructor
- * Matrix() instead.
- */
- EIGEN_STRONG_INLINE Matrix(int x, int y) : m_storage(x*y, x, y)
- {
- _check_template_params();
- if((RowsAtCompileTime == 1 && ColsAtCompileTime == 2)
- || (RowsAtCompileTime == 2 && ColsAtCompileTime == 1))
- {
- m_storage.data()[0] = Scalar(x);
- m_storage.data()[1] = Scalar(y);
- }
- else
- {
- ei_assert(x > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == x)
- && y > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == y));
- }
- }
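-    // The two behaviors, illustrated:
-    //   Vector2i u(4, 7);  // fixed-size 2-vector, initialized: u == (4, 7)
-    //   MatrixXi m(4, 7);  // dynamic-size: an uninitialized 4x7 matrix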
- /** constructs an initialized 2D vector with given coefficients */
- EIGEN_STRONG_INLINE Matrix(const float& x, const float& y)
- {
- _check_template_params();
- EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 2)
- m_storage.data()[0] = x;
- m_storage.data()[1] = y;
- }
- /** constructs an initialized 2D vector with given coefficients */
- EIGEN_STRONG_INLINE Matrix(const double& x, const double& y)
- {
- _check_template_params();
- EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 2)
- m_storage.data()[0] = x;
- m_storage.data()[1] = y;
- }
- /** constructs an initialized 3D vector with given coefficients */
- EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z)
- {
- _check_template_params();
- EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3)
- m_storage.data()[0] = x;
- m_storage.data()[1] = y;
- m_storage.data()[2] = z;
- }
- /** constructs an initialized 4D vector with given coefficients */
- EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
- {
- _check_template_params();
- EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4)
- m_storage.data()[0] = x;
- m_storage.data()[1] = y;
- m_storage.data()[2] = z;
- m_storage.data()[3] = w;
- }
-
- explicit Matrix(const Scalar *data);
-
- /** Constructor copying the value of the expression \a other */
- template<typename OtherDerived>
- EIGEN_STRONG_INLINE Matrix(const MatrixBase<OtherDerived>& other)
- : m_storage(other.rows() * other.cols(), other.rows(), other.cols())
- {
- _check_template_params();
- _set_noalias(other);
- }
- /** Copy constructor */
- EIGEN_STRONG_INLINE Matrix(const Matrix& other)
- : Base(), m_storage(other.rows() * other.cols(), other.rows(), other.cols())
- {
- _check_template_params();
- _set_noalias(other);
- }
- /** Destructor */
- inline ~Matrix() {}
-
-    /** Override MatrixBase::swap() since for dynamic-sized matrices of the same type it is enough to swap the
- * data pointers.
- */
- template<typename OtherDerived>
- void swap(const MatrixBase<OtherDerived>& other);
-
- /** \name Map
- * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects,
-      * while the MapAligned() functions return aligned Map objects and thus should be called only with 16-byte-aligned
- * \a data pointers.
- *
- * \see class Map
- */
- //@{
- inline static const UnalignedMapType Map(const Scalar* data)
- { return UnalignedMapType(data); }
- inline static UnalignedMapType Map(Scalar* data)
- { return UnalignedMapType(data); }
- inline static const UnalignedMapType Map(const Scalar* data, int size)
- { return UnalignedMapType(data, size); }
- inline static UnalignedMapType Map(Scalar* data, int size)
- { return UnalignedMapType(data, size); }
- inline static const UnalignedMapType Map(const Scalar* data, int rows, int cols)
- { return UnalignedMapType(data, rows, cols); }
- inline static UnalignedMapType Map(Scalar* data, int rows, int cols)
- { return UnalignedMapType(data, rows, cols); }
-
- inline static const AlignedMapType MapAligned(const Scalar* data)
- { return AlignedMapType(data); }
- inline static AlignedMapType MapAligned(Scalar* data)
- { return AlignedMapType(data); }
- inline static const AlignedMapType MapAligned(const Scalar* data, int size)
- { return AlignedMapType(data, size); }
- inline static AlignedMapType MapAligned(Scalar* data, int size)
- { return AlignedMapType(data, size); }
- inline static const AlignedMapType MapAligned(const Scalar* data, int rows, int cols)
- { return AlignedMapType(data, rows, cols); }
- inline static AlignedMapType MapAligned(Scalar* data, int rows, int cols)
- { return AlignedMapType(data, rows, cols); }
- //@}
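-
-    // A minimal usage sketch of the unaligned Map() functions above, assuming a
-    // caller-owned buffer `data`:
-    //   float data[4] = { 1.f, 2.f, 3.f, 4.f };
-    //   Vector4f v = Vector4f::Map(data);  // copies the four floats into v
-    //   Vector4f::Map(data) *= 2.f;        // scales the buffer in place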
-
- using Base::setConstant;
- Matrix& setConstant(int size, const Scalar& value);
- Matrix& setConstant(int rows, int cols, const Scalar& value);
-
- using Base::setZero;
- Matrix& setZero(int size);
- Matrix& setZero(int rows, int cols);
-
- using Base::setOnes;
- Matrix& setOnes(int size);
- Matrix& setOnes(int rows, int cols);
-
- using Base::setRandom;
- Matrix& setRandom(int size);
- Matrix& setRandom(int rows, int cols);
-
- using Base::setIdentity;
- Matrix& setIdentity(int rows, int cols);
-
-/////////// Geometry module ///////////
-
- template<typename OtherDerived>
- explicit Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r);
- template<typename OtherDerived>
- Matrix& operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r);
-
-    // allows extending Matrix outside Eigen
- #ifdef EIGEN_MATRIX_PLUGIN
- #include EIGEN_MATRIX_PLUGIN
- #endif
-
- private:
- /** \internal Resizes *this in preparation for assigning \a other to it.
- * Takes care of doing all the checking that's needed.
- *
- * Note that copying a row-vector into a vector (and conversely) is allowed.
- * The resizing, if any, is then done in the appropriate way so that row-vectors
- * remain row-vectors and vectors remain vectors.
- */
- template<typename OtherDerived>
- EIGEN_STRONG_INLINE void _resize_to_match(const MatrixBase<OtherDerived>& other)
- {
- if(RowsAtCompileTime == 1)
- {
- ei_assert(other.isVector());
- resize(1, other.size());
- }
- else if(ColsAtCompileTime == 1)
- {
- ei_assert(other.isVector());
- resize(other.size(), 1);
- }
- else resize(other.rows(), other.cols());
- }
-
- /** \internal Copies the value of the expression \a other into \c *this with automatic resizing.
- *
- * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
- * it will be initialized.
- *
- * Note that copying a row-vector into a vector (and conversely) is allowed.
- * The resizing, if any, is then done in the appropriate way so that row-vectors
- * remain row-vectors and vectors remain vectors.
- *
- * \sa operator=(const MatrixBase<OtherDerived>&), _set_noalias()
- */
- template<typename OtherDerived>
- EIGEN_STRONG_INLINE Matrix& _set(const MatrixBase<OtherDerived>& other)
- {
-      // this enum was introduced to fix compilation with gcc 3.3
- enum { cond = int(OtherDerived::Flags) & EvalBeforeAssigningBit };
- _set_selector(other.derived(), typename ei_meta_if<bool(cond), ei_meta_true, ei_meta_false>::ret());
- return *this;
- }
-
- template<typename OtherDerived>
- EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_true&) { _set_noalias(other.eval()); }
-
- template<typename OtherDerived>
- EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_false&) { _set_noalias(other); }
-
- /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
- * is the case when creating a new matrix) so one can enforce lazy evaluation.
- *
- * \sa operator=(const MatrixBase<OtherDerived>&), _set()
- */
- template<typename OtherDerived>
- EIGEN_STRONG_INLINE Matrix& _set_noalias(const MatrixBase<OtherDerived>& other)
- {
- _resize_to_match(other);
- // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
-      // it wouldn't allow copying a row-vector into a column-vector.
- return ei_assign_selector<Matrix,OtherDerived,false>::run(*this, other.derived());
- }
-
- static EIGEN_STRONG_INLINE void _check_template_params()
- {
- EIGEN_STATIC_ASSERT((_Rows > 0
- && _Cols > 0
- && _MaxRows <= _Rows
- && _MaxCols <= _Cols
- && (_Options & (AutoAlign|RowMajor)) == _Options),
- INVALID_MATRIX_TEMPLATE_PARAMETERS)
- }
-
- template<typename MatrixType, typename OtherDerived, bool IsSameType, bool IsDynamicSize>
- friend struct ei_matrix_swap_impl;
-};
-
-template<typename MatrixType, typename OtherDerived,
- bool IsSameType = ei_is_same_type<MatrixType, OtherDerived>::ret,
- bool IsDynamicSize = MatrixType::SizeAtCompileTime==Dynamic>
-struct ei_matrix_swap_impl
-{
- static inline void run(MatrixType& matrix, MatrixBase<OtherDerived>& other)
- {
- matrix.base().swap(other);
- }
-};
-
-template<typename MatrixType, typename OtherDerived>
-struct ei_matrix_swap_impl<MatrixType, OtherDerived, true, true>
-{
- static inline void run(MatrixType& matrix, MatrixBase<OtherDerived>& other)
- {
- matrix.m_storage.swap(other.derived().m_storage);
- }
-};
-
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-template<typename OtherDerived>
-inline void Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::swap(const MatrixBase<OtherDerived>& other)
-{
- ei_matrix_swap_impl<Matrix, OtherDerived>::run(*this, *const_cast<MatrixBase<OtherDerived>*>(&other));
-}
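-
-// Note what the specializations above buy: for two dynamic-size matrices of the
-// same type, swap() exchanges the storage pointers in O(1), e.g.:
-//   MatrixXf a(1000, 1000), b(500, 500);
-//   a.swap(b);  // pointer swap only, no coefficient copies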
-
-
-/** \defgroup matrixtypedefs Global matrix typedefs
- *
- * \ingroup Core_Module
- *
- * Eigen defines several typedef shortcuts for most common matrix and vector types.
- *
- * The general patterns are the following:
- *
- * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size,
- * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
- * for complex double.
- *
- * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats.
- *
- * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is
- * a fixed-size vector of 4 complex floats.
- *
- * \sa class Matrix
- */
-
-#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \
-/** \ingroup matrixtypedefs */ \
-typedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix; \
-/** \ingroup matrixtypedefs */ \
-typedef Matrix<Type, Size, 1> Vector##SizeSuffix##TypeSuffix; \
-/** \ingroup matrixtypedefs */ \
-typedef Matrix<Type, 1, Size> RowVector##SizeSuffix##TypeSuffix;
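-
-// For instance, EIGEN_MAKE_TYPEDEFS(float, f, 3, 3) expands to:
-//   typedef Matrix<float, 3, 3> Matrix3f;
-//   typedef Matrix<float, 3, 1> Vector3f;
-//   typedef Matrix<float, 1, 3> RowVector3f;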
-
-#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
-EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X)
-
-EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i)
-EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f)
-EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d)
-EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<float>, cf)
-EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
-
-#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
-#undef EIGEN_MAKE_TYPEDEFS
-
-#undef EIGEN_MAKE_TYPEDEFS_LARGE
-
-#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
-using Eigen::Matrix##SizeSuffix##TypeSuffix; \
-using Eigen::Vector##SizeSuffix##TypeSuffix; \
-using Eigen::RowVector##SizeSuffix##TypeSuffix;
-
-#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(TypeSuffix) \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X)
-
-#define EIGEN_USING_MATRIX_TYPEDEFS \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(i) \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(f) \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(d) \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cf) \
-EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cd)
-
-#endif // EIGEN_MATRIX_H
diff --git a/extern/Eigen2/Eigen/src/Core/MatrixBase.h b/extern/Eigen2/Eigen/src/Core/MatrixBase.h
deleted file mode 100644
index 7935a7554ea..00000000000
--- a/extern/Eigen2/Eigen/src/Core/MatrixBase.h
+++ /dev/null
@@ -1,632 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_MATRIXBASE_H
-#define EIGEN_MATRIXBASE_H
-
-/** \class MatrixBase
- *
- * \brief Base class for all matrices, vectors, and expressions
- *
- * This class is the base that is inherited by all matrix, vector, and expression
- * types. Most of the Eigen API is contained in this class. Other important classes for
- * the Eigen API are Matrix, Cwise, and PartialRedux.
- *
- * Note that some methods are defined in the \ref Array module.
- *
- * \param Derived is the derived type, e.g. a matrix type, or an expression, etc.
- *
- * When writing a function taking Eigen objects as argument, if you want your function
- * to take as argument any matrix, vector, or expression, just let it take a
- * MatrixBase argument. As an example, here is a function printFirstRow which, given
- * a matrix, vector, or expression \a x, prints the first row of \a x.
- *
- * \code
- template<typename Derived>
- void printFirstRow(const Eigen::MatrixBase<Derived>& x)
- {
-      std::cout << x.row(0) << std::endl;
- }
- * \endcode
- *
- */
-template<typename Derived> class MatrixBase
-{
- public:
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- class InnerIterator;
-
- typedef typename ei_traits<Derived>::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
- enum {
-
- RowsAtCompileTime = ei_traits<Derived>::RowsAtCompileTime,
- /**< The number of rows at compile-time. This is just a copy of the value provided
- * by the \a Derived type. If a value is not known at compile-time,
- * it is set to the \a Dynamic constant.
- * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
-
- ColsAtCompileTime = ei_traits<Derived>::ColsAtCompileTime,
- /**< The number of columns at compile-time. This is just a copy of the value provided
- * by the \a Derived type. If a value is not known at compile-time,
- * it is set to the \a Dynamic constant.
- * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
-
-
- SizeAtCompileTime = (ei_size_at_compile_time<ei_traits<Derived>::RowsAtCompileTime,
- ei_traits<Derived>::ColsAtCompileTime>::ret),
- /**< This is equal to the number of coefficients, i.e. the number of
- * rows times the number of columns, or to \a Dynamic if this is not
- * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
-
- MaxRowsAtCompileTime = ei_traits<Derived>::MaxRowsAtCompileTime,
- /**< This value is equal to the maximum possible number of rows that this expression
- * might have. If this expression might have an arbitrarily high number of rows,
- * this value is set to \a Dynamic.
- *
- * This value is useful to know when evaluating an expression, in order to determine
- * whether it is possible to avoid doing a dynamic memory allocation.
- *
- * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime
- */
-
- MaxColsAtCompileTime = ei_traits<Derived>::MaxColsAtCompileTime,
- /**< This value is equal to the maximum possible number of columns that this expression
- * might have. If this expression might have an arbitrarily high number of columns,
- * this value is set to \a Dynamic.
- *
- * This value is useful to know when evaluating an expression, in order to determine
- * whether it is possible to avoid doing a dynamic memory allocation.
- *
- * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime
- */
-
- MaxSizeAtCompileTime = (ei_size_at_compile_time<ei_traits<Derived>::MaxRowsAtCompileTime,
- ei_traits<Derived>::MaxColsAtCompileTime>::ret),
- /**< This value is equal to the maximum possible number of coefficients that this expression
- * might have. If this expression might have an arbitrarily high number of coefficients,
- * this value is set to \a Dynamic.
- *
- * This value is useful to know when evaluating an expression, in order to determine
- * whether it is possible to avoid doing a dynamic memory allocation.
- *
- * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
- */
-
- IsVectorAtCompileTime = ei_traits<Derived>::RowsAtCompileTime == 1
- || ei_traits<Derived>::ColsAtCompileTime == 1,
- /**< This is set to true if either the number of rows or the number of
- * columns is known at compile-time to be equal to 1. Indeed, in that case,
- * we are dealing with a column-vector (if there is only one column) or with
- * a row-vector (if there is only one row). */
-
- Flags = ei_traits<Derived>::Flags,
- /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
- * constructed from this one. See the \ref flags "list of flags".
- */
-
- CoeffReadCost = ei_traits<Derived>::CoeffReadCost
- /**< This is a rough measure of how expensive it is to read one coefficient from
- * this expression.
- */
- };
-
- /** Default constructor. Just checks at compile-time for self-consistency of the flags. */
- MatrixBase()
- {
- ei_assert(ei_are_flags_consistent<Flags>::ret);
- }
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- /** This is the "real scalar" type; if the \a Scalar type is already real numbers
- * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
- * \a Scalar is \a std::complex<T> then RealScalar is \a T.
- *
- * \sa class NumTraits
- */
- typedef typename NumTraits<Scalar>::Real RealScalar;
-
- /** type of the equivalent square matrix */
- typedef Matrix<Scalar,EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime),
- EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
- /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
- inline int rows() const { return derived().rows(); }
- /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
- inline int cols() const { return derived().cols(); }
- /** \returns the number of coefficients, which is \a rows()*cols().
- * \sa rows(), cols(), SizeAtCompileTime. */
- inline int size() const { return rows() * cols(); }
- /** \returns the number of nonzero coefficients which is in practice the number
- * of stored coefficients. */
-    inline int nonZeros() const { return derived().nonZeros(); }
- /** \returns true if either the number of rows or the number of columns is equal to 1.
- * In other words, this function returns
- * \code rows()==1 || cols()==1 \endcode
- * \sa rows(), cols(), IsVectorAtCompileTime. */
- inline bool isVector() const { return rows()==1 || cols()==1; }
-    /** \returns the size of the storage major dimension,
-      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
-    int outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
-    /** \returns the size of the inner dimension according to the storage order,
-      * i.e., the number of rows for a column-major matrix, and the number of cols otherwise */
-    int innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
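-    // e.g. a column-major 2x5 matrix has outerSize() == 5 (its number of columns)
-    // and innerSize() == 2 (its number of rows); row-major storage reverses the two.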
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** \internal the plain matrix type corresponding to this expression. Note that this is not necessarily
-      * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const
-      * reference to a matrix, not a matrix! It is guaranteed, however, that the return type of eval() is either
- * PlainMatrixType or const PlainMatrixType&.
- */
- typedef typename ei_plain_matrix_type<Derived>::type PlainMatrixType;
-    /** \internal the column-major plain matrix type corresponding to this expression. Note that this is not necessarily
- * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const
- * reference to a matrix, not a matrix!
- * The only difference from PlainMatrixType is that PlainMatrixType_ColMajor is guaranteed to be column-major.
- */
- typedef typename ei_plain_matrix_type<Derived>::type PlainMatrixType_ColMajor;
-
-    /** \internal Represents a matrix with all coefficients equal to one another */
- typedef CwiseNullaryOp<ei_scalar_constant_op<Scalar>,Derived> ConstantReturnType;
- /** \internal Represents a scalar multiple of a matrix */
- typedef CwiseUnaryOp<ei_scalar_multiple_op<Scalar>, Derived> ScalarMultipleReturnType;
- /** \internal Represents a quotient of a matrix by a scalar*/
- typedef CwiseUnaryOp<ei_scalar_quotient1_op<Scalar>, Derived> ScalarQuotient1ReturnType;
- /** \internal the return type of MatrixBase::conjugate() */
- typedef typename ei_meta_if<NumTraits<Scalar>::IsComplex,
- const CwiseUnaryOp<ei_scalar_conjugate_op<Scalar>, Derived>,
- const Derived&
- >::ret ConjugateReturnType;
- /** \internal the return type of MatrixBase::real() */
- typedef CwiseUnaryOp<ei_scalar_real_op<Scalar>, Derived> RealReturnType;
- /** \internal the return type of MatrixBase::imag() */
- typedef CwiseUnaryOp<ei_scalar_imag_op<Scalar>, Derived> ImagReturnType;
- /** \internal the return type of MatrixBase::adjoint() */
- typedef Eigen::Transpose<NestByValue<typename ei_cleantype<ConjugateReturnType>::type> >
- AdjointReturnType;
- /** \internal the return type of MatrixBase::eigenvalues() */
- typedef Matrix<typename NumTraits<typename ei_traits<Derived>::Scalar>::Real, ei_traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
-    /** \internal expression type of a column */
-    typedef Block<Derived, ei_traits<Derived>::RowsAtCompileTime, 1> ColXpr;
-    /** \internal expression type of a row */
-    typedef Block<Derived, 1, ei_traits<Derived>::ColsAtCompileTime> RowXpr;
- /** \internal the return type of identity */
- typedef CwiseNullaryOp<ei_scalar_identity_op<Scalar>,Derived> IdentityReturnType;
- /** \internal the return type of unit vectors */
- typedef Block<CwiseNullaryOp<ei_scalar_identity_op<Scalar>, SquareMatrixType>,
- ei_traits<Derived>::RowsAtCompileTime,
- ei_traits<Derived>::ColsAtCompileTime> BasisReturnType;
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
-
- /** Copies \a other into *this. \returns a reference to *this. */
- template<typename OtherDerived>
- Derived& operator=(const MatrixBase<OtherDerived>& other);
-
- /** Special case of the template operator=, in order to prevent the compiler
- * from generating a default operator= (issue hit with g++ 4.1)
- */
- inline Derived& operator=(const MatrixBase& other)
- {
- return this->operator=<Derived>(other);
- }
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- /** Copies \a other into *this without evaluating other. \returns a reference to *this. */
- template<typename OtherDerived>
- Derived& lazyAssign(const MatrixBase<OtherDerived>& other);
-
- /** Overloaded for cache friendly product evaluation */
- template<typename Lhs, typename Rhs>
- Derived& lazyAssign(const Product<Lhs,Rhs,CacheFriendlyProduct>& product);
-
- /** Overloaded for cache friendly product evaluation */
- template<typename OtherDerived>
- Derived& lazyAssign(const Flagged<OtherDerived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other)
- { return lazyAssign(other._expression()); }
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
- CommaInitializer<Derived> operator<< (const Scalar& s);
-
- template<typename OtherDerived>
- CommaInitializer<Derived> operator<< (const MatrixBase<OtherDerived>& other);
-
- const Scalar coeff(int row, int col) const;
- const Scalar operator()(int row, int col) const;
-
- Scalar& coeffRef(int row, int col);
- Scalar& operator()(int row, int col);
-
- const Scalar coeff(int index) const;
- const Scalar operator[](int index) const;
- const Scalar operator()(int index) const;
-
- Scalar& coeffRef(int index);
- Scalar& operator[](int index);
- Scalar& operator()(int index);
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- template<typename OtherDerived>
- void copyCoeff(int row, int col, const MatrixBase<OtherDerived>& other);
- template<typename OtherDerived>
- void copyCoeff(int index, const MatrixBase<OtherDerived>& other);
- template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(int row, int col, const MatrixBase<OtherDerived>& other);
- template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(int index, const MatrixBase<OtherDerived>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
- template<int LoadMode>
- PacketScalar packet(int row, int col) const;
- template<int StoreMode>
- void writePacket(int row, int col, const PacketScalar& x);
-
- template<int LoadMode>
- PacketScalar packet(int index) const;
- template<int StoreMode>
- void writePacket(int index, const PacketScalar& x);
-
- const Scalar x() const;
- const Scalar y() const;
- const Scalar z() const;
- const Scalar w() const;
- Scalar& x();
- Scalar& y();
- Scalar& z();
- Scalar& w();
-
-
- const CwiseUnaryOp<ei_scalar_opposite_op<typename ei_traits<Derived>::Scalar>,Derived> operator-() const;
-
- template<typename OtherDerived>
- const CwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
- operator+(const MatrixBase<OtherDerived> &other) const;
-
- template<typename OtherDerived>
- const CwiseBinaryOp<ei_scalar_difference_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
- operator-(const MatrixBase<OtherDerived> &other) const;
-
- template<typename OtherDerived>
- Derived& operator+=(const MatrixBase<OtherDerived>& other);
- template<typename OtherDerived>
- Derived& operator-=(const MatrixBase<OtherDerived>& other);
-
- template<typename Lhs,typename Rhs>
- Derived& operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other);
-
- Derived& operator*=(const Scalar& other);
- Derived& operator/=(const Scalar& other);
-
- const ScalarMultipleReturnType operator*(const Scalar& scalar) const;
- const CwiseUnaryOp<ei_scalar_quotient1_op<typename ei_traits<Derived>::Scalar>, Derived>
- operator/(const Scalar& scalar) const;
-
- inline friend const CwiseUnaryOp<ei_scalar_multiple_op<typename ei_traits<Derived>::Scalar>, Derived>
- operator*(const Scalar& scalar, const MatrixBase& matrix)
- { return matrix*scalar; }
-
-
- template<typename OtherDerived>
- const typename ProductReturnType<Derived,OtherDerived>::Type
- operator*(const MatrixBase<OtherDerived> &other) const;
-
- template<typename OtherDerived>
- Derived& operator*=(const MatrixBase<OtherDerived>& other);
-
- template<typename OtherDerived>
- typename ei_plain_matrix_type_column_major<OtherDerived>::type
- solveTriangular(const MatrixBase<OtherDerived>& other) const;
-
- template<typename OtherDerived>
- void solveTriangularInPlace(const MatrixBase<OtherDerived>& other) const;
-
-
- template<typename OtherDerived>
- Scalar dot(const MatrixBase<OtherDerived>& other) const;
- RealScalar squaredNorm() const;
- RealScalar norm() const;
- const PlainMatrixType normalized() const;
- void normalize();
-
- Eigen::Transpose<Derived> transpose();
- const Eigen::Transpose<Derived> transpose() const;
- void transposeInPlace();
- const AdjointReturnType adjoint() const;
-
-
- RowXpr row(int i);
- const RowXpr row(int i) const;
-
- ColXpr col(int i);
- const ColXpr col(int i) const;
-
- Minor<Derived> minor(int row, int col);
- const Minor<Derived> minor(int row, int col) const;
-
- typename BlockReturnType<Derived>::Type block(int startRow, int startCol, int blockRows, int blockCols);
- const typename BlockReturnType<Derived>::Type
- block(int startRow, int startCol, int blockRows, int blockCols) const;
-
- typename BlockReturnType<Derived>::SubVectorType segment(int start, int size);
- const typename BlockReturnType<Derived>::SubVectorType segment(int start, int size) const;
-
- typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size);
- const typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size) const;
-
- typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size);
- const typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size) const;
-
- typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols);
- const typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols) const;
-
- template<int BlockRows, int BlockCols>
- typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol);
- template<int BlockRows, int BlockCols>
- const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol) const;
-
- template<int CRows, int CCols>
- typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type);
- template<int CRows, int CCols>
- const typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type) const;
-
- template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType start(void);
- template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType start() const;
-
- template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType end();
- template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType end() const;
-
- template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType segment(int start);
- template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType segment(int start) const;
-
- DiagonalCoeffs<Derived> diagonal();
- const DiagonalCoeffs<Derived> diagonal() const;
-
- template<unsigned int Mode> Part<Derived, Mode> part();
- template<unsigned int Mode> const Part<Derived, Mode> part() const;
-
-
- static const ConstantReturnType
- Constant(int rows, int cols, const Scalar& value);
- static const ConstantReturnType
- Constant(int size, const Scalar& value);
- static const ConstantReturnType
- Constant(const Scalar& value);
-
- template<typename CustomNullaryOp>
- static const CwiseNullaryOp<CustomNullaryOp, Derived>
- NullaryExpr(int rows, int cols, const CustomNullaryOp& func);
- template<typename CustomNullaryOp>
- static const CwiseNullaryOp<CustomNullaryOp, Derived>
- NullaryExpr(int size, const CustomNullaryOp& func);
- template<typename CustomNullaryOp>
- static const CwiseNullaryOp<CustomNullaryOp, Derived>
- NullaryExpr(const CustomNullaryOp& func);
-
- static const ConstantReturnType Zero(int rows, int cols);
- static const ConstantReturnType Zero(int size);
- static const ConstantReturnType Zero();
- static const ConstantReturnType Ones(int rows, int cols);
- static const ConstantReturnType Ones(int size);
- static const ConstantReturnType Ones();
- static const IdentityReturnType Identity();
- static const IdentityReturnType Identity(int rows, int cols);
- static const BasisReturnType Unit(int size, int i);
- static const BasisReturnType Unit(int i);
- static const BasisReturnType UnitX();
- static const BasisReturnType UnitY();
- static const BasisReturnType UnitZ();
- static const BasisReturnType UnitW();
-
- const DiagonalMatrix<Derived> asDiagonal() const;
-
- void fill(const Scalar& value);
- Derived& setConstant(const Scalar& value);
- Derived& setZero();
- Derived& setOnes();
- Derived& setRandom();
- Derived& setIdentity();
-
-
- template<typename OtherDerived>
- bool isApprox(const MatrixBase<OtherDerived>& other,
- RealScalar prec = precision<Scalar>()) const;
- bool isMuchSmallerThan(const RealScalar& other,
- RealScalar prec = precision<Scalar>()) const;
- template<typename OtherDerived>
- bool isMuchSmallerThan(const MatrixBase<OtherDerived>& other,
- RealScalar prec = precision<Scalar>()) const;
-
- bool isApproxToConstant(const Scalar& value, RealScalar prec = precision<Scalar>()) const;
- bool isConstant(const Scalar& value, RealScalar prec = precision<Scalar>()) const;
- bool isZero(RealScalar prec = precision<Scalar>()) const;
- bool isOnes(RealScalar prec = precision<Scalar>()) const;
- bool isIdentity(RealScalar prec = precision<Scalar>()) const;
- bool isDiagonal(RealScalar prec = precision<Scalar>()) const;
-
- bool isUpperTriangular(RealScalar prec = precision<Scalar>()) const;
- bool isLowerTriangular(RealScalar prec = precision<Scalar>()) const;
-
- template<typename OtherDerived>
- bool isOrthogonal(const MatrixBase<OtherDerived>& other,
- RealScalar prec = precision<Scalar>()) const;
- bool isUnitary(RealScalar prec = precision<Scalar>()) const;
-
- template<typename OtherDerived>
- inline bool operator==(const MatrixBase<OtherDerived>& other) const
- { return (cwise() == other).all(); }
-
- template<typename OtherDerived>
- inline bool operator!=(const MatrixBase<OtherDerived>& other) const
- { return (cwise() != other).any(); }
-
-
- template<typename NewType>
- const CwiseUnaryOp<ei_scalar_cast_op<typename ei_traits<Derived>::Scalar, NewType>, Derived> cast() const;
-
- /** \returns the matrix or vector obtained by evaluating this expression.
- *
- * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
- * a const reference, in order to avoid a useless copy.
- */
- EIGEN_STRONG_INLINE const typename ei_eval<Derived>::type eval() const
- { return typename ei_eval<Derived>::type(derived()); }
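[Editorial note -- not part of the diff] A minimal usage sketch of eval(), assuming the Eigen2 headers shipped in this tree: calling it on an expression evaluates into a plain matrix, while calling it on a plain matrix just returns a cheap const reference.

    #include <Eigen/Core>
    int main()
    {
      Eigen::Matrix2d a = Eigen::Matrix2d::Identity();
      Eigen::Matrix2d b = Eigen::Matrix2d::Constant(3.0);
      Eigen::Matrix2d s = (a + b).eval();  // the sum expression is evaluated once here
      return s(0, 0) == 4.0 ? 0 : 1;
    }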
-
- template<typename OtherDerived>
- void swap(const MatrixBase<OtherDerived>& other);
-
- template<unsigned int Added>
- const Flagged<Derived, Added, 0> marked() const;
- const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit> lazy() const;
-
- /** \returns the number of elements to skip to pass from one row (resp. column) to another
- * for a row-major (resp. column-major) matrix.
- * Combined with coeffRef() and the \ref flags flags, it allows direct access to the data
- * of the underlying matrix.
- */
- inline int stride(void) const { return derived().stride(); }
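[Editorial note -- not part of the diff] A hedged sketch of the direct access the comment above describes, assuming a column-major Eigen2 matrix (the data() member is provided by the concrete Matrix type, not by MatrixBase):

    #include <Eigen/Core>
    // Coefficient (i, j) of a column-major matrix lives at data()[i + j * stride()].
    double diagonalSum(const Eigen::MatrixXd& m)
    {
      const double* p = m.data();
      const int s = m.stride();
      const int n = m.rows() < m.cols() ? m.rows() : m.cols();
      double sum = 0.0;
      for (int i = 0; i < n; ++i)
        sum += p[i + i * s];  // walk the diagonal through raw storage
      return sum;
    }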
-
- inline const NestByValue<Derived> nestByValue() const;
-
-
- ConjugateReturnType conjugate() const;
- const RealReturnType real() const;
- const ImagReturnType imag() const;
-
- template<typename CustomUnaryOp>
- const CwiseUnaryOp<CustomUnaryOp, Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const;
-
- template<typename CustomBinaryOp, typename OtherDerived>
- const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
- binaryExpr(const MatrixBase<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const;
-
-
- Scalar sum() const;
- Scalar trace() const;
-
- typename ei_traits<Derived>::Scalar minCoeff() const;
- typename ei_traits<Derived>::Scalar maxCoeff() const;
-
- typename ei_traits<Derived>::Scalar minCoeff(int* row, int* col) const;
- typename ei_traits<Derived>::Scalar maxCoeff(int* row, int* col) const;
-
- typename ei_traits<Derived>::Scalar minCoeff(int* index) const;
- typename ei_traits<Derived>::Scalar maxCoeff(int* index) const;
-
- template<typename BinaryOp>
- typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type
- redux(const BinaryOp& func) const;
-
- template<typename Visitor>
- void visit(Visitor& func) const;
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
- inline Derived& derived() { return *static_cast<Derived*>(this); }
- inline Derived& const_cast_derived() const
- { return *static_cast<Derived*>(const_cast<MatrixBase*>(this)); }
-#endif // not EIGEN_PARSED_BY_DOXYGEN
-
- const Cwise<Derived> cwise() const;
- Cwise<Derived> cwise();
-
- inline const WithFormat<Derived> format(const IOFormat& fmt) const;
-
-/////////// Array module ///////////
-
- bool all(void) const;
- bool any(void) const;
- int count() const;
-
- const PartialRedux<Derived,Horizontal> rowwise() const;
- const PartialRedux<Derived,Vertical> colwise() const;
-
- static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
- static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int size);
- static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random();
-
- template<typename ThenDerived,typename ElseDerived>
- const Select<Derived,ThenDerived,ElseDerived>
- select(const MatrixBase<ThenDerived>& thenMatrix,
- const MatrixBase<ElseDerived>& elseMatrix) const;
-
- template<typename ThenDerived>
- inline const Select<Derived,ThenDerived, NestByValue<typename ThenDerived::ConstantReturnType> >
- select(const MatrixBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
-
- template<typename ElseDerived>
- inline const Select<Derived, NestByValue<typename ElseDerived::ConstantReturnType>, ElseDerived >
- select(typename ElseDerived::Scalar thenScalar, const MatrixBase<ElseDerived>& elseMatrix) const;
-
- template<int p> RealScalar lpNorm() const;
-
-/////////// LU module ///////////
-
- const LU<PlainMatrixType> lu() const;
- const PlainMatrixType inverse() const;
- void computeInverse(PlainMatrixType *result) const;
- Scalar determinant() const;
-
-/////////// Cholesky module ///////////
-
- const LLT<PlainMatrixType> llt() const;
- const LDLT<PlainMatrixType> ldlt() const;
-
-/////////// QR module ///////////
-
- const QR<PlainMatrixType> qr() const;
-
- EigenvaluesReturnType eigenvalues() const;
- RealScalar operatorNorm() const;
-
-/////////// SVD module ///////////
-
- SVD<PlainMatrixType> svd() const;
-
-/////////// Geometry module ///////////
-
- template<typename OtherDerived>
- PlainMatrixType cross(const MatrixBase<OtherDerived>& other) const;
- PlainMatrixType unitOrthogonal(void) const;
- Matrix<Scalar,3,1> eulerAngles(int a0, int a1, int a2) const;
-
-/////////// Sparse module ///////////
-
- // dense = sparse * dense
- template<typename Derived1, typename Derived2>
- Derived& lazyAssign(const SparseProduct<Derived1,Derived2,SparseTimeDenseProduct>& product);
- // dense = dense * sparse
- template<typename Derived1, typename Derived2>
- Derived& lazyAssign(const SparseProduct<Derived1,Derived2,DenseTimeSparseProduct>& product);
-
- #ifdef EIGEN_MATRIXBASE_PLUGIN
- #include EIGEN_MATRIXBASE_PLUGIN
- #endif
-};
-
-#endif // EIGEN_MATRIXBASE_H
diff --git a/extern/Eigen2/Eigen/src/Core/MatrixStorage.h b/extern/Eigen2/Eigen/src/Core/MatrixStorage.h
deleted file mode 100644
index ba2355b8e60..00000000000
--- a/extern/Eigen2/Eigen/src/Core/MatrixStorage.h
+++ /dev/null
@@ -1,249 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_MATRIXSTORAGE_H
-#define EIGEN_MATRIXSTORAGE_H
-
-struct ei_constructor_without_unaligned_array_assert {};
-
-/** \internal
- * Static array automatically aligned if the total byte size is a multiple of 16 and the matrix options require auto alignment
- */
-template <typename T, int Size, int MatrixOptions,
- bool Align = (MatrixOptions&AutoAlign) && (((Size*sizeof(T))&0xf)==0)
-> struct ei_matrix_array
-{
- EIGEN_ALIGN_128 T array[Size];
-
- ei_matrix_array()
- {
- #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
- ei_assert((reinterpret_cast<size_t>(array) & 0xf) == 0
- && "this assertion is explained here: http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html **** READ THIS WEB PAGE !!! ****");
- #endif
- }
-
- ei_matrix_array(ei_constructor_without_unaligned_array_assert) {}
-};
-
-template <typename T, int Size, int MatrixOptions> struct ei_matrix_array<T,Size,MatrixOptions,false>
-{
- T array[Size];
- ei_matrix_array() {}
- ei_matrix_array(ei_constructor_without_unaligned_array_assert) {}
-};
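[Editorial note -- not part of the diff] A standalone illustration of the Align predicate above, assuming the common sizeof(float) == 4:

    #include <cassert>
    int main()
    {
      // 4 floats = 16 bytes: a multiple of 16, so the aligned primary template is used
      assert(((4 * sizeof(float)) & 0xf) == 0);
      // 3 floats = 12 bytes: not a multiple of 16, so the unaligned
      // specialization just above is selected instead
      assert(((3 * sizeof(float)) & 0xf) != 0);
      return 0;
    }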
-
-/** \internal
- *
- * \class ei_matrix_storage
- *
- * \brief Stores the data of a matrix
- *
- * This class stores the data of fixed-size, dynamic-size or mixed matrices
- * as compactly as possible.
- *
- * \sa Matrix
- */
-template<typename T, int Size, int _Rows, int _Cols, int _Options> class ei_matrix_storage;
-
-// purely fixed-size matrix
-template<typename T, int Size, int _Rows, int _Cols, int _Options> class ei_matrix_storage
-{
- ei_matrix_array<T,Size,_Options> m_data;
- public:
- inline explicit ei_matrix_storage() {}
- inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
- : m_data(ei_constructor_without_unaligned_array_assert()) {}
- inline ei_matrix_storage(int,int,int) {}
- inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); }
- inline static int rows(void) {return _Rows;}
- inline static int cols(void) {return _Cols;}
- inline void resize(int,int,int) {}
- inline const T *data() const { return m_data.array; }
- inline T *data() { return m_data.array; }
-};
-
-// dynamic-size matrix with fixed-size storage
-template<typename T, int Size, int _Options> class ei_matrix_storage<T, Size, Dynamic, Dynamic, _Options>
-{
- ei_matrix_array<T,Size,_Options> m_data;
- int m_rows;
- int m_cols;
- public:
- inline explicit ei_matrix_storage() : m_rows(0), m_cols(0) {}
- inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
- : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
- inline ei_matrix_storage(int, int rows, int cols) : m_rows(rows), m_cols(cols) {}
- inline ~ei_matrix_storage() {}
- inline void swap(ei_matrix_storage& other)
- { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
- inline int rows(void) const {return m_rows;}
- inline int cols(void) const {return m_cols;}
- inline void resize(int, int rows, int cols)
- {
- m_rows = rows;
- m_cols = cols;
- }
- inline const T *data() const { return m_data.array; }
- inline T *data() { return m_data.array; }
-};
-
-// dynamic-size matrix with fixed-size storage and fixed width
-template<typename T, int Size, int _Cols, int _Options> class ei_matrix_storage<T, Size, Dynamic, _Cols, _Options>
-{
- ei_matrix_array<T,Size,_Options> m_data;
- int m_rows;
- public:
- inline explicit ei_matrix_storage() : m_rows(0) {}
- inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
- : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0) {}
- inline ei_matrix_storage(int, int rows, int) : m_rows(rows) {}
- inline ~ei_matrix_storage() {}
- inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
- inline int rows(void) const {return m_rows;}
- inline int cols(void) const {return _Cols;}
- inline void resize(int /*size*/, int rows, int)
- {
- m_rows = rows;
- }
- inline const T *data() const { return m_data.array; }
- inline T *data() { return m_data.array; }
-};
-
-// dynamic-size matrix with fixed-size storage and fixed height
-template<typename T, int Size, int _Rows, int _Options> class ei_matrix_storage<T, Size, _Rows, Dynamic, _Options>
-{
- ei_matrix_array<T,Size,_Options> m_data;
- int m_cols;
- public:
- inline explicit ei_matrix_storage() : m_cols(0) {}
- inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
- : m_data(ei_constructor_without_unaligned_array_assert()), m_cols(0) {}
- inline ei_matrix_storage(int, int, int cols) : m_cols(cols) {}
- inline ~ei_matrix_storage() {}
- inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
- inline int rows(void) const {return _Rows;}
- inline int cols(void) const {return m_cols;}
- inline void resize(int, int, int cols)
- {
- m_cols = cols;
- }
- inline const T *data() const { return m_data.array; }
- inline T *data() { return m_data.array; }
-};
-
-// purely dynamic matrix.
-template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic, _Options>
-{
- T *m_data;
- int m_rows;
- int m_cols;
- public:
- inline explicit ei_matrix_storage() : m_data(0), m_rows(0), m_cols(0) {}
- inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert)
- : m_data(0), m_rows(0), m_cols(0) {}
- inline ei_matrix_storage(int size, int rows, int cols)
- : m_data(ei_aligned_new<T>(size)), m_rows(rows), m_cols(cols) {}
- inline ~ei_matrix_storage() { ei_aligned_delete(m_data, m_rows*m_cols); }
- inline void swap(ei_matrix_storage& other)
- { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
- inline int rows(void) const {return m_rows;}
- inline int cols(void) const {return m_cols;}
- void resize(int size, int rows, int cols)
- {
- if(size != m_rows*m_cols)
- {
- ei_aligned_delete(m_data, m_rows*m_cols);
- if (size)
- m_data = ei_aligned_new<T>(size);
- else
- m_data = 0;
- }
- m_rows = rows;
- m_cols = cols;
- }
- inline const T *data() const { return m_data; }
- inline T *data() { return m_data; }
-};
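[Editorial note -- not part of the diff] The resize() above reallocates only when the total element count changes, so reshaping a 4x6 matrix into 6x4 reuses the buffer. A plain-C++ mimic of that invariant (hypothetical, not Eigen code):

    #include <cassert>
    #include <cstdlib>
    struct DynStorage
    {
      double* data; int rows, cols;
      void resize(int size, int r, int c)
      {
        if (size != rows * cols)  // reallocate only on a real size change
        {
          std::free(data);
          data = size ? static_cast<double*>(std::malloc(size * sizeof(double))) : 0;
        }
        rows = r; cols = c;
      }
    };
    int main()
    {
      DynStorage s = { static_cast<double*>(std::malloc(24 * sizeof(double))), 4, 6 };
      double* before = s.data;
      s.resize(24, 6, 4);        // 4x6 -> 6x4: still 24 elements
      assert(s.data == before);  // the buffer was reused
      std::free(s.data);
      return 0;
    }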
-
-// matrix with dynamic width and fixed height (so that matrix has dynamic size).
-template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic, _Rows, Dynamic, _Options>
-{
- T *m_data;
- int m_cols;
- public:
- inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {}
- inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
- inline ei_matrix_storage(int size, int, int cols) : m_data(ei_aligned_new<T>(size)), m_cols(cols) {}
- inline ~ei_matrix_storage() { ei_aligned_delete(m_data, _Rows*m_cols); }
- inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
- inline static int rows(void) {return _Rows;}
- inline int cols(void) const {return m_cols;}
- void resize(int size, int, int cols)
- {
- if(size != _Rows*m_cols)
- {
- ei_aligned_delete(m_data, _Rows*m_cols);
- if (size)
- m_data = ei_aligned_new<T>(size);
- else
- m_data = 0;
- }
- m_cols = cols;
- }
- inline const T *data() const { return m_data; }
- inline T *data() { return m_data; }
-};
-
-// matrix with dynamic height and fixed width (so that matrix has dynamic size).
-template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic, _Cols, _Options>
-{
- T *m_data;
- int m_rows;
- public:
- inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {}
- inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
- inline ei_matrix_storage(int size, int rows, int) : m_data(ei_aligned_new<T>(size)), m_rows(rows) {}
- inline ~ei_matrix_storage() { ei_aligned_delete(m_data, _Cols*m_rows); }
- inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
- inline int rows(void) const {return m_rows;}
- inline static int cols(void) {return _Cols;}
- void resize(int size, int rows, int)
- {
- if(size != m_rows*_Cols)
- {
- ei_aligned_delete(m_data, _Cols*m_rows);
- if (size)
- m_data = ei_aligned_new<T>(size);
- else
- m_data = 0;
- }
- m_rows = rows;
- }
- inline const T *data() const { return m_data; }
- inline T *data() { return m_data; }
-};
-
-#endif // EIGEN_MATRIXSTORAGE_H
diff --git a/extern/Eigen2/Eigen/src/Core/NumTraits.h b/extern/Eigen2/Eigen/src/Core/NumTraits.h
deleted file mode 100644
index b27284a78bc..00000000000
--- a/extern/Eigen2/Eigen/src/Core/NumTraits.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_NUMTRAITS_H
-#define EIGEN_NUMTRAITS_H
-
-/** \class NumTraits
- *
- * \brief Holds some data about the various numeric (i.e. scalar) types allowed by Eigen.
- *
- * \param T the numeric type about which this class provides data. Recall that Eigen allows
- * only the following types for \a T: \c int, \c float, \c double,
- * \c std::complex<float>, \c std::complex<double>, and \c long \c double (especially
- * useful to enforce x87 arithmetic when SSE is the default).
- *
- * The provided data consists of:
- * \li A typedef \a Real, giving the "real part" type of \a T. If \a T is already real,
- * then \a Real is just a typedef to \a T. If \a T is \c std::complex<U> then \a Real
- * is a typedef to \a U.
- * \li A typedef \a FloatingPoint, giving the "floating-point type" of \a T. If \a T is
- * \c int, then \a FloatingPoint is a typedef to \c double. Otherwise, \a FloatingPoint
- * is a typedef to \a T.
- * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex
- * type, and to 0 otherwise.
- * \li An enum \a HasFloatingPoint. It is equal to \c 0 if \a T is \c int,
- * and to \c 1 otherwise.
- */
-template<typename T> struct NumTraits;
-
-template<> struct NumTraits<int>
-{
- typedef int Real;
- typedef double FloatingPoint;
- enum {
- IsComplex = 0,
- HasFloatingPoint = 0,
- ReadCost = 1,
- AddCost = 1,
- MulCost = 1
- };
-};
-
-template<> struct NumTraits<float>
-{
- typedef float Real;
- typedef float FloatingPoint;
- enum {
- IsComplex = 0,
- HasFloatingPoint = 1,
- ReadCost = 1,
- AddCost = 1,
- MulCost = 1
- };
-};
-
-template<> struct NumTraits<double>
-{
- typedef double Real;
- typedef double FloatingPoint;
- enum {
- IsComplex = 0,
- HasFloatingPoint = 1,
- ReadCost = 1,
- AddCost = 1,
- MulCost = 1
- };
-};
-
-template<typename _Real> struct NumTraits<std::complex<_Real> >
-{
- typedef _Real Real;
- typedef std::complex<_Real> FloatingPoint;
- enum {
- IsComplex = 1,
- HasFloatingPoint = NumTraits<Real>::HasFloatingPoint,
- ReadCost = 2,
- AddCost = 2 * NumTraits<Real>::AddCost,
- MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost
- };
-};
-
-template<> struct NumTraits<long long int>
-{
- typedef long long int Real;
- typedef long double FloatingPoint;
- enum {
- IsComplex = 0,
- HasFloatingPoint = 0,
- ReadCost = 1,
- AddCost = 1,
- MulCost = 1
- };
-};
-
-template<> struct NumTraits<long double>
-{
- typedef long double Real;
- typedef long double FloatingPoint;
- enum {
- IsComplex = 0,
- HasFloatingPoint = 1,
- ReadCost = 1,
- AddCost = 1,
- MulCost = 1
- };
-};
-
-template<> struct NumTraits<bool>
-{
- typedef bool Real;
- typedef float FloatingPoint;
- enum {
- IsComplex = 0,
- HasFloatingPoint = 0,
- ReadCost = 1,
- AddCost = 1,
- MulCost = 1
- };
-};
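[Editorial note -- not part of the diff] A hypothetical sketch of the specialization a user would add for a custom scalar type, following the pattern above; MyFixed and its cost values are invented for illustration:

    struct MyFixed { int raw; };      // hypothetical 16.16 fixed-point scalar
    template<> struct NumTraits<MyFixed>
    {
      typedef MyFixed Real;           // the type is already real
      typedef double FloatingPoint;   // promote to double when floating point is required
      enum {
        IsComplex = 0,
        HasFloatingPoint = 0,         // behaves like an integer type
        ReadCost = 1,
        AddCost = 1,
        MulCost = 2                   // a fixed-point multiply also pays for a shift
      };
    };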
-
-#endif // EIGEN_NUMTRAITS_H
diff --git a/extern/Eigen2/Eigen/src/Core/Part.h b/extern/Eigen2/Eigen/src/Core/Part.h
deleted file mode 100644
index 96229f43b68..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Part.h
+++ /dev/null
@@ -1,377 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_PART_H
-#define EIGEN_PART_H
-
-/** \nonstableyet
- * \class Part
- *
- * \brief Expression of a triangular matrix extracted from a given matrix
- *
- * \param MatrixType the type of the object in which we are taking the triangular part
- * \param Mode the kind of triangular matrix expression to construct. Can be UpperTriangular, StrictlyUpperTriangular,
- * UnitUpperTriangular, LowerTriangular, StrictlyLowerTriangular, UnitLowerTriangular. This is in fact a bit field; it must have either
- * UpperTriangularBit or LowerTriangularBit, and additionally it may have either ZeroDiagBit or
- * UnitDiagBit.
- *
- * This class represents an expression of the upper or lower triangular part of
- * a square matrix, possibly with a further assumption on the diagonal. It is the return type
- * of MatrixBase::part() and most of the time this is the only way it is used.
- *
- * \sa MatrixBase::part()
- */
-template<typename MatrixType, unsigned int Mode>
-struct ei_traits<Part<MatrixType, Mode> > : ei_traits<MatrixType>
-{
- typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
- typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
- enum {
- Flags = (_MatrixTypeNested::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode,
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost
- };
-};
-
-template<typename MatrixType, unsigned int Mode> class Part
- : public MatrixBase<Part<MatrixType, Mode> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(Part)
-
- inline Part(const MatrixType& matrix) : m_matrix(matrix)
- { ei_assert(ei_are_flags_consistent<Mode>::ret); }
-
- /** \sa MatrixBase::operator+=() */
- template<typename Other> Part& operator+=(const Other& other);
- /** \sa MatrixBase::operator-=() */
- template<typename Other> Part& operator-=(const Other& other);
- /** \sa MatrixBase::operator*=() */
- Part& operator*=(const typename ei_traits<MatrixType>::Scalar& other);
- /** \sa MatrixBase::operator/=() */
- Part& operator/=(const typename ei_traits<MatrixType>::Scalar& other);
-
- /** \sa operator=(), MatrixBase::lazyAssign() */
- template<typename Other> void lazyAssign(const Other& other);
- /** \sa MatrixBase::operator=() */
- template<typename Other> Part& operator=(const Other& other);
-
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
- inline int stride() const { return m_matrix.stride(); }
-
- inline Scalar coeff(int row, int col) const
- {
- // SelfAdjointBit doesn't play any role here: just because a matrix is selfadjoint doesn't say anything about
- // each individual coefficient, except for the not-very-useful-here fact that diagonal coefficients are real.
- if( ((Flags & LowerTriangularBit) && (col>row)) || ((Flags & UpperTriangularBit) && (row>col)) )
- return (Scalar)0;
- if(Flags & UnitDiagBit)
- return col==row ? (Scalar)1 : m_matrix.coeff(row, col);
- else if(Flags & ZeroDiagBit)
- return col==row ? (Scalar)0 : m_matrix.coeff(row, col);
- else
- return m_matrix.coeff(row, col);
- }
-
- inline Scalar& coeffRef(int row, int col)
- {
- EIGEN_STATIC_ASSERT(!(Flags & UnitDiagBit), WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED)
- EIGEN_STATIC_ASSERT(!(Flags & SelfAdjointBit), COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED)
- ei_assert( (Mode==UpperTriangular && col>=row)
- || (Mode==LowerTriangular && col<=row)
- || (Mode==StrictlyUpperTriangular && col>row)
- || (Mode==StrictlyLowerTriangular && col<row));
- return m_matrix.const_cast_derived().coeffRef(row, col);
- }
-
- /** \internal */
- const MatrixType& _expression() const { return m_matrix; }
-
- /** discard any writes to a row */
- const Block<Part, 1, ColsAtCompileTime> row(int i) { return Base::row(i); }
- const Block<Part, 1, ColsAtCompileTime> row(int i) const { return Base::row(i); }
- /** discard any writes to a column */
- const Block<Part, RowsAtCompileTime, 1> col(int i) { return Base::col(i); }
- const Block<Part, RowsAtCompileTime, 1> col(int i) const { return Base::col(i); }
-
- template<typename OtherDerived>
- void swap(const MatrixBase<OtherDerived>& other)
- {
- Part<SwapWrapper<MatrixType>,Mode>(const_cast<MatrixType&>(m_matrix)).lazyAssign(other.derived());
- }
-
- protected:
- const typename MatrixType::Nested m_matrix;
-
- private:
- Part& operator=(const Part&);
-};
-
-/** \nonstableyet
- * \returns an expression of a triangular matrix extracted from the current matrix
- *
- * The parameter \a Mode can have the following values: \c UpperTriangular, \c StrictlyUpperTriangular, \c UnitUpperTriangular,
- * \c LowerTriangular, \c StrictlyLowerTriangular, \c UnitLowerTriangular.
- *
- * \addexample PartExample \label How to extract a triangular part of an arbitrary matrix
- *
- * Example: \include MatrixBase_extract.cpp
- * Output: \verbinclude MatrixBase_extract.out
- *
- * \sa class Part, part(), marked()
- */
-template<typename Derived>
-template<unsigned int Mode>
-const Part<Derived, Mode> MatrixBase<Derived>::part() const
-{
- return derived();
-}
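[Editorial note -- not part of the diff] A minimal usage sketch of the read-only accessor defined above, assuming the Eigen2 headers in this tree:

    #include <Eigen/Core>
    #include <iostream>
    int main()
    {
      Eigen::Matrix3d m = Eigen::Matrix3d::Constant(7.0);
      // Coefficients strictly below the diagonal read as 0 through the view.
      std::cout << m.part<Eigen::UpperTriangular>() << std::endl;
      return 0;
    }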
-
-template<typename MatrixType, unsigned int Mode>
-template<typename Other>
-inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator=(const Other& other)
-{
- if(Other::Flags & EvalBeforeAssigningBit)
- {
- typename MatrixBase<Other>::PlainMatrixType other_evaluated(other.rows(), other.cols());
- other_evaluated.template part<Mode>().lazyAssign(other);
- lazyAssign(other_evaluated);
- }
- else
- lazyAssign(other.derived());
- return *this;
-}
-
-template<typename Derived1, typename Derived2, unsigned int Mode, int UnrollCount>
-struct ei_part_assignment_impl
-{
- enum {
- col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
- row = (UnrollCount-1) % Derived1::RowsAtCompileTime
- };
-
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- ei_part_assignment_impl<Derived1, Derived2, Mode, UnrollCount-1>::run(dst, src);
-
- if(Mode == SelfAdjoint)
- {
- if(row == col)
- dst.coeffRef(row, col) = ei_real(src.coeff(row, col));
- else if(row < col)
- dst.coeffRef(col, row) = ei_conj(dst.coeffRef(row, col) = src.coeff(row, col));
- }
- else
- {
- ei_assert(Mode == UpperTriangular || Mode == LowerTriangular || Mode == StrictlyUpperTriangular || Mode == StrictlyLowerTriangular);
- if((Mode == UpperTriangular && row <= col)
- || (Mode == LowerTriangular && row >= col)
- || (Mode == StrictlyUpperTriangular && row < col)
- || (Mode == StrictlyLowerTriangular && row > col))
- dst.copyCoeff(row, col, src);
- }
- }
-};
-
-template<typename Derived1, typename Derived2, unsigned int Mode>
-struct ei_part_assignment_impl<Derived1, Derived2, Mode, 1>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- if(!(Mode & ZeroDiagBit))
- dst.copyCoeff(0, 0, src);
- }
-};
-
-// prevent buggy user code from causing an infinite recursion
-template<typename Derived1, typename Derived2, unsigned int Mode>
-struct ei_part_assignment_impl<Derived1, Derived2, Mode, 0>
-{
- inline static void run(Derived1 &, const Derived2 &) {}
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_part_assignment_impl<Derived1, Derived2, UpperTriangular, Dynamic>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- for(int j = 0; j < dst.cols(); ++j)
- for(int i = 0; i <= j; ++i)
- dst.copyCoeff(i, j, src);
- }
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_part_assignment_impl<Derived1, Derived2, LowerTriangular, Dynamic>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- for(int j = 0; j < dst.cols(); ++j)
- for(int i = j; i < dst.rows(); ++i)
- dst.copyCoeff(i, j, src);
- }
-};
-
-template<typename Derived1, typename Derived2>
-struct ei_part_assignment_impl<Derived1, Derived2, StrictlyUpperTriangular, Dynamic>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- for(int j = 0; j < dst.cols(); ++j)
- for(int i = 0; i < j; ++i)
- dst.copyCoeff(i, j, src);
- }
-};
-template<typename Derived1, typename Derived2>
-struct ei_part_assignment_impl<Derived1, Derived2, StrictlyLowerTriangular, Dynamic>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- for(int j = 0; j < dst.cols(); ++j)
- for(int i = j+1; i < dst.rows(); ++i)
- dst.copyCoeff(i, j, src);
- }
-};
-template<typename Derived1, typename Derived2>
-struct ei_part_assignment_impl<Derived1, Derived2, SelfAdjoint, Dynamic>
-{
- inline static void run(Derived1 &dst, const Derived2 &src)
- {
- for(int j = 0; j < dst.cols(); ++j)
- {
- for(int i = 0; i < j; ++i)
- dst.coeffRef(j, i) = ei_conj(dst.coeffRef(i, j) = src.coeff(i, j));
- dst.coeffRef(j, j) = ei_real(src.coeff(j, j));
- }
- }
-};
-
-template<typename MatrixType, unsigned int Mode>
-template<typename Other>
-void Part<MatrixType, Mode>::lazyAssign(const Other& other)
-{
- const bool unroll = MatrixType::SizeAtCompileTime * Other::CoeffReadCost / 2 <= EIGEN_UNROLLING_LIMIT;
- ei_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
-
- ei_part_assignment_impl
- <MatrixType, Other, Mode,
- unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic
- >::run(m_matrix.const_cast_derived(), other.derived());
-}
-
-/** \nonstableyet
- * \returns an lvalue pseudo-expression that allows special operations to be performed on \c *this.
- *
- * The \a Mode parameter can have the following values: \c UpperTriangular, \c StrictlyUpperTriangular, \c LowerTriangular,
- * \c StrictlyLowerTriangular, \c SelfAdjoint.
- *
- * \addexample PartExample \label How to write to a triangular part of a matrix
- *
- * Example: \include MatrixBase_part.cpp
- * Output: \verbinclude MatrixBase_part.out
- *
- * \sa class Part, MatrixBase::extract(), MatrixBase::marked()
- */
-template<typename Derived>
-template<unsigned int Mode>
-inline Part<Derived, Mode> MatrixBase<Derived>::part()
-{
- return Part<Derived, Mode>(derived());
-}
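[Editorial note -- not part of the diff] A short sketch of writing through the lvalue expression returned above, assuming the Eigen2 headers in this tree; only the selected triangle is touched:

    #include <Eigen/Core>
    int main()
    {
      Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
      m.part<Eigen::UpperTriangular>() = Eigen::Matrix3d::Constant(1.0);
      // m now holds ones on and above the diagonal, zeros strictly below it.
      return 0;
    }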
-
-/** \returns true if *this is approximately equal to an upper triangular matrix,
- * within the precision given by \a prec.
- *
- * \sa isLowerTriangular(), extract(), part(), marked()
- */
-template<typename Derived>
-bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
-{
- if(cols() != rows()) return false;
- RealScalar maxAbsOnUpperTriangularPart = static_cast<RealScalar>(-1);
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i <= j; ++i)
- {
- RealScalar absValue = ei_abs(coeff(i,j));
- if(absValue > maxAbsOnUpperTriangularPart) maxAbsOnUpperTriangularPart = absValue;
- }
- for(int j = 0; j < cols()-1; ++j)
- for(int i = j+1; i < rows(); ++i)
- if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnUpperTriangularPart, prec)) return false;
- return true;
-}
-
-/** \returns true if *this is approximately equal to a lower triangular matrix,
- * within the precision given by \a prec.
- *
- * \sa isUpperTriangular(), extract(), part(), marked()
- */
-template<typename Derived>
-bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
-{
- if(cols() != rows()) return false;
- RealScalar maxAbsOnLowerTriangularPart = static_cast<RealScalar>(-1);
- for(int j = 0; j < cols(); ++j)
- for(int i = j; i < rows(); ++i)
- {
- RealScalar absValue = ei_abs(coeff(i,j));
- if(absValue > maxAbsOnLowerTriangularPart) maxAbsOnLowerTriangularPart = absValue;
- }
- for(int j = 1; j < cols(); ++j)
- for(int i = 0; i < j; ++i)
- if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnLowerTriangularPart, prec)) return false;
- return true;
-}
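[Editorial note -- not part of the diff] A sketch of the fuzzy criterion both tests implement, assuming the Eigen2 headers in this tree (and assuming the default precision<double>() is around 1e-11, which is worth verifying against MathFunctions.h):

    #include <Eigen/Core>
    #include <cassert>
    int main()
    {
      Eigen::Matrix3d m;
      m << 1.0,   2.0,   3.0,
           1e-13, 4.0,   5.0,
           0.0,   1e-13, 6.0;
      // The sub-diagonal noise is much smaller than the largest coefficient
      // of the upper triangular part, so the test passes.
      assert(m.isUpperTriangular());
      return 0;
    }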
-
-template<typename MatrixType, unsigned int Mode>
-template<typename Other>
-inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator+=(const Other& other)
-{
- return *this = m_matrix + other;
-}
-
-template<typename MatrixType, unsigned int Mode>
-template<typename Other>
-inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator-=(const Other& other)
-{
- return *this = m_matrix - other;
-}
-
-template<typename MatrixType, unsigned int Mode>
-inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator*=
-(const typename ei_traits<MatrixType>::Scalar& other)
-{
- return *this = m_matrix * other;
-}
-
-template<typename MatrixType, unsigned int Mode>
-inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator/=
-(const typename ei_traits<MatrixType>::Scalar& other)
-{
- return *this = m_matrix / other;
-}
-
-#endif // EIGEN_PART_H
diff --git a/extern/Eigen2/Eigen/src/Core/Product.h b/extern/Eigen2/Eigen/src/Core/Product.h
deleted file mode 100644
index 1151b21641c..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Product.h
+++ /dev/null
@@ -1,769 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_PRODUCT_H
-#define EIGEN_PRODUCT_H
-
-/***************************
-*** Forward declarations ***
-***************************/
-
-template<int VectorizationMode, int Index, typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl;
-
-template<int StorageOrder, int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl;
-
-/** \class ProductReturnType
- *
- * \brief Helper class to get the correct and optimized returned type of operator*
- *
- * \param Lhs the type of the left-hand side
- * \param Rhs the type of the right-hand side
- * \param ProductMode the type of the product (determined automatically by ei_product_mode)
- *
- * This class defines the typename Type representing the optimized product expression
- * between two matrix expressions. In practice, using ProductReturnType<Lhs,Rhs>::Type
- * is the recommended way to define the result type of a function returning an expression
- * which involves a matrix product. The classes Product and DiagonalProduct should never be
- * used directly.
- *
- * \sa class Product, class DiagonalProduct, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
- */
-template<typename Lhs, typename Rhs, int ProductMode>
-struct ProductReturnType
-{
- typedef typename ei_nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
- typedef typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
-
- typedef Product<LhsNested, RhsNested, ProductMode> Type;
-};
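[Editorial note -- not part of the diff] A sketch of the recommendation above: a helper that returns the product expression without forcing an evaluation. It assumes ProductMode has a default template argument of ei_product_mode, as the two-argument uses of ProductReturnType in MatrixBase suggest:

    #include <Eigen/Core>
    template<typename Lhs, typename Rhs>
    const typename Eigen::ProductReturnType<Lhs, Rhs>::Type
    matrixProduct(const Eigen::MatrixBase<Lhs>& a, const Eigen::MatrixBase<Rhs>& b)
    {
      return a.derived() * b.derived();  // the same type operator* returns
    }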
-
-// cache friendly specialization
-// note that there is a DiagonalProduct specialization in DiagonalProduct.h
-template<typename Lhs, typename Rhs>
-struct ProductReturnType<Lhs,Rhs,CacheFriendlyProduct>
-{
- typedef typename ei_nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
-
- typedef typename ei_nested<Rhs,Lhs::RowsAtCompileTime,
- typename ei_plain_matrix_type_column_major<Rhs>::type
- >::type RhsNested;
-
- typedef Product<LhsNested, RhsNested, CacheFriendlyProduct> Type;
-};
-
-/* Helper class to determine the type of the product, which can be either:
- * - NormalProduct
- * - CacheFriendlyProduct
- * - DiagonalProduct
- */
-template<typename Lhs, typename Rhs> struct ei_product_mode
-{
- enum{
-
- value = ((Rhs::Flags&Diagonal)==Diagonal) || ((Lhs::Flags&Diagonal)==Diagonal)
- ? DiagonalProduct
- : Lhs::MaxColsAtCompileTime == Dynamic
- && ( Lhs::MaxRowsAtCompileTime == Dynamic
- || Rhs::MaxColsAtCompileTime == Dynamic )
- && (!(Rhs::IsVectorAtCompileTime && (Lhs::Flags&RowMajorBit) && (!(Lhs::Flags&DirectAccessBit))))
- && (!(Lhs::IsVectorAtCompileTime && (!(Rhs::Flags&RowMajorBit)) && (!(Rhs::Flags&DirectAccessBit))))
- && (ei_is_same_type<typename Lhs::Scalar, typename Rhs::Scalar>::ret)
- ? CacheFriendlyProduct
- : NormalProduct };
-};
-
-/** \class Product
- *
- * \brief Expression of the product of two matrices
- *
- * \param LhsNested the type used to store the left-hand side
- * \param RhsNested the type used to store the right-hand side
- * \param ProductMode the type of the product
- *
- * This class represents an expression of the product of two matrices.
- * It is the return type of the operator* between matrices. Its template
- * arguments are determined automatically by ProductReturnType. Therefore,
- * Product should never be used directly. To determine the result type of a
- * function which involves a matrix product, use ProductReturnType::Type.
- *
- * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
- */
-template<typename LhsNested, typename RhsNested, int ProductMode>
-struct ei_traits<Product<LhsNested, RhsNested, ProductMode> >
-{
- // clean the nested types:
- typedef typename ei_cleantype<LhsNested>::type _LhsNested;
- typedef typename ei_cleantype<RhsNested>::type _RhsNested;
- typedef typename ei_scalar_product_traits<typename _LhsNested::Scalar, typename _RhsNested::Scalar>::ReturnType Scalar;
-
- enum {
- LhsCoeffReadCost = _LhsNested::CoeffReadCost,
- RhsCoeffReadCost = _RhsNested::CoeffReadCost,
- LhsFlags = _LhsNested::Flags,
- RhsFlags = _RhsNested::Flags,
-
- RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
- ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
- InnerSize = EIGEN_ENUM_MIN(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
-
- MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
-
- LhsRowMajor = LhsFlags & RowMajorBit,
- RhsRowMajor = RhsFlags & RowMajorBit,
-
- CanVectorizeRhs = RhsRowMajor && (RhsFlags & PacketAccessBit)
- && (ColsAtCompileTime % ei_packet_traits<Scalar>::size == 0),
-
- CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit)
- && (RowsAtCompileTime % ei_packet_traits<Scalar>::size == 0),
-
- EvalToRowMajor = RhsRowMajor && (ProductMode==(int)CacheFriendlyProduct ? LhsRowMajor : (!CanVectorizeLhs)),
-
- RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
-
- Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
- | EvalBeforeAssigningBit
- | EvalBeforeNestingBit
- | (CanVectorizeLhs || CanVectorizeRhs ? PacketAccessBit : 0)
- | (LhsFlags & RhsFlags & AlignedBit),
-
- CoeffReadCost = InnerSize == Dynamic ? Dynamic
- : InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)
- + (InnerSize - 1) * NumTraits<Scalar>::AddCost,
-
- /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside
- * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner
- * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect
- * the Flags, it is safe to make this value depend on ActualPacketAccessBit, which doesn't affect the ABI.
- */
- CanVectorizeInner = LhsRowMajor && (!RhsRowMajor) && (LhsFlags & RhsFlags & ActualPacketAccessBit)
- && (InnerSize % ei_packet_traits<Scalar>::size == 0)
- };
-};
-
-template<typename LhsNested, typename RhsNested, int ProductMode> class Product : ei_no_assignment_operator,
- public MatrixBase<Product<LhsNested, RhsNested, ProductMode> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
-
- private:
-
- typedef typename ei_traits<Product>::_LhsNested _LhsNested;
- typedef typename ei_traits<Product>::_RhsNested _RhsNested;
-
- enum {
- PacketSize = ei_packet_traits<Scalar>::size,
- InnerSize = ei_traits<Product>::InnerSize,
- Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT,
- CanVectorizeInner = ei_traits<Product>::CanVectorizeInner
- };
-
- typedef ei_product_coeff_impl<CanVectorizeInner ? InnerVectorization : NoVectorization,
- Unroll ? InnerSize-1 : Dynamic,
- _LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
-
- public:
-
- template<typename Lhs, typename Rhs>
- inline Product(const Lhs& lhs, const Rhs& rhs)
- : m_lhs(lhs), m_rhs(rhs)
- {
- // we don't allow taking products of matrices of different real types, as that wouldn't be vectorizable.
- // We still allow mixing T and complex<T>.
- EIGEN_STATIC_ASSERT((ei_is_same_type<typename Lhs::RealScalar, typename Rhs::RealScalar>::ret),
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
- ei_assert(lhs.cols() == rhs.rows()
- && "invalid matrix product"
- && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
- }
-
- /** \internal
- * compute \a res += \c *this using the cache friendly product.
- */
- template<typename DestDerived>
- void _cacheFriendlyEvalAndAdd(DestDerived& res) const;
-
- /** \internal
- * \returns whether it is worthwhile to use the cache friendly product.
- */
- EIGEN_STRONG_INLINE bool _useCacheFriendlyProduct() const
- {
- return m_lhs.cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
- && ( rows()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
- || cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD);
- }
-
- EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); }
-
- EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const
- {
- Scalar res;
- ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
- return res;
- }
-
- /* Allow index-based non-packet access. It is impossible, though, to allow index-based packet access,
- * which is why we don't set the LinearAccessBit.
- */
- EIGEN_STRONG_INLINE const Scalar coeff(int index) const
- {
- Scalar res;
- const int row = RowsAtCompileTime == 1 ? 0 : index;
- const int col = RowsAtCompileTime == 1 ? index : 0;
- ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
- return res;
- }
-
- template<int LoadMode>
- EIGEN_STRONG_INLINE const PacketScalar packet(int row, int col) const
- {
- PacketScalar res;
- ei_product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
- Unroll ? InnerSize-1 : Dynamic,
- _LhsNested, _RhsNested, PacketScalar, LoadMode>
- ::run(row, col, m_lhs, m_rhs, res);
- return res;
- }
-
- EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
- EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
- protected:
- const LhsNested m_lhs;
- const RhsNested m_rhs;
-};
-
-/** \returns the matrix product of \c *this and \a other.
- *
- * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
- *
- * \sa lazy(), operator*=(const MatrixBase&), Cwise::operator*()
- */
-template<typename Derived>
-template<typename OtherDerived>
-inline const typename ProductReturnType<Derived,OtherDerived>::Type
-MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
-{
- enum {
- ProductIsValid = Derived::ColsAtCompileTime==Dynamic
- || OtherDerived::RowsAtCompileTime==Dynamic
- || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
- AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
- SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
- };
- // note to the lost user:
- // * for a dot product use: v1.dot(v2)
- // * for a coeff-wise product use: v1.cwise()*v2
- EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
- INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
- EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
- INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
- EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
- return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
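[Editorial note -- not part of the diff] A sketch of the three distinct products the static asserts above point the user to, assuming the Eigen2 headers in this tree:

    #include <Eigen/Core>
    int main()
    {
      Eigen::Vector3d v1 = Eigen::Vector3d::Constant(1.0);
      Eigen::Vector3d v2 = Eigen::Vector3d::Constant(2.0);
      double d = v1.dot(v2);                 // dot product: a scalar
      Eigen::Vector3d c = v1.cwise() * v2;   // coefficient-wise product
      Eigen::Matrix3d a = Eigen::Matrix3d::Identity();
      Eigen::Matrix3d p = a * a;             // matrix product
      return (d == 6.0 && c.sum() == 6.0 && p.sum() == 3.0) ? 0 : 1;
    }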
-
-/** replaces \c *this by \c *this * \a other.
- *
- * \returns a reference to \c *this
- */
-template<typename Derived>
-template<typename OtherDerived>
-inline Derived &
-MatrixBase<Derived>::operator*=(const MatrixBase<OtherDerived> &other)
-{
- return derived() = derived() * other.derived();
-}
-
-/***************************************************************************
-* Normal product .coeff() implementation (with meta-unrolling)
-***************************************************************************/
-
-/**************************************
-*** Scalar path - no vectorization ***
-**************************************/
-
-template<int Index, typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl<NoVectorization, Index, Lhs, Rhs, RetScalar>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
- {
- ei_product_coeff_impl<NoVectorization, Index-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
- res += lhs.coeff(row, Index) * rhs.coeff(Index, col);
- }
-};
-
-template<typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl<NoVectorization, 0, Lhs, Rhs, RetScalar>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
- {
- res = lhs.coeff(row, 0) * rhs.coeff(0, col);
- }
-};
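[Editorial note -- not part of the diff] A plain-C++ sketch (not Eigen code) of what the meta-unrolled recursion above expands to at compile time, for a fixed inner dimension of 3:

    double productCoeff(const double lhs[3][3], const double rhs[3][3],
                        int row, int col)
    {
      double res = lhs[row][0] * rhs[0][col];  // Index == 0 base case
      res += lhs[row][1] * rhs[1][col];        // Index == 1
      res += lhs[row][2] * rhs[2][col];        // Index == 2
      return res;
    }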
-
-template<typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl<NoVectorization, Dynamic, Lhs, Rhs, RetScalar>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
- {
- ei_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
- res = lhs.coeff(row, 0) * rhs.coeff(0, col);
- for(int i = 1; i < lhs.cols(); ++i)
- res += lhs.coeff(row, i) * rhs.coeff(i, col);
- }
-};
-
-// prevent buggy user code from causing an infinite recursion
-template<typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl<NoVectorization, -1, Lhs, Rhs, RetScalar>
-{
- EIGEN_STRONG_INLINE static void run(int, int, const Lhs&, const Rhs&, RetScalar&) {}
-};
-
-/*******************************************
-*** Scalar path with inner vectorization ***
-*******************************************/
-
-template<int Index, typename Lhs, typename Rhs, typename PacketScalar>
-struct ei_product_coeff_vectorized_unroller
-{
- enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size };
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
- {
- ei_product_coeff_vectorized_unroller<Index-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
- pres = ei_padd(pres, ei_pmul( lhs.template packet<Aligned>(row, Index) , rhs.template packet<Aligned>(Index, col) ));
- }
-};
-
-template<typename Lhs, typename Rhs, typename PacketScalar>
-struct ei_product_coeff_vectorized_unroller<0, Lhs, Rhs, PacketScalar>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
- {
- pres = ei_pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col));
- }
-};
-
-template<int Index, typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl<InnerVectorization, Index, Lhs, Rhs, RetScalar>
-{
- typedef typename Lhs::PacketScalar PacketScalar;
- enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size };
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
- {
- PacketScalar pres;
- ei_product_coeff_vectorized_unroller<Index+1-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
- ei_product_coeff_impl<NoVectorization,Index,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
- res = ei_predux(pres);
- }
-};
-
-template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime>
-struct ei_product_coeff_vectorized_dyn_selector
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
- {
- res = ei_dot_impl<
- Block<Lhs, 1, ei_traits<Lhs>::ColsAtCompileTime>,
- Block<Rhs, ei_traits<Rhs>::RowsAtCompileTime, 1>,
- LinearVectorization, NoUnrolling>::run(lhs.row(row), rhs.col(col));
- }
-};
-
-// NOTE the three following specializations exist because taking .col(0) on a vector is a bit slower
-// NOTE maybe they are now useless since we have a specialization for Block<Matrix>
-template<typename Lhs, typename Rhs, int RhsCols>
-struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
-{
- EIGEN_STRONG_INLINE static void run(int /*row*/, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
- {
- res = ei_dot_impl<
- Lhs,
- Block<Rhs, ei_traits<Rhs>::RowsAtCompileTime, 1>,
- LinearVectorization, NoUnrolling>::run(lhs, rhs.col(col));
- }
-};
-
-template<typename Lhs, typename Rhs, int LhsRows>
-struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
-{
- EIGEN_STRONG_INLINE static void run(int row, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
- {
- res = ei_dot_impl<
- Block<Lhs, 1, ei_traits<Lhs>::ColsAtCompileTime>,
- Rhs,
- LinearVectorization, NoUnrolling>::run(lhs.row(row), rhs);
- }
-};
-
-template<typename Lhs, typename Rhs>
-struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
-{
- EIGEN_STRONG_INLINE static void run(int /*row*/, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
- {
- res = ei_dot_impl<
- Lhs,
- Rhs,
- LinearVectorization, NoUnrolling>::run(lhs, rhs);
- }
-};
-
-template<typename Lhs, typename Rhs, typename RetScalar>
-struct ei_product_coeff_impl<InnerVectorization, Dynamic, Lhs, Rhs, RetScalar>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
- {
- ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, res);
- }
-};
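
The paths above share one trick: the inner product for coefficient (row, col) is unrolled at compile time by recursive template instantiation. A minimal scalar sketch of that trick (hypothetical names, plain doubles standing in for SIMD packets):

    // Each instantiation peels one index; the Index == 0 specialization
    // terminates the recursion, so the whole dot product is unrolled.
    template<int Index>
    struct dot_unroller {
      static double run(const double* a, const double* b) {
        return dot_unroller<Index - 1>::run(a, b) + a[Index] * b[Index];
      }
    };

    template<>
    struct dot_unroller<0> {
      static double run(const double* a, const double* b) { return a[0] * b[0]; }
    };

    int main() {
      double a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8};
      // Fully unrolled 4-term dot product: 1*5 + 2*6 + 3*7 + 4*8 = 70
      return dot_unroller<3>::run(a, b) == 70.0 ? 0 : 1;
    }
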
-
-/*******************
-*** Packet path ***
-*******************/
-
-template<int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl<RowMajor, Index, Lhs, Rhs, PacketScalar, LoadMode>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
- {
- ei_product_packet_impl<RowMajor, Index-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
- res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.template packet<LoadMode>(Index, col), res);
- }
-};
-
-template<int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl<ColMajor, Index, Lhs, Rhs, PacketScalar, LoadMode>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
- {
- ei_product_packet_impl<ColMajor, Index-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
- res = ei_pmadd(lhs.template packet<LoadMode>(row, Index), ei_pset1(rhs.coeff(Index, col)), res);
- }
-};
-
-template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl<RowMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
- {
- res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
- }
-};
-
-template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl<ColMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
- {
- res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col)));
- }
-};
-
-template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
- {
-    ei_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
- res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
- for(int i = 1; i < lhs.cols(); ++i)
- res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
- }
-};
-
-template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
-struct ei_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode>
-{
- EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
- {
-    ei_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
- res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col)));
- for(int i = 1; i < lhs.cols(); ++i)
- res = ei_pmadd(lhs.template packet<LoadMode>(row, i), ei_pset1(rhs.coeff(i, col)), res);
- }
-};
-
-/***************************************************************************
-* Cache friendly product callers and specific nested evaluation strategies
-***************************************************************************/
-
-template<typename Scalar, typename RhsType>
-static void ei_cache_friendly_product_colmajor_times_vector(
- int size, const Scalar* lhs, int lhsStride, const RhsType& rhs, Scalar* res);
-
-template<typename Scalar, typename ResType>
-static void ei_cache_friendly_product_rowmajor_times_vector(
- const Scalar* lhs, int lhsStride, const Scalar* rhs, int rhsSize, ResType& res);
-
-template<typename ProductType,
- int LhsRows = ei_traits<ProductType>::RowsAtCompileTime,
- int LhsOrder = int(ei_traits<ProductType>::LhsFlags)&RowMajorBit ? RowMajor : ColMajor,
- int LhsHasDirectAccess = int(ei_traits<ProductType>::LhsFlags)&DirectAccessBit? HasDirectAccess : NoDirectAccess,
- int RhsCols = ei_traits<ProductType>::ColsAtCompileTime,
- int RhsOrder = int(ei_traits<ProductType>::RhsFlags)&RowMajorBit ? RowMajor : ColMajor,
- int RhsHasDirectAccess = int(ei_traits<ProductType>::RhsFlags)&DirectAccessBit? HasDirectAccess : NoDirectAccess>
-struct ei_cache_friendly_product_selector
-{
- template<typename DestDerived>
- inline static void run(DestDerived& res, const ProductType& product)
- {
- product._cacheFriendlyEvalAndAdd(res);
- }
-};
-
-// optimized colmajor * vector path
-template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
-struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,NoDirectAccess,1,RhsOrder,RhsAccess>
-{
- template<typename DestDerived>
- inline static void run(DestDerived& res, const ProductType& product)
- {
- const int size = product.rhs().rows();
- for (int k=0; k<size; ++k)
- res += product.rhs().coeff(k) * product.lhs().col(k);
- }
-};
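
The specialization above evaluates a column-major matrix times a vector as a sum of scaled columns, so each pass streams one contiguous column. A plain C++ sketch of the same access pattern (the flat column-major layout and all names are illustrative assumptions):

    #include <cstddef>
    #include <vector>

    // Accumulate y += x[k] * A.col(k) for each k; every inner loop walks one
    // contiguous column of A, which is the cache-friendly order here.
    void colmajor_times_vector(int rows, int cols,
                               const std::vector<double>& A,  // rows*cols, column-major
                               const std::vector<double>& x,
                               std::vector<double>& y)        // accumulated into
    {
      for (int k = 0; k < cols; ++k) {
        const double xk = x[k];
        const double* colk = &A[static_cast<std::size_t>(k) * rows];
        for (int i = 0; i < rows; ++i)
          y[i] += xk * colk[i];  // y += x[k] * A.col(k)
      }
    }
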
-
-// optimized cache friendly colmajor * vector path for matrix with direct access flag
-// NOTE this path could also be enabled for expressions if we add runtime align queries
-template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
-struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,HasDirectAccess,1,RhsOrder,RhsAccess>
-{
- typedef typename ProductType::Scalar Scalar;
-
- template<typename DestDerived>
- inline static void run(DestDerived& res, const ProductType& product)
- {
- enum {
- EvalToRes = (ei_packet_traits<Scalar>::size==1)
- ||((DestDerived::Flags&ActualPacketAccessBit) && (!(DestDerived::Flags & RowMajorBit))) };
- Scalar* EIGEN_RESTRICT _res;
- if (EvalToRes)
- _res = &res.coeffRef(0);
- else
- {
- _res = ei_aligned_stack_new(Scalar,res.size());
- Map<Matrix<Scalar,DestDerived::RowsAtCompileTime,1> >(_res, res.size()) = res;
- }
- ei_cache_friendly_product_colmajor_times_vector(res.size(),
- &product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(),
- product.rhs(), _res);
-
- if (!EvalToRes)
- {
- res = Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size());
- ei_aligned_stack_delete(Scalar, _res, res.size());
- }
- }
-};
-
-// optimized vector * rowmajor path
-template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
-struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,RowMajor,NoDirectAccess>
-{
- template<typename DestDerived>
- inline static void run(DestDerived& res, const ProductType& product)
- {
- const int cols = product.lhs().cols();
- for (int j=0; j<cols; ++j)
- res += product.lhs().coeff(j) * product.rhs().row(j);
- }
-};
-
-// optimized cache friendly vector * rowmajor path for matrix with direct access flag
-// NOTE this path could also be enabled for expressions if we add runtime align queries
-template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
-struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,RowMajor,HasDirectAccess>
-{
- typedef typename ProductType::Scalar Scalar;
-
- template<typename DestDerived>
- inline static void run(DestDerived& res, const ProductType& product)
- {
- enum {
- EvalToRes = (ei_packet_traits<Scalar>::size==1)
- ||((DestDerived::Flags & ActualPacketAccessBit) && (DestDerived::Flags & RowMajorBit)) };
- Scalar* EIGEN_RESTRICT _res;
- if (EvalToRes)
- _res = &res.coeffRef(0);
- else
- {
- _res = ei_aligned_stack_new(Scalar, res.size());
- Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size()) = res;
- }
- ei_cache_friendly_product_colmajor_times_vector(res.size(),
- &product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(),
- product.lhs().transpose(), _res);
-
- if (!EvalToRes)
- {
- res = Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size());
- ei_aligned_stack_delete(Scalar, _res, res.size());
- }
- }
-};
-
-// optimized rowmajor - vector product
-template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
-struct ei_cache_friendly_product_selector<ProductType,LhsRows,RowMajor,HasDirectAccess,1,RhsOrder,RhsAccess>
-{
- typedef typename ProductType::Scalar Scalar;
- typedef typename ei_traits<ProductType>::_RhsNested Rhs;
- enum {
- UseRhsDirectly = ((ei_packet_traits<Scalar>::size==1) || (Rhs::Flags&ActualPacketAccessBit))
- && (!(Rhs::Flags & RowMajorBit)) };
-
- template<typename DestDerived>
- inline static void run(DestDerived& res, const ProductType& product)
- {
- Scalar* EIGEN_RESTRICT _rhs;
- if (UseRhsDirectly)
- _rhs = &product.rhs().const_cast_derived().coeffRef(0);
- else
- {
- _rhs = ei_aligned_stack_new(Scalar, product.rhs().size());
- Map<Matrix<Scalar,Rhs::SizeAtCompileTime,1> >(_rhs, product.rhs().size()) = product.rhs();
- }
- ei_cache_friendly_product_rowmajor_times_vector(&product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(),
- _rhs, product.rhs().size(), res);
-
- if (!UseRhsDirectly) ei_aligned_stack_delete(Scalar, _rhs, product.rhs().size());
- }
-};
-
-// optimized vector - colmajor product
-template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
-struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,ColMajor,HasDirectAccess>
-{
- typedef typename ProductType::Scalar Scalar;
- typedef typename ei_traits<ProductType>::_LhsNested Lhs;
- enum {
- UseLhsDirectly = ((ei_packet_traits<Scalar>::size==1) || (Lhs::Flags&ActualPacketAccessBit))
- && (Lhs::Flags & RowMajorBit) };
-
- template<typename DestDerived>
- inline static void run(DestDerived& res, const ProductType& product)
- {
- Scalar* EIGEN_RESTRICT _lhs;
- if (UseLhsDirectly)
- _lhs = &product.lhs().const_cast_derived().coeffRef(0);
- else
- {
- _lhs = ei_aligned_stack_new(Scalar, product.lhs().size());
- Map<Matrix<Scalar,Lhs::SizeAtCompileTime,1> >(_lhs, product.lhs().size()) = product.lhs();
- }
- ei_cache_friendly_product_rowmajor_times_vector(&product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(),
- _lhs, product.lhs().size(), res);
-
- if(!UseLhsDirectly) ei_aligned_stack_delete(Scalar, _lhs, product.lhs().size());
- }
-};
-
-// discard this case which has to be handled by the default path
-// (we keep it to be sure to hit a compilation error if this is not the case)
-template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
-struct ei_cache_friendly_product_selector<ProductType,LhsRows,RowMajor,NoDirectAccess,1,RhsOrder,RhsAccess>
-{};
-
-// discard this case which has to be handled by the default path
-// (we keep it to be sure to hit a compilation error if this is not the case)
-template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
-struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,ColMajor,NoDirectAccess>
-{};
-
-
-/** \internal */
-template<typename Derived>
-template<typename Lhs,typename Rhs>
-inline Derived&
-MatrixBase<Derived>::operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other)
-{
- if (other._expression()._useCacheFriendlyProduct())
- ei_cache_friendly_product_selector<Product<Lhs,Rhs,CacheFriendlyProduct> >::run(const_cast_derived(), other._expression());
- else
- lazyAssign(derived() + other._expression());
- return derived();
-}
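
Given the signature above, this overload is reached from user code through the Eigen2-era .lazy() call, which produces exactly the Flagged<Product<...>> type it matches. A usage sketch (assuming the Eigen2 API):

    #include <Eigen/Core>

    // .lazy() suppresses the eager evaluation flags, so the += below is
    // routed through ei_cache_friendly_product_selector and accumulated in
    // place instead of via an evaluated temporary.
    int main() {
      Eigen::MatrixXf A = Eigen::MatrixXf::Random(64, 64);
      Eigen::MatrixXf B = Eigen::MatrixXf::Random(64, 64);
      Eigen::MatrixXf C = Eigen::MatrixXf::Zero(64, 64);
      C += (A * B).lazy();  // dispatches to the selector above
      return 0;
    }
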
-
-template<typename Derived>
-template<typename Lhs, typename Rhs>
-inline Derived& MatrixBase<Derived>::lazyAssign(const Product<Lhs,Rhs,CacheFriendlyProduct>& product)
-{
- if (product._useCacheFriendlyProduct())
- {
- setZero();
- ei_cache_friendly_product_selector<Product<Lhs,Rhs,CacheFriendlyProduct> >::run(const_cast_derived(), product);
- }
- else
- {
- lazyAssign<Product<Lhs,Rhs,CacheFriendlyProduct> >(product);
- }
- return derived();
-}
-
-template<typename T> struct ei_product_copy_rhs
-{
- typedef typename ei_meta_if<
- (ei_traits<T>::Flags & RowMajorBit)
- || (!(ei_traits<T>::Flags & DirectAccessBit)),
- typename ei_plain_matrix_type_column_major<T>::type,
- const T&
- >::ret type;
-};
-
-template<typename T> struct ei_product_copy_lhs
-{
- typedef typename ei_meta_if<
- (!(int(ei_traits<T>::Flags) & DirectAccessBit)),
- typename ei_plain_matrix_type<T>::type,
- const T&
- >::ret type;
-};
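
Both helpers above are compile-time branches: depending on the operand's flags they yield either an owned plain-matrix copy or a cheap const reference. The same selection written with std::conditional, as a sketch with a hypothetical flag in place of the traits test:

    #include <type_traits>

    // The ei_meta_if pattern is the compile-time branch std::conditional
    // provides today.
    template<typename T, bool NeedsCopy>
    struct product_operand {
      typedef typename std::conditional<NeedsCopy, T, const T&>::type type;
    };

    // product_operand<M, true>::type  == M         (owned, evaluated copy)
    // product_operand<M, false>::type == const M&  (reference, no copy)
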
-
-template<typename Lhs, typename Rhs, int ProductMode>
-template<typename DestDerived>
-inline void Product<Lhs,Rhs,ProductMode>::_cacheFriendlyEvalAndAdd(DestDerived& res) const
-{
- typedef typename ei_product_copy_lhs<_LhsNested>::type LhsCopy;
- typedef typename ei_unref<LhsCopy>::type _LhsCopy;
- typedef typename ei_product_copy_rhs<_RhsNested>::type RhsCopy;
- typedef typename ei_unref<RhsCopy>::type _RhsCopy;
- LhsCopy lhs(m_lhs);
- RhsCopy rhs(m_rhs);
- ei_cache_friendly_product<Scalar>(
- rows(), cols(), lhs.cols(),
- _LhsCopy::Flags&RowMajorBit, (const Scalar*)&(lhs.const_cast_derived().coeffRef(0,0)), lhs.stride(),
- _RhsCopy::Flags&RowMajorBit, (const Scalar*)&(rhs.const_cast_derived().coeffRef(0,0)), rhs.stride(),
- DestDerived::Flags&RowMajorBit, (Scalar*)&(res.coeffRef(0,0)), res.stride()
- );
-}
-
-#endif // EIGEN_PRODUCT_H
diff --git a/extern/Eigen2/Eigen/src/Core/Redux.h b/extern/Eigen2/Eigen/src/Core/Redux.h
deleted file mode 100644
index 734ef1929a4..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Redux.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_REDUX_H
-#define EIGEN_REDUX_H
-
-template<typename BinaryOp, typename Derived, int Start, int Length>
-struct ei_redux_impl
-{
- enum {
- HalfLength = Length/2
- };
-
- typedef typename ei_result_of<BinaryOp(typename Derived::Scalar)>::type Scalar;
-
- static Scalar run(const Derived &mat, const BinaryOp& func)
- {
- return func(
- ei_redux_impl<BinaryOp, Derived, Start, HalfLength>::run(mat, func),
- ei_redux_impl<BinaryOp, Derived, Start+HalfLength, Length - HalfLength>::run(mat, func));
- }
-};
-
-template<typename BinaryOp, typename Derived, int Start>
-struct ei_redux_impl<BinaryOp, Derived, Start, 1>
-{
- enum {
- col = Start / Derived::RowsAtCompileTime,
- row = Start % Derived::RowsAtCompileTime
- };
-
- typedef typename ei_result_of<BinaryOp(typename Derived::Scalar)>::type Scalar;
-
- static Scalar run(const Derived &mat, const BinaryOp &)
- {
- return mat.coeff(row, col);
- }
-};
-
-template<typename BinaryOp, typename Derived, int Start>
-struct ei_redux_impl<BinaryOp, Derived, Start, Dynamic>
-{
- typedef typename ei_result_of<BinaryOp(typename Derived::Scalar)>::type Scalar;
- static Scalar run(const Derived& mat, const BinaryOp& func)
- {
-    ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using an uninitialized matrix");
- Scalar res;
- res = mat.coeff(0,0);
- for(int i = 1; i < mat.rows(); ++i)
- res = func(res, mat.coeff(i, 0));
- for(int j = 1; j < mat.cols(); ++j)
- for(int i = 0; i < mat.rows(); ++i)
- res = func(res, mat.coeff(i, j));
- return res;
- }
-};
-
-/** \returns the result of a full redux operation on the whole matrix or vector using \a func
- *
- * The template parameter \a BinaryOp is the type of the functor \a func which must be
- * an associative operator. Both current STL and TR1 functor styles are handled.
- *
- * \sa MatrixBase::sum(), MatrixBase::minCoeff(), MatrixBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
- */
-template<typename Derived>
-template<typename BinaryOp>
-typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type
-MatrixBase<Derived>::redux(const BinaryOp& func) const
-{
- const bool unroll = SizeAtCompileTime * CoeffReadCost
- + (SizeAtCompileTime-1) * ei_functor_traits<BinaryOp>::Cost
- <= EIGEN_UNROLLING_LIMIT;
- return ei_redux_impl<BinaryOp, Derived, 0, unroll ? int(SizeAtCompileTime) : Dynamic>
- ::run(derived(), func);
-}
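
Since the doc comment above says STL-style functors are handled, a fold over all coefficients can be spelled directly; a usage sketch (assuming std::plus satisfies the associative-functor requirement):

    #include <functional>
    #include <Eigen/Core>

    // redux() folds every coefficient with the given binary functor, so this
    // should agree with m.sum().
    float sum_via_redux(const Eigen::MatrixXf& m) {
      return m.redux(std::plus<float>());
    }
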
-
-/** \returns the minimum of all coefficients of *this
- */
-template<typename Derived>
-inline typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::minCoeff() const
-{
- return this->redux(Eigen::ei_scalar_min_op<Scalar>());
-}
-
-/** \returns the maximum of all coefficients of *this
- */
-template<typename Derived>
-inline typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::maxCoeff() const
-{
- return this->redux(Eigen::ei_scalar_max_op<Scalar>());
-}
-
-#endif // EIGEN_REDUX_H
diff --git a/extern/Eigen2/Eigen/src/Core/SolveTriangular.h b/extern/Eigen2/Eigen/src/Core/SolveTriangular.h
deleted file mode 100644
index 12fb0e1d159..00000000000
--- a/extern/Eigen2/Eigen/src/Core/SolveTriangular.h
+++ /dev/null
@@ -1,297 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SOLVETRIANGULAR_H
-#define EIGEN_SOLVETRIANGULAR_H
-
-template<typename XprType> struct ei_is_part { enum {value=false}; };
-template<typename XprType, unsigned int Mode> struct ei_is_part<Part<XprType,Mode> > { enum {value=true}; };
-
-template<typename Lhs, typename Rhs,
- int TriangularPart = (int(Lhs::Flags) & LowerTriangularBit)
- ? LowerTriangular
- : (int(Lhs::Flags) & UpperTriangularBit)
- ? UpperTriangular
- : -1,
- int StorageOrder = ei_is_part<Lhs>::value ? -1 // this is to solve ambiguous specializations
- : int(Lhs::Flags) & (RowMajorBit|SparseBit)
- >
-struct ei_solve_triangular_selector;
-
-// transform a Part xpr to a Flagged xpr
-template<typename Lhs, unsigned int LhsMode, typename Rhs, int UpLo, int StorageOrder>
-struct ei_solve_triangular_selector<Part<Lhs,LhsMode>,Rhs,UpLo,StorageOrder>
-{
- static void run(const Part<Lhs,LhsMode>& lhs, Rhs& other)
- {
- ei_solve_triangular_selector<Flagged<Lhs,LhsMode,0>,Rhs>::run(lhs._expression(), other);
- }
-};
-
-// forward substitution, row-major
-template<typename Lhs, typename Rhs, int UpLo>
-struct ei_solve_triangular_selector<Lhs,Rhs,UpLo,RowMajor|IsDense>
-{
- typedef typename Rhs::Scalar Scalar;
- static void run(const Lhs& lhs, Rhs& other)
- {
- const bool IsLowerTriangular = (UpLo==LowerTriangular);
- const int size = lhs.cols();
- /* We perform the inverse product per block of 4 rows such that we perfectly match
- * our optimized matrix * vector product. blockyStart represents the number of rows
-     * we have to process first using the non-block version.
- */
- int blockyStart = (std::max(size-5,0)/4)*4;
- if (IsLowerTriangular)
- blockyStart = size - blockyStart;
- else
- blockyStart -= 1;
- for(int c=0 ; c<other.cols() ; ++c)
- {
- // process first rows using the non block version
- if(!(Lhs::Flags & UnitDiagBit))
- {
- if (IsLowerTriangular)
- other.coeffRef(0,c) = other.coeff(0,c)/lhs.coeff(0, 0);
- else
- other.coeffRef(size-1,c) = other.coeff(size-1, c)/lhs.coeff(size-1, size-1);
- }
- for(int i=(IsLowerTriangular ? 1 : size-2); IsLowerTriangular ? i<blockyStart : i>blockyStart; i += (IsLowerTriangular ? 1 : -1) )
- {
- Scalar tmp = other.coeff(i,c)
- - (IsLowerTriangular ? ((lhs.row(i).start(i)) * other.col(c).start(i)).coeff(0,0)
- : ((lhs.row(i).end(size-i-1)) * other.col(c).end(size-i-1)).coeff(0,0));
- if (Lhs::Flags & UnitDiagBit)
- other.coeffRef(i,c) = tmp;
- else
- other.coeffRef(i,c) = tmp/lhs.coeff(i,i);
- }
-
- // now let's process the remaining rows 4 at once
- for(int i=blockyStart; IsLowerTriangular ? i<size : i>0; )
- {
- int startBlock = i;
- int endBlock = startBlock + (IsLowerTriangular ? 4 : -4);
-
-        /* Process the 4-rows-by-i-columns block, and keep the result in a temporary vector */
-        // FIXME use a fixed-size block, but take care with small fixed-size matrices...
- Matrix<Scalar,Dynamic,1> btmp(4);
- if (IsLowerTriangular)
- btmp = lhs.block(startBlock,0,4,i) * other.col(c).start(i);
- else
- btmp = lhs.block(i-3,i+1,4,size-1-i) * other.col(c).end(size-1-i);
-
- /* Let's process the 4x4 sub-matrix as usual.
- * btmp stores the diagonal coefficients used to update the remaining part of the result.
- */
- {
- Scalar tmp = other.coeff(startBlock,c)-btmp.coeff(IsLowerTriangular?0:3);
- if (Lhs::Flags & UnitDiagBit)
- other.coeffRef(i,c) = tmp;
- else
- other.coeffRef(i,c) = tmp/lhs.coeff(i,i);
- }
-
- i += IsLowerTriangular ? 1 : -1;
- for (;IsLowerTriangular ? i<endBlock : i>endBlock; i += IsLowerTriangular ? 1 : -1)
- {
- int remainingSize = IsLowerTriangular ? i-startBlock : startBlock-i;
- Scalar tmp = other.coeff(i,c)
- - btmp.coeff(IsLowerTriangular ? remainingSize : 3-remainingSize)
- - ( lhs.row(i).segment(IsLowerTriangular ? startBlock : i+1, remainingSize)
- * other.col(c).segment(IsLowerTriangular ? startBlock : i+1, remainingSize)).coeff(0,0);
-
- if (Lhs::Flags & UnitDiagBit)
- other.coeffRef(i,c) = tmp;
- else
- other.coeffRef(i,c) = tmp/lhs.coeff(i,i);
- }
- }
- }
- }
-};
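
Stripped of the 4-row blocking, the lower-triangular branch above is ordinary forward substitution. A reference sketch in plain C++ (flat row-major storage and a non-zero diagonal are assumed; names are illustrative):

    #include <cstddef>
    #include <vector>

    // Solve L x = b for lower-triangular L (rows x rows, row-major), writing
    // the solution over b, without the 4-row blocking used above.
    void solve_lower_triangular(int rows, const std::vector<double>& L,
                                std::vector<double>& b)
    {
      for (int i = 0; i < rows; ++i) {
        const double* rowi = &L[static_cast<std::size_t>(i) * rows];
        double tmp = b[i];
        for (int k = 0; k < i; ++k)
          tmp -= rowi[k] * b[k];  // subtract the already-solved unknowns
        b[i] = tmp / rowi[i];
      }
    }
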
-
-// Implements the following configurations:
-// - inv(LowerTriangular, ColMajor) * Column vector
-// - inv(LowerTriangular,UnitDiag,ColMajor) * Column vector
-// - inv(UpperTriangular, ColMajor) * Column vector
-// - inv(UpperTriangular,UnitDiag,ColMajor) * Column vector
-template<typename Lhs, typename Rhs, int UpLo>
-struct ei_solve_triangular_selector<Lhs,Rhs,UpLo,ColMajor|IsDense>
-{
- typedef typename Rhs::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type Packet;
- enum { PacketSize = ei_packet_traits<Scalar>::size };
-
- static void run(const Lhs& lhs, Rhs& other)
- {
- static const bool IsLowerTriangular = (UpLo==LowerTriangular);
- const int size = lhs.cols();
- for(int c=0 ; c<other.cols() ; ++c)
- {
- /* let's perform the inverse product per block of 4 columns such that we perfectly match
- * our optimized matrix * vector product. blockyEnd represents the number of rows
- * we can process using the block version.
- */
- int blockyEnd = (std::max(size-5,0)/4)*4;
- if (!IsLowerTriangular)
- blockyEnd = size-1 - blockyEnd;
- for(int i=IsLowerTriangular ? 0 : size-1; IsLowerTriangular ? i<blockyEnd : i>blockyEnd;)
- {
- /* Let's process the 4x4 sub-matrix as usual.
- * btmp stores the diagonal coefficients used to update the remaining part of the result.
- */
- int startBlock = i;
- int endBlock = startBlock + (IsLowerTriangular ? 4 : -4);
- Matrix<Scalar,4,1> btmp;
- for (;IsLowerTriangular ? i<endBlock : i>endBlock;
- i += IsLowerTriangular ? 1 : -1)
- {
- if(!(Lhs::Flags & UnitDiagBit))
- other.coeffRef(i,c) /= lhs.coeff(i,i);
- int remainingSize = IsLowerTriangular ? endBlock-i-1 : i-endBlock-1;
- if (remainingSize>0)
- other.col(c).segment((IsLowerTriangular ? i : endBlock) + 1, remainingSize) -=
- other.coeffRef(i,c)
- * Block<Lhs,Dynamic,1>(lhs, (IsLowerTriangular ? i : endBlock) + 1, i, remainingSize, 1);
- btmp.coeffRef(IsLowerTriangular ? i-startBlock : remainingSize) = -other.coeffRef(i,c);
- }
-
- /* Now we can efficiently update the remaining part of the result as a matrix * vector product.
- * NOTE in order to reduce both compilation time and binary size, let's directly call
- * the fast product implementation. It is equivalent to the following code:
- * other.col(c).end(size-endBlock) += (lhs.block(endBlock, startBlock, size-endBlock, endBlock-startBlock)
- * * other.col(c).block(startBlock,endBlock-startBlock)).lazy();
- */
-        // FIXME this is cool, but what about conjugate/adjoint expressions? Do we want to evaluate them?
-        // This is a more general problem, though.
- ei_cache_friendly_product_colmajor_times_vector(
- IsLowerTriangular ? size-endBlock : endBlock+1,
- &(lhs.const_cast_derived().coeffRef(IsLowerTriangular ? endBlock : 0, IsLowerTriangular ? startBlock : endBlock+1)),
- lhs.stride(),
- btmp, &(other.coeffRef(IsLowerTriangular ? endBlock : 0, c)));
-// if (IsLowerTriangular)
-// other.col(c).end(size-endBlock) += (lhs.block(endBlock, startBlock, size-endBlock, endBlock-startBlock)
-// * other.col(c).block(startBlock,endBlock-startBlock)).lazy();
-// else
-// other.col(c).end(size-endBlock) += (lhs.block(endBlock, startBlock, size-endBlock, endBlock-startBlock)
-// * other.col(c).block(startBlock,endBlock-startBlock)).lazy();
- }
-
- /* Now we have to process the remaining part as usual */
- int i;
- for(i=blockyEnd; IsLowerTriangular ? i<size-1 : i>0; i += (IsLowerTriangular ? 1 : -1) )
- {
- if(!(Lhs::Flags & UnitDiagBit))
- other.coeffRef(i,c) /= lhs.coeff(i,i);
-
- /* NOTE we cannot use lhs.col(i).end(size-i-1) because Part::coeffRef gets called by .col() to
- * get the address of the start of the row
- */
- if(IsLowerTriangular)
- other.col(c).end(size-i-1) -= other.coeffRef(i,c) * Block<Lhs,Dynamic,1>(lhs, i+1,i, size-i-1,1);
- else
- other.col(c).start(i) -= other.coeffRef(i,c) * Block<Lhs,Dynamic,1>(lhs, 0,i, i, 1);
- }
- if(!(Lhs::Flags & UnitDiagBit))
- other.coeffRef(i,c) /= lhs.coeff(i,i);
- }
- }
-};
-
-/** "in-place" version of MatrixBase::solveTriangular() where the result is written in \a other
- *
- * \nonstableyet
- *
- * The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
- * This function will const_cast it, so constness isn't honored here.
- *
- * See MatrixBase::solveTriangular() for the details.
- */
-template<typename Derived>
-template<typename OtherDerived>
-void MatrixBase<Derived>::solveTriangularInPlace(const MatrixBase<OtherDerived>& _other) const
-{
- MatrixBase<OtherDerived>& other = _other.const_cast_derived();
- ei_assert(derived().cols() == derived().rows());
- ei_assert(derived().cols() == other.rows());
- ei_assert(!(Flags & ZeroDiagBit));
- ei_assert(Flags & (UpperTriangularBit|LowerTriangularBit));
-
- enum { copy = ei_traits<OtherDerived>::Flags & RowMajorBit };
-
- typedef typename ei_meta_if<copy,
- typename ei_plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::ret OtherCopy;
- OtherCopy otherCopy(other.derived());
-
- ei_solve_triangular_selector<Derived, typename ei_unref<OtherCopy>::type>::run(derived(), otherCopy);
-
- if (copy)
- other = otherCopy;
-}
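
A usage sketch of the in-place solver (Eigen2-era API, an assumption of this note): as the documentation below explains, the matrix must carry a triangular flag, e.g. via marked<>(), before solving.

    #include <Eigen/Core>

    // Mark A as lower triangular so the Flags carry LowerTriangularBit, then
    // overwrite b with the solution of A x = b.
    void solve_in_place(const Eigen::MatrixXf& A, Eigen::VectorXf& b) {
      A.marked<Eigen::LowerTriangular>().solveTriangularInPlace(b);
    }
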
-
-/** \returns the product of the inverse of \c *this with \a other, \a *this being triangular.
- *
- * \nonstableyet
- *
- * This function computes the inverse-matrix matrix product inverse(\c *this) * \a other.
- * The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
- * diagonal must be non-zero). It works as a forward (resp. backward) substitution if \c *this
- * is a lower (resp. upper) triangular matrix.
- *
- * It is required that \c *this be marked as either an upper or a lower triangular matrix, which
- * can be done by marked(), and that is automatically the case with expressions such as those returned
- * by extract().
- *
- * \addexample SolveTriangular \label How to solve a triangular system (aka. how to multiply the inverse of a triangular matrix by another one)
- *
- * Example: \include MatrixBase_marked.cpp
- * Output: \verbinclude MatrixBase_marked.out
- *
- * This function is essentially a wrapper around the faster solveTriangularInPlace() function: it creates
- * a temporary copy of \a other, calls solveTriangularInPlace() on the copy, and returns it.
- * Therefore, if \a other is not needed anymore, it is significantly faster to call solveTriangularInPlace()
- * instead of solveTriangular().
- *
- * For users coming from BLAS, this function (and more specifically solveTriangularInPlace()) offer
- * all the operations supported by the \c *TRSV and \c *TRSM BLAS routines.
- *
- * \b Tip: to perform a \em "right-inverse-multiply" you can simply transpose the operation, e.g.:
- * \code
- * M * T^-1 <=> T.transpose().solveTriangularInPlace(M.transpose());
- * \endcode
- *
- * \sa solveTriangularInPlace(), marked(), extract()
- */
-template<typename Derived>
-template<typename OtherDerived>
-typename ei_plain_matrix_type_column_major<OtherDerived>::type
-MatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
-{
- typename ei_plain_matrix_type_column_major<OtherDerived>::type res(other);
- solveTriangularInPlace(res);
- return res;
-}
-
-#endif // EIGEN_SOLVETRIANGULAR_H
diff --git a/extern/Eigen2/Eigen/src/Core/Sum.h b/extern/Eigen2/Eigen/src/Core/Sum.h
deleted file mode 100644
index 6d7e9959fa5..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Sum.h
+++ /dev/null
@@ -1,271 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SUM_H
-#define EIGEN_SUM_H
-
-/***************************************************************************
-* Part 1 : the logic deciding a strategy for vectorization and unrolling
-***************************************************************************/
-
-template<typename Derived>
-struct ei_sum_traits
-{
-private:
- enum {
- PacketSize = ei_packet_traits<typename Derived::Scalar>::size
- };
-
-public:
- enum {
- Vectorization = (int(Derived::Flags)&ActualPacketAccessBit)
- && (int(Derived::Flags)&LinearAccessBit)
- ? LinearVectorization
- : NoVectorization
- };
-
-private:
- enum {
- Cost = Derived::SizeAtCompileTime * Derived::CoeffReadCost
- + (Derived::SizeAtCompileTime-1) * NumTraits<typename Derived::Scalar>::AddCost,
- UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize))
- };
-
-public:
- enum {
- Unrolling = Cost <= UnrollingLimit
- ? CompleteUnrolling
- : NoUnrolling
- };
-};
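
A worked instance of the decision above, with illustrative numbers (a 4x1 float vector, CoeffReadCost and AddCost of 1, an EIGEN_UNROLLING_LIMIT of 100, and an SSE packet size of 4):

    // With these assumed numbers, the cost model picks CompleteUnrolling:
    enum {
      Cost           = 4 * 1 + (4 - 1) * 1,   // = 7
      UnrollingLimit = 100 * 4,               // vectorized, so limit * PacketSize
      Unrolled       = Cost <= UnrollingLimit // 7 <= 400 -> CompleteUnrolling
    };
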
-
-/***************************************************************************
-* Part 2 : unrollers
-***************************************************************************/
-
-/*** no vectorization ***/
-
-template<typename Derived, int Start, int Length>
-struct ei_sum_novec_unroller
-{
- enum {
- HalfLength = Length/2
- };
-
- typedef typename Derived::Scalar Scalar;
-
- inline static Scalar run(const Derived &mat)
- {
- return ei_sum_novec_unroller<Derived, Start, HalfLength>::run(mat)
- + ei_sum_novec_unroller<Derived, Start+HalfLength, Length-HalfLength>::run(mat);
- }
-};
-
-template<typename Derived, int Start>
-struct ei_sum_novec_unroller<Derived, Start, 1>
-{
- enum {
- col = Start / Derived::RowsAtCompileTime,
- row = Start % Derived::RowsAtCompileTime
- };
-
- typedef typename Derived::Scalar Scalar;
-
- inline static Scalar run(const Derived &mat)
- {
- return mat.coeff(row, col);
- }
-};
-
-/*** vectorization ***/
-
-template<typename Derived, int Start, int Length>
-struct ei_sum_vec_unroller
-{
- enum {
- PacketSize = ei_packet_traits<typename Derived::Scalar>::size,
- HalfLength = Length/2
- };
-
- typedef typename Derived::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
-
- inline static PacketScalar run(const Derived &mat)
- {
- return ei_padd(
- ei_sum_vec_unroller<Derived, Start, HalfLength>::run(mat),
- ei_sum_vec_unroller<Derived, Start+HalfLength, Length-HalfLength>::run(mat) );
- }
-};
-
-template<typename Derived, int Start>
-struct ei_sum_vec_unroller<Derived, Start, 1>
-{
- enum {
- index = Start * ei_packet_traits<typename Derived::Scalar>::size,
- row = int(Derived::Flags)&RowMajorBit
- ? index / int(Derived::ColsAtCompileTime)
- : index % Derived::RowsAtCompileTime,
- col = int(Derived::Flags)&RowMajorBit
- ? index % int(Derived::ColsAtCompileTime)
- : index / Derived::RowsAtCompileTime,
- alignment = (Derived::Flags & AlignedBit) ? Aligned : Unaligned
- };
-
- typedef typename Derived::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
-
- inline static PacketScalar run(const Derived &mat)
- {
- return mat.template packet<alignment>(row, col);
- }
-};
-
-/***************************************************************************
-* Part 3 : implementation of all cases
-***************************************************************************/
-
-template<typename Derived,
- int Vectorization = ei_sum_traits<Derived>::Vectorization,
- int Unrolling = ei_sum_traits<Derived>::Unrolling
->
-struct ei_sum_impl;
-
-template<typename Derived>
-struct ei_sum_impl<Derived, NoVectorization, NoUnrolling>
-{
- typedef typename Derived::Scalar Scalar;
- static Scalar run(const Derived& mat)
- {
-    ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using an uninitialized matrix");
- Scalar res;
- res = mat.coeff(0, 0);
- for(int i = 1; i < mat.rows(); ++i)
- res += mat.coeff(i, 0);
- for(int j = 1; j < mat.cols(); ++j)
- for(int i = 0; i < mat.rows(); ++i)
- res += mat.coeff(i, j);
- return res;
- }
-};
-
-template<typename Derived>
-struct ei_sum_impl<Derived, NoVectorization, CompleteUnrolling>
- : public ei_sum_novec_unroller<Derived, 0, Derived::SizeAtCompileTime>
-{};
-
-template<typename Derived>
-struct ei_sum_impl<Derived, LinearVectorization, NoUnrolling>
-{
- typedef typename Derived::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
-
- static Scalar run(const Derived& mat)
- {
- const int size = mat.size();
- const int packetSize = ei_packet_traits<Scalar>::size;
- const int alignedStart = (Derived::Flags & AlignedBit)
- || !(Derived::Flags & DirectAccessBit)
- ? 0
- : ei_alignmentOffset(&mat.const_cast_derived().coeffRef(0), size);
- enum {
- alignment = (Derived::Flags & DirectAccessBit) || (Derived::Flags & AlignedBit)
- ? Aligned : Unaligned
- };
- const int alignedSize = ((size-alignedStart)/packetSize)*packetSize;
- const int alignedEnd = alignedStart + alignedSize;
- Scalar res;
-
- if(alignedSize)
- {
- PacketScalar packet_res = mat.template packet<alignment>(alignedStart);
- for(int index = alignedStart + packetSize; index < alignedEnd; index += packetSize)
- packet_res = ei_padd(packet_res, mat.template packet<alignment>(index));
- res = ei_predux(packet_res);
- }
- else // too small to vectorize anything.
-         // since this is dynamic-size, hence inefficient anyway for such small sizes, don't try to optimize.
- {
- res = Scalar(0);
- }
-
- for(int index = 0; index < alignedStart; ++index)
- res += mat.coeff(index);
-
- for(int index = alignedEnd; index < size; ++index)
- res += mat.coeff(index);
-
- return res;
- }
-};
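
The loop structure above peels scalars before the first aligned element, runs the packet body, then mops up the tail. A scalar model of that structure (a width of 4 stands in for the packet size; names are illustrative):

    #include <vector>

    // Unaligned prologue + "packet" main body + scalar tail, in the same
    // order as the implementation above accumulates them.
    double sum_with_peeling(const std::vector<double>& v, int alignedStart)
    {
      const int size = static_cast<int>(v.size());
      const int packetSize = 4;
      const int alignedSize = ((size - alignedStart) / packetSize) * packetSize;
      const int alignedEnd = alignedStart + alignedSize;

      double res = 0;
      for (int i = alignedStart; i < alignedEnd; ++i)  // "vectorized" main body
        res += v[i];
      for (int i = 0; i < alignedStart; ++i)           // unaligned prologue
        res += v[i];
      for (int i = alignedEnd; i < size; ++i)          // remaining tail
        res += v[i];
      return res;
    }
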
-
-template<typename Derived>
-struct ei_sum_impl<Derived, LinearVectorization, CompleteUnrolling>
-{
- typedef typename Derived::Scalar Scalar;
- typedef typename ei_packet_traits<Scalar>::type PacketScalar;
- enum {
- PacketSize = ei_packet_traits<Scalar>::size,
- Size = Derived::SizeAtCompileTime,
- VectorizationSize = (Size / PacketSize) * PacketSize
- };
- static Scalar run(const Derived& mat)
- {
- Scalar res = ei_predux(ei_sum_vec_unroller<Derived, 0, Size / PacketSize>::run(mat));
- if (VectorizationSize != Size)
- res += ei_sum_novec_unroller<Derived, VectorizationSize, Size-VectorizationSize>::run(mat);
- return res;
- }
-};
-
-/***************************************************************************
-* Part 4 : implementation of MatrixBase methods
-***************************************************************************/
-
-/** \returns the sum of all coefficients of *this
- *
- * \sa trace()
- */
-template<typename Derived>
-inline typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::sum() const
-{
- return ei_sum_impl<Derived>::run(derived());
-}
-
-/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
- *
- * \c *this can be any matrix, not necessarily square.
- *
- * \sa diagonal(), sum()
- */
-template<typename Derived>
-inline typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::trace() const
-{
- return diagonal().sum();
-}
-
-#endif // EIGEN_SUM_H
diff --git a/extern/Eigen2/Eigen/src/Core/Transpose.h b/extern/Eigen2/Eigen/src/Core/Transpose.h
deleted file mode 100644
index 870edfe320b..00000000000
--- a/extern/Eigen2/Eigen/src/Core/Transpose.h
+++ /dev/null
@@ -1,228 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_TRANSPOSE_H
-#define EIGEN_TRANSPOSE_H
-
-/** \class Transpose
- *
- * \brief Expression of the transpose of a matrix
- *
- * \param MatrixType the type of the object of which we are taking the transpose
- *
- * This class represents an expression of the transpose of a matrix.
- * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
- * and most of the time this is the only way it is used.
- *
- * \sa MatrixBase::transpose(), MatrixBase::adjoint()
- */
-template<typename MatrixType>
-struct ei_traits<Transpose<MatrixType> >
-{
- typedef typename MatrixType::Scalar Scalar;
- typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
- typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
- enum {
- RowsAtCompileTime = MatrixType::ColsAtCompileTime,
- ColsAtCompileTime = MatrixType::RowsAtCompileTime,
- MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
- MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
- Flags = ((int(_MatrixTypeNested::Flags) ^ RowMajorBit)
- & ~(LowerTriangularBit | UpperTriangularBit))
- | (int(_MatrixTypeNested::Flags)&UpperTriangularBit ? LowerTriangularBit : 0)
- | (int(_MatrixTypeNested::Flags)&LowerTriangularBit ? UpperTriangularBit : 0),
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost
- };
-};
-
-template<typename MatrixType> class Transpose
- : public MatrixBase<Transpose<MatrixType> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
-
- inline Transpose(const MatrixType& matrix) : m_matrix(matrix) {}
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
-
- inline int rows() const { return m_matrix.cols(); }
- inline int cols() const { return m_matrix.rows(); }
- inline int nonZeros() const { return m_matrix.nonZeros(); }
- inline int stride(void) const { return m_matrix.stride(); }
-
- inline Scalar& coeffRef(int row, int col)
- {
- return m_matrix.const_cast_derived().coeffRef(col, row);
- }
-
- inline const Scalar coeff(int row, int col) const
- {
- return m_matrix.coeff(col, row);
- }
-
- inline const Scalar coeff(int index) const
- {
- return m_matrix.coeff(index);
- }
-
- inline Scalar& coeffRef(int index)
- {
- return m_matrix.const_cast_derived().coeffRef(index);
- }
-
- template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
- {
- return m_matrix.template packet<LoadMode>(col, row);
- }
-
- template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
- {
- m_matrix.const_cast_derived().template writePacket<LoadMode>(col, row, x);
- }
-
- template<int LoadMode>
- inline const PacketScalar packet(int index) const
- {
- return m_matrix.template packet<LoadMode>(index);
- }
-
- template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
- {
- m_matrix.const_cast_derived().template writePacket<LoadMode>(index, x);
- }
-
- protected:
- const typename MatrixType::Nested m_matrix;
-};
-
-/** \returns an expression of the transpose of *this.
- *
- * Example: \include MatrixBase_transpose.cpp
- * Output: \verbinclude MatrixBase_transpose.out
- *
- * \warning If you want to replace a matrix by its own transpose, do \b NOT do this:
- * \code
- * m = m.transpose(); // bug!!! caused by aliasing effect
- * \endcode
- * Instead, use the transposeInPlace() method:
- * \code
- * m.transposeInPlace();
- * \endcode
- * which gives Eigen good opportunities for optimization, or alternatively you can also do:
- * \code
- * m = m.transpose().eval();
- * \endcode
- *
- * \sa transposeInPlace(), adjoint() */
-template<typename Derived>
-inline Transpose<Derived>
-MatrixBase<Derived>::transpose()
-{
- return derived();
-}
-
-/** This is the const version of transpose().
- *
- * Make sure you read the warning for transpose() !
- *
- * \sa transposeInPlace(), adjoint() */
-template<typename Derived>
-inline const Transpose<Derived>
-MatrixBase<Derived>::transpose() const
-{
- return derived();
-}
-
-/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this.
- *
- * Example: \include MatrixBase_adjoint.cpp
- * Output: \verbinclude MatrixBase_adjoint.out
- *
- * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this:
- * \code
- * m = m.adjoint(); // bug!!! caused by aliasing effect
- * \endcode
- * Instead, do:
- * \code
- * m = m.adjoint().eval();
- * \endcode
- *
- * \sa transpose(), conjugate(), class Transpose, class ei_scalar_conjugate_op */
-template<typename Derived>
-inline const typename MatrixBase<Derived>::AdjointReturnType
-MatrixBase<Derived>::adjoint() const
-{
- return conjugate().nestByValue();
-}
-
-/***************************************************************************
-* "in place" transpose implementation
-***************************************************************************/
-
-template<typename MatrixType,
- bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic>
-struct ei_inplace_transpose_selector;
-
-template<typename MatrixType>
-struct ei_inplace_transpose_selector<MatrixType,true> { // square matrix
- static void run(MatrixType& m) {
- m.template part<StrictlyUpperTriangular>().swap(m.transpose());
- }
-};
-
-template<typename MatrixType>
-struct ei_inplace_transpose_selector<MatrixType,false> { // non square matrix
- static void run(MatrixType& m) {
- if (m.rows()==m.cols())
- m.template part<StrictlyUpperTriangular>().swap(m.transpose());
- else
- m = m.transpose().eval();
- }
-};
-
-/** This is the "in place" version of transpose: it transposes \c *this.
- *
- * In most cases it is probably better to simply use the transposed expression
- * of a matrix. However, when transposing the matrix data itself is really needed,
- * then this "in-place" version is probably the right choice because it provides
- * the following additional features:
- * - less error prone: doing the same operation with .transpose() requires special care:
- * \code m = m.transpose().eval(); \endcode
- *    - no temporary object is created (currently only for square matrices)
- * - it allows future optimizations (cache friendliness, etc.)
- *
- * \note if the matrix is not square, then \c *this must be a resizable matrix.
- *
- * \sa transpose(), adjoint() */
-template<typename Derived>
-inline void MatrixBase<Derived>::transposeInPlace()
-{
- ei_inplace_transpose_selector<Derived>::run(derived());
-}
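
A usage sketch of the aliasing rules documented above (Eigen2-era API, an assumption of this note):

    #include <Eigen/Core>

    // Replace a matrix by its own transpose: in place, or via explicit eval().
    void flip(Eigen::MatrixXf& m) {
      m.transposeInPlace();         // safe; temporary-free when m is square
      // m = m.transpose();         // WRONG: aliasing, reads m while writing it
      // m = m.transpose().eval();  // also safe, but builds a temporary
    }
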
-
-#endif // EIGEN_TRANSPOSE_H
diff --git a/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h b/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h
deleted file mode 100644
index 4de3b5e2e0b..00000000000
--- a/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ /dev/null
@@ -1,354 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Konstantinos Margaritis <markos@codex.gr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_PACKET_MATH_ALTIVEC_H
-#define EIGEN_PACKET_MATH_ALTIVEC_H
-
-#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
-#endif
-
-typedef __vector float v4f;
-typedef __vector int v4i;
-typedef __vector unsigned int v4ui;
-typedef __vector __bool int v4bi;
-
-// We don't want to write the same code all the time, but we need to reuse the constants
-// and it doesn't really work to declare them global, so we define macros instead
-
-#define USE_CONST_v0i const v4i v0i = vec_splat_s32(0)
-#define USE_CONST_v1i const v4i v1i = vec_splat_s32(1)
-#define USE_CONST_v16i_ const v4i v16i_ = vec_splat_s32(-16)
-#define USE_CONST_v0f USE_CONST_v0i; const v4f v0f = (v4f) v0i
-#define USE_CONST_v1f USE_CONST_v1i; const v4f v1f = vec_ctf(v1i, 0)
-#define USE_CONST_v1i_ const v4ui v1i_ = vec_splat_u32(-1)
-#define USE_CONST_v0f_ USE_CONST_v1i_; const v4f v0f_ = (v4f) vec_sl(v1i_, v1i_)
-
-template<> struct ei_packet_traits<float> { typedef v4f type; enum {size=4}; };
-template<> struct ei_packet_traits<int> { typedef v4i type; enum {size=4}; };
-
-template<> struct ei_unpacket_traits<v4f> { typedef float type; enum {size=4}; };
-template<> struct ei_unpacket_traits<v4i> { typedef int type; enum {size=4}; };
-
-inline std::ostream & operator <<(std::ostream & s, const v4f & v)
-{
- union {
- v4f v;
- float n[4];
- } vt;
- vt.v = v;
- s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
- return s;
-}
-
-inline std::ostream & operator <<(std::ostream & s, const v4i & v)
-{
- union {
- v4i v;
- int n[4];
- } vt;
- vt.v = v;
- s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
- return s;
-}
-
-inline std::ostream & operator <<(std::ostream & s, const v4ui & v)
-{
- union {
- v4ui v;
- unsigned int n[4];
- } vt;
- vt.v = v;
- s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
- return s;
-}
-
-inline std::ostream & operator <<(std::ostream & s, const v4bi & v)
-{
- union {
- __vector __bool int v;
- unsigned int n[4];
- } vt;
- vt.v = v;
- s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
- return s;
-}
-
-template<> inline v4f ei_padd(const v4f& a, const v4f& b) { return vec_add(a,b); }
-template<> inline v4i ei_padd(const v4i& a, const v4i& b) { return vec_add(a,b); }
-
-template<> inline v4f ei_psub(const v4f& a, const v4f& b) { return vec_sub(a,b); }
-template<> inline v4i ei_psub(const v4i& a, const v4i& b) { return vec_sub(a,b); }
-
-template<> inline v4f ei_pmul(const v4f& a, const v4f& b) { USE_CONST_v0f; return vec_madd(a,b, v0f); }
-template<> inline v4i ei_pmul(const v4i& a, const v4i& b)
-{
- // Detailed in: http://freevec.org/content/32bit_signed_integer_multiplication_altivec
-  // Set up constants and variables
- v4i a1, b1, bswap, low_prod, high_prod, prod, prod_, v1sel;
- USE_CONST_v0i;
- USE_CONST_v1i;
- USE_CONST_v16i_;
-
- // Get the absolute values
- a1 = vec_abs(a);
- b1 = vec_abs(b);
-
- // Get the signs using xor
- v4bi sgn = (v4bi) vec_cmplt(vec_xor(a, b), v0i);
-
-  // Do the multiplication for the absolute values.
- bswap = (v4i) vec_rl((v4ui) b1, (v4ui) v16i_ );
- low_prod = vec_mulo((__vector short)a1, (__vector short)b1);
- high_prod = vec_msum((__vector short)a1, (__vector short)bswap, v0i);
- high_prod = (v4i) vec_sl((v4ui) high_prod, (v4ui) v16i_);
- prod = vec_add( low_prod, high_prod );
-
- // NOR the product and select only the negative elements according to the sign mask
- prod_ = vec_nor(prod, prod);
- prod_ = vec_sel(v0i, prod_, sgn);
-
- // Add 1 to the result to get the negative numbers
- v1sel = vec_sel(v0i, v1i, sgn);
- prod_ = vec_add(prod_, v1sel);
-
- // Merge the results back to the final vector.
- prod = vec_sel(prod, prod_, sgn);
-
- return prod;
-}
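
The vec_mulo/vec_msum sequence above assembles each 32-bit product from 16-bit partial products, since AltiVec lacks a 32-bit integer multiply; the sign handling is layered on top via the absolute values and the final select. A scalar model of the 16-bit decomposition alone (hypothetical helper, unsigned arithmetic):

    #include <cstdint>

    // With a = ah*2^16 + al and b = bh*2^16 + bl,
    //   a*b mod 2^32 = al*bl + ((ah*bl + al*bh) << 16),
    // because the ah*bh term is shifted past bit 31 and wraps away.
    static inline uint32_t mul32_via_16(uint32_t a, uint32_t b)
    {
      uint32_t al = a & 0xFFFFu, ah = a >> 16;
      uint32_t bl = b & 0xFFFFu, bh = b >> 16;
      uint32_t low  = al * bl;                    // vec_mulo on the low halves
      uint32_t high = (ah * bl + al * bh) << 16;  // vec_msum on the swapped operand
      return low + high;
    }
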
-
-template<> inline v4f ei_pdiv(const v4f& a, const v4f& b) {
- v4f t, y_0, y_1, res;
- USE_CONST_v0f;
- USE_CONST_v1f;
-
-  // AltiVec does not offer a divide instruction, so we have to use a reciprocal approximation
- y_0 = vec_re(b);
-
- // Do one Newton-Raphson iteration to get the needed accuracy
- t = vec_nmsub(y_0, b, v1f);
- y_1 = vec_madd(y_0, t, y_0);
-
- res = vec_madd(a, y_1, v0f);
- return res;
-}
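
The scalar recurrence behind the three vector operations above: refine a rough reciprocal estimate y0 of 1/b with one Newton-Raphson step, then multiply. A sketch with plain floats (the initial estimate is assumed supplied, as vec_re does on AltiVec; the helper name is hypothetical):

    // One Newton-Raphson refinement of the reciprocal, then a * (1/b).
    static inline float div_newton_raphson(float a, float b, float y0 /* ~1/b */)
    {
      float t  = 1.0f - y0 * b;  // vec_nmsub(y_0, b, v1f)
      float y1 = y0 + y0 * t;    // vec_madd(y_0, t, y_0)
      return a * y1;             // vec_madd(a, y_1, v0f)
    }
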
-
-template<> inline v4f ei_pmadd(const v4f& a, const v4f& b, const v4f& c) { return vec_madd(a, b, c); }
-
-template<> inline v4f ei_pmin(const v4f& a, const v4f& b) { return vec_min(a,b); }
-template<> inline v4i ei_pmin(const v4i& a, const v4i& b) { return vec_min(a,b); }
-
-template<> inline v4f ei_pmax(const v4f& a, const v4f& b) { return vec_max(a,b); }
-template<> inline v4i ei_pmax(const v4i& a, const v4i& b) { return vec_max(a,b); }
-
-template<> inline v4f ei_pload(const float* from) { return vec_ld(0, from); }
-template<> inline v4i ei_pload(const int* from) { return vec_ld(0, from); }
-
-template<> inline v4f ei_ploadu(const float* from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- __vector unsigned char MSQ, LSQ;
- __vector unsigned char mask;
- MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword
- mask = vec_lvsl(0, from); // create the permute mask
- return (v4f) vec_perm(MSQ, LSQ, mask); // align the data
-}
-
-template<> inline v4i ei_ploadu(const int* from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- __vector unsigned char MSQ, LSQ;
- __vector unsigned char mask;
- MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword
- mask = vec_lvsl(0, from); // create the permute mask
- return (v4i) vec_perm(MSQ, LSQ, mask); // align the data
-}
-
-template<> inline v4f ei_pset1(const float& from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
-  float __attribute__((aligned(16))) af[4];
- af[0] = from;
- v4f vc = vec_ld(0, af);
- vc = vec_splat(vc, 0);
- return vc;
-}
-
-template<> inline v4i ei_pset1(const int& from)
-{
-  int __attribute__((aligned(16))) ai[4];
- ai[0] = from;
- v4i vc = vec_ld(0, ai);
- vc = vec_splat(vc, 0);
- return vc;
-}
-
-template<> inline void ei_pstore(float* to, const v4f& from) { vec_st(from, 0, to); }
-template<> inline void ei_pstore(int* to, const v4i& from) { vec_st(from, 0, to); }
-
-template<> inline void ei_pstoreu(float* to, const v4f& from)
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- // Warning: not thread safe!
- __vector unsigned char MSQ, LSQ, edges;
- __vector unsigned char edgeAlign, align;
-
- MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword
- edgeAlign = vec_lvsl(0, to); // permute map to extract edges
- edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges
- align = vec_lvsr( 0, to ); // permute map to misalign data
- MSQ = vec_perm(edges,(__vector unsigned char)from,align); // misalign the data (MSQ)
- LSQ = vec_perm((__vector unsigned char)from,edges,align); // misalign the data (LSQ)
- vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first
- vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
-}
-
-template<> inline void ei_pstoreu(int* to , const v4i& from )
-{
- // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
- // Warning: not thread safe!
- __vector unsigned char MSQ, LSQ, edges;
- __vector unsigned char edgeAlign, align;
-
- MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword
- LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword
- edgeAlign = vec_lvsl(0, to); // permute map to extract edges
- edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges
- align = vec_lvsr( 0, to ); // permute map to misalign data
- MSQ = vec_perm(edges,(__vector unsigned char)from,align); // misalign the data (MSQ)
- LSQ = vec_perm((__vector unsigned char)from,edges,align); // misalign the data (LSQ)
- vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first
- vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
-}
-
-template<> inline float ei_pfirst(const v4f& a)
-{
-  float __attribute__((aligned(16))) af[4];
- vec_st(a, 0, af);
- return af[0];
-}
-
-template<> inline int ei_pfirst(const v4i& a)
-{
-  int __attribute__((aligned(16))) ai[4];
- vec_st(a, 0, ai);
- return ai[0];
-}
-
-inline v4f ei_preduxp(const v4f* vecs)
-{
- v4f v[4], sum[4];
-
-  // It's easier and faster to transpose, then add as columns
- // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
- // Do the transpose, first set of moves
- v[0] = vec_mergeh(vecs[0], vecs[2]);
- v[1] = vec_mergel(vecs[0], vecs[2]);
- v[2] = vec_mergeh(vecs[1], vecs[3]);
- v[3] = vec_mergel(vecs[1], vecs[3]);
- // Get the resulting vectors
- sum[0] = vec_mergeh(v[0], v[2]);
- sum[1] = vec_mergel(v[0], v[2]);
- sum[2] = vec_mergeh(v[1], v[3]);
- sum[3] = vec_mergel(v[1], v[3]);
-
- // Now do the summation:
- // Lines 0+1
- sum[0] = vec_add(sum[0], sum[1]);
- // Lines 2+3
- sum[1] = vec_add(sum[2], sum[3]);
- // Add the results
- sum[0] = vec_add(sum[0], sum[1]);
- return sum[0];
-}
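
The contract of ei_preduxp is easier to see in scalar form: lane i of the result is the horizontal sum of input packet i. A hypothetical scalar reference (not Eigen code) against which the transpose-and-add version above can be checked:

    // Lane i of the output is the sum of the four lanes of vecs[i].
    inline void preduxp_ref(const float vecs[4][4], float out[4])
    {
        for (int i = 0; i < 4; ++i)
            out[i] = vecs[i][0] + vecs[i][1] + vecs[i][2] + vecs[i][3];
    }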
-
-inline float ei_predux(const v4f& a)
-{
- v4f b, sum;
- b = (v4f)vec_sld(a, a, 8);
- sum = vec_add(a, b);
- b = (v4f)vec_sld(sum, sum, 4);
- sum = vec_add(sum, b);
- return ei_pfirst(sum);
-}
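
The two vec_sld steps above implement a two-step tree reduction: rotating by 8 bytes pairs lanes {0,2} and {1,3}, and rotating by 4 bytes then combines the two partial sums. In scalar terms (illustration only):

    inline float predux_ref(const float a[4])
    {
        float s02 = a[0] + a[2]; // after vec_sld(a, a, 8), lane 0 holds a0 + a2
        float s13 = a[1] + a[3]; // ...and lane 1 holds a1 + a3
        return s02 + s13;        // after vec_sld(sum, sum, 4), lane 0 holds the total
    }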
-
-inline v4i ei_preduxp(const v4i* vecs)
-{
- v4i v[4], sum[4];
-
- // It's easier and faster to transpose first and then add the columns.
- // See http://www.freevec.org/function/matrix_4x4_transpose_floats for an explanation.
- // Do the transpose, first set of moves
- v[0] = vec_mergeh(vecs[0], vecs[2]);
- v[1] = vec_mergel(vecs[0], vecs[2]);
- v[2] = vec_mergeh(vecs[1], vecs[3]);
- v[3] = vec_mergel(vecs[1], vecs[3]);
- // Get the resulting vectors
- sum[0] = vec_mergeh(v[0], v[2]);
- sum[1] = vec_mergel(v[0], v[2]);
- sum[2] = vec_mergeh(v[1], v[3]);
- sum[3] = vec_mergel(v[1], v[3]);
-
- // Now do the summation:
- // Lines 0+1
- sum[0] = vec_add(sum[0], sum[1]);
- // Lines 2+3
- sum[1] = vec_add(sum[2], sum[3]);
- // Add the results
- sum[0] = vec_add(sum[0], sum[1]);
- return sum[0];
-}
-
-inline int ei_predux(const v4i& a)
-{
- USE_CONST_v0i;
- v4i sum;
- sum = vec_sums(a, v0i);
- sum = vec_sld(sum, v0i, 12);
- return ei_pfirst(sum);
-}
-
-template<int Offset>
-struct ei_palign_impl<Offset, v4f>
-{
- inline static void run(v4f& first, const v4f& second)
- {
- first = vec_sld(first, second, Offset*4);
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset, v4i>
-{
- inline static void run(v4i& first, const v4i& second)
- {
- first = vec_sld(first, second, Offset*4);
- }
-};
-
-#endif // EIGEN_PACKET_MATH_ALTIVEC_H
diff --git a/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h b/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h
deleted file mode 100644
index 9ca65b9be5b..00000000000
--- a/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h
+++ /dev/null
@@ -1,321 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_PACKET_MATH_SSE_H
-#define EIGEN_PACKET_MATH_SSE_H
-
-#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
-#endif
-
-template<> struct ei_packet_traits<float> { typedef __m128 type; enum {size=4}; };
-template<> struct ei_packet_traits<double> { typedef __m128d type; enum {size=2}; };
-template<> struct ei_packet_traits<int> { typedef __m128i type; enum {size=4}; };
-
-template<> struct ei_unpacket_traits<__m128> { typedef float type; enum {size=4}; };
-template<> struct ei_unpacket_traits<__m128d> { typedef double type; enum {size=2}; };
-template<> struct ei_unpacket_traits<__m128i> { typedef int type; enum {size=4}; };
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pset1<float>(const float& from) { return _mm_set1_ps(from); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pset1<double>(const double& from) { return _mm_set1_pd(from); }
-template<> EIGEN_STRONG_INLINE __m128i ei_pset1<int>(const int& from) { return _mm_set1_epi32(from); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_padd<__m128>(const __m128& a, const __m128& b) { return _mm_add_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_padd<__m128d>(const __m128d& a, const __m128d& b) { return _mm_add_pd(a,b); }
-template<> EIGEN_STRONG_INLINE __m128i ei_padd<__m128i>(const __m128i& a, const __m128i& b) { return _mm_add_epi32(a,b); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_psub<__m128>(const __m128& a, const __m128& b) { return _mm_sub_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_psub<__m128d>(const __m128d& a, const __m128d& b) { return _mm_sub_pd(a,b); }
-template<> EIGEN_STRONG_INLINE __m128i ei_psub<__m128i>(const __m128i& a, const __m128i& b) { return _mm_sub_epi32(a,b); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pmul<__m128>(const __m128& a, const __m128& b) { return _mm_mul_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pmul<__m128d>(const __m128d& a, const __m128d& b) { return _mm_mul_pd(a,b); }
-template<> EIGEN_STRONG_INLINE __m128i ei_pmul<__m128i>(const __m128i& a, const __m128i& b)
-{
- return _mm_or_si128(
- _mm_and_si128(
- _mm_mul_epu32(a,b),
- _mm_setr_epi32(0xffffffff,0,0xffffffff,0)),
- _mm_slli_si128(
- _mm_and_si128(
- _mm_mul_epu32(_mm_srli_si128(a,4),_mm_srli_si128(b,4)),
- _mm_setr_epi32(0xffffffff,0,0xffffffff,0)), 4));
-}
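
SSE2 has no 32-bit lane-wise multiply (_mm_mullo_epi32 only arrived with SSE4.1), so the specialization above synthesizes one: _mm_mul_epu32 forms 64-bit products of the even lanes, the odd lanes are shifted into even position and multiplied the same way, and the low 32 bits of each product are masked out and recombined. Per lane this is ordinary wrapping 32-bit multiplication, as in this scalar model (illustration only):

    #include <cstdint>

    inline void pmul_epi32_ref(const std::uint32_t a[4], const std::uint32_t b[4],
                               std::uint32_t r[4])
    {
        for (int i = 0; i < 4; ++i)
            r[i] = std::uint32_t((std::uint64_t(a[i]) * b[i]) & 0xffffffffu); // low 32 bits
    }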
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pdiv<__m128>(const __m128& a, const __m128& b) { return _mm_div_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pdiv<__m128d>(const __m128d& a, const __m128d& b) { return _mm_div_pd(a,b); }
-template<> EIGEN_STRONG_INLINE __m128i ei_pdiv<__m128i>(const __m128i& /*a*/, const __m128i& /*b*/)
-{ ei_assert(false && "packet integer division is not supported by SSE");
- __m128i dummy = ei_pset1<int>(0);
- return dummy;
-}
-
-// for some weird reason, it has to be overloaded for integer packets
-template<> EIGEN_STRONG_INLINE __m128i ei_pmadd(const __m128i& a, const __m128i& b, const __m128i& c) { return ei_padd(ei_pmul(a,b), c); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pmin<__m128>(const __m128& a, const __m128& b) { return _mm_min_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pmin<__m128d>(const __m128d& a, const __m128d& b) { return _mm_min_pd(a,b); }
-// FIXME this vectorized min operator is likely to be slower than the standard one
-template<> EIGEN_STRONG_INLINE __m128i ei_pmin<__m128i>(const __m128i& a, const __m128i& b)
-{
- __m128i mask = _mm_cmplt_epi32(a,b);
- return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
-}
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pmax<__m128>(const __m128& a, const __m128& b) { return _mm_max_ps(a,b); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pmax<__m128d>(const __m128d& a, const __m128d& b) { return _mm_max_pd(a,b); }
-// FIXME this vectorized max operator is likely to be slower than the standard one
-template<> EIGEN_STRONG_INLINE __m128i ei_pmax<__m128i>(const __m128i& a, const __m128i& b)
-{
- __m128i mask = _mm_cmpgt_epi32(a,b);
- return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
-}
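
Both ei_pmin and ei_pmax above rely on the classic pre-SSE4.1 select idiom: the compare yields a lane mask of all-ones or all-zeros, and (mask & a) | (~mask & b) then picks a or b per lane. One 32-bit lane in scalar form (hypothetical helper):

    #include <cstdint>

    inline std::uint32_t select_lane(std::uint32_t mask, std::uint32_t a, std::uint32_t b)
    {
        return (mask & a) | (~mask & b); // mask is 0xffffffff or 0, per the comparison
    }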
-
-template<> EIGEN_STRONG_INLINE __m128 ei_pload<float>(const float* from) { return _mm_load_ps(from); }
-template<> EIGEN_STRONG_INLINE __m128d ei_pload<double>(const double* from) { return _mm_load_pd(from); }
-template<> EIGEN_STRONG_INLINE __m128i ei_pload<int>(const int* from) { return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
-
-template<> EIGEN_STRONG_INLINE __m128 ei_ploadu<float>(const float* from) { return _mm_loadu_ps(from); }
-// template<> EIGEN_STRONG_INLINE __m128 ei_ploadu(const float* from) {
-//   if (size_t(from)&0xF)
-//     return _mm_loadu_ps(from);
-//   else
-//     return _mm_load_ps(from);
-// }
-template<> EIGEN_STRONG_INLINE __m128d ei_ploadu<double>(const double* from) { return _mm_loadu_pd(from); }
-template<> EIGEN_STRONG_INLINE __m128i ei_ploadu<int>(const int* from) { return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from)); }
-
-template<> EIGEN_STRONG_INLINE void ei_pstore<float>(float* to, const __m128& from) { _mm_store_ps(to, from); }
-template<> EIGEN_STRONG_INLINE void ei_pstore<double>(double* to, const __m128d& from) { _mm_store_pd(to, from); }
-template<> EIGEN_STRONG_INLINE void ei_pstore<int>(int* to, const __m128i& from) { _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
-
-template<> EIGEN_STRONG_INLINE void ei_pstoreu<float>(float* to, const __m128& from) { _mm_storeu_ps(to, from); }
-template<> EIGEN_STRONG_INLINE void ei_pstoreu<double>(double* to, const __m128d& from) { _mm_storeu_pd(to, from); }
-template<> EIGEN_STRONG_INLINE void ei_pstoreu<int>(int* to, const __m128i& from) { _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
-
-#ifdef _MSC_VER
-// this fixes an internal compilation error with MSVC
-template<> EIGEN_STRONG_INLINE float ei_pfirst<__m128>(const __m128& a) { float x = _mm_cvtss_f32(a); return x; }
-template<> EIGEN_STRONG_INLINE double ei_pfirst<__m128d>(const __m128d& a) { double x = _mm_cvtsd_f64(a); return x; }
-template<> EIGEN_STRONG_INLINE int ei_pfirst<__m128i>(const __m128i& a) { int x = _mm_cvtsi128_si32(a); return x; }
-#else
-template<> EIGEN_STRONG_INLINE float ei_pfirst<__m128>(const __m128& a) { return _mm_cvtss_f32(a); }
-template<> EIGEN_STRONG_INLINE double ei_pfirst<__m128d>(const __m128d& a) { return _mm_cvtsd_f64(a); }
-template<> EIGEN_STRONG_INLINE int ei_pfirst<__m128i>(const __m128i& a) { return _mm_cvtsi128_si32(a); }
-#endif
-
-#ifdef __SSE3__
-// TODO implement SSE2 versions as well as integer versions
-template<> EIGEN_STRONG_INLINE __m128 ei_preduxp<__m128>(const __m128* vecs)
-{
- return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
-}
-template<> EIGEN_STRONG_INLINE __m128d ei_preduxp<__m128d>(const __m128d* vecs)
-{
- return _mm_hadd_pd(vecs[0], vecs[1]);
-}
-// SSSE3 version:
-// EIGEN_STRONG_INLINE __m128i ei_preduxp(const __m128i* vecs)
-// {
-// return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
-// }
-
-template<> EIGEN_STRONG_INLINE float ei_predux<__m128>(const __m128& a)
-{
- __m128 tmp0 = _mm_hadd_ps(a,a);
- return ei_pfirst(_mm_hadd_ps(tmp0, tmp0));
-}
-
-template<> EIGEN_STRONG_INLINE double ei_predux<__m128d>(const __m128d& a) { return ei_pfirst(_mm_hadd_pd(a, a)); }
-
-// SSSE3 version:
-// EIGEN_STRONG_INLINE int ei_predux(const __m128i& a)
-// {
-// __m128i tmp0 = _mm_hadd_epi32(a,a);
-// return ei_pfirst(_mm_hadd_epi32(tmp0, tmp0));
-// }
-#else
-// SSE2 versions
-template<> EIGEN_STRONG_INLINE float ei_predux<__m128>(const __m128& a)
-{
- __m128 tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
- return ei_pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
-}
-template<> EIGEN_STRONG_INLINE double ei_predux<__m128d>(const __m128d& a)
-{
- return ei_pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
-}
-
-template<> EIGEN_STRONG_INLINE __m128 ei_preduxp<__m128>(const __m128* vecs)
-{
- __m128 tmp0, tmp1, tmp2;
- tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
- tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
- tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
- tmp0 = _mm_add_ps(tmp0, tmp1);
- tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
- tmp1 = _mm_add_ps(tmp1, tmp2);
- tmp2 = _mm_movehl_ps(tmp1, tmp0);
- tmp0 = _mm_movelh_ps(tmp0, tmp1);
- return _mm_add_ps(tmp0, tmp2);
-}
-
-template<> EIGEN_STRONG_INLINE __m128d ei_preduxp<__m128d>(const __m128d* vecs)
-{
- return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
-}
-#endif // SSE3
-
-template<> EIGEN_STRONG_INLINE int ei_predux<__m128i>(const __m128i& a)
-{
- __m128i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
- return ei_pfirst(tmp) + ei_pfirst(_mm_shuffle_epi32(tmp, 1));
-}
-
-template<> EIGEN_STRONG_INLINE __m128i ei_preduxp<__m128i>(const __m128i* vecs)
-{
- __m128i tmp0, tmp1, tmp2;
- tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
- tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
- tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
- tmp0 = _mm_add_epi32(tmp0, tmp1);
- tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
- tmp1 = _mm_add_epi32(tmp1, tmp2);
- tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
- tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
- return _mm_add_epi32(tmp0, tmp2);
-}
-
-#if (defined __GNUC__)
-// template <> EIGEN_STRONG_INLINE __m128 ei_pmadd(const __m128& a, const __m128& b, const __m128& c)
-// {
-// __m128 res = b;
-// asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
-// return res;
-// }
-// EIGEN_STRONG_INLINE __m128i _mm_alignr_epi8(const __m128i& a, const __m128i& b, const int i)
-// {
-// __m128i res = a;
-// asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
-// return res;
-// }
-#endif
-
-#ifdef __SSSE3__
-// SSSE3 versions
-template<int Offset>
-struct ei_palign_impl<Offset,__m128>
-{
- EIGEN_STRONG_INLINE static void run(__m128& first, const __m128& second)
- {
- if (Offset!=0)
- first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset,__m128i>
-{
- EIGEN_STRONG_INLINE static void run(__m128i& first, const __m128i& second)
- {
- if (Offset!=0)
- first = _mm_alignr_epi8(second,first, Offset*4);
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset,__m128d>
-{
- EIGEN_STRONG_INLINE static void run(__m128d& first, const __m128d& second)
- {
- if (Offset==1)
- first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
- }
-};
-#else
-// SSE2 versions
-template<int Offset>
-struct ei_palign_impl<Offset,__m128>
-{
- EIGEN_STRONG_INLINE static void run(__m128& first, const __m128& second)
- {
- if (Offset==1)
- {
- first = _mm_move_ss(first,second);
- first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
- }
- else if (Offset==2)
- {
- first = _mm_movehl_ps(first,first);
- first = _mm_movelh_ps(first,second);
- }
- else if (Offset==3)
- {
- first = _mm_move_ss(first,second);
- first = _mm_shuffle_ps(first,second,0x93);
- }
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset,__m128i>
-{
- EIGEN_STRONG_INLINE static void run(__m128i& first, const __m128i& second)
- {
- if (Offset==1)
- {
- first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- first = _mm_shuffle_epi32(first,0x39);
- }
- else if (Offset==2)
- {
- first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
- first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- }
- else if (Offset==3)
- {
- first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
- first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
- }
- }
-};
-
-template<int Offset>
-struct ei_palign_impl<Offset,__m128d>
-{
- EIGEN_STRONG_INLINE static void run(__m128d& first, const __m128d& second)
- {
- if (Offset==1)
- {
- first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
- first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
- }
- }
-};
-#endif
-
-#define ei_vec4f_swizzle1(v,p,q,r,s) \
- (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
-
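A usage sketch for the swizzle macro (assuming the SSE2 intrinsics header; lane i of the result takes the source lane named by the i-th index):

    #include <emmintrin.h>

    // With v = (1, 2, 3, 4), returns (4, 3, 2, 1).
    inline __m128 reverse_lanes(__m128 v)
    {
        return ei_vec4f_swizzle1(v, 3, 2, 1, 0);
    }
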
-#endif // EIGEN_PACKET_MATH_SSE_H
diff --git a/extern/Eigen2/Eigen/src/Core/util/Constants.h b/extern/Eigen2/Eigen/src/Core/util/Constants.h
deleted file mode 100644
index 296c3caa5f6..00000000000
--- a/extern/Eigen2/Eigen/src/Core/util/Constants.h
+++ /dev/null
@@ -1,254 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_CONSTANTS_H
-#define EIGEN_CONSTANTS_H
-
-/** This value means that a quantity is not known at compile-time, and that instead the value is
- * stored in some runtime variable.
- *
- * Explanation for the choice of this value:
- * - It should be positive and larger than any reasonable compile-time-fixed number of rows or columns.
- * This allows us to simplify many compile-time conditions throughout Eigen.
- * - It should be smaller than the sqrt of INT_MAX. Indeed, we often multiply a number of rows with a number
- * of columns in order to compute a number of coefficients. Even if we guard that with an "if" checking whether
- * the values are Dynamic, we still get a compiler warning "integer overflow". So the only way to get around
- * it would be a meta-selector. Doing this everywhere would reduce code readability and lengthen compilation times.
- * Also, disabling compiler warnings for integer overflow sounds like a bad idea.
- *
- * If you wish to port Eigen to a platform where sizeof(int)==2, it is perfectly possible to set Dynamic to, say, 100.
- */
-const int Dynamic = 10000;
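
In user code (Eigen2-style declarations, shown for illustration) Dynamic simply takes the place of a compile-time dimension:

    // Matrix<float, 3, 3>              m;             // 3x3, sizes fixed at compile time
    // Matrix<float, Dynamic, 1>        v(n);          // column vector sized at runtime
    // Matrix<double, Dynamic, Dynamic> a(rows, cols); // fully dynamic matrix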
-
-/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
- * The value Infinity there means the L-infinity norm.
- */
-const int Infinity = -1;
-
-/** \defgroup flags flags
- * \ingroup Core_Module
- *
- * These are the possible bits which can be OR'ed to constitute the flags of a matrix or
- * expression.
- *
- * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of
- * an expression type, implemented as enums. They are not stored in memory at runtime, and they do not incur any
- * runtime overhead.
- *
- * \sa MatrixBase::Flags
- */
-
-/** \ingroup flags
- *
- * for a matrix, this means that the storage order is row-major.
- * If this bit is not set, the storage order is column-major.
- * For an expression, this determines the storage order of
- * the matrix created by evaluation of that expression. */
-const unsigned int RowMajorBit = 0x1;
-
-/** \ingroup flags
- *
- * means the expression should be evaluated by the calling expression */
-const unsigned int EvalBeforeNestingBit = 0x2;
-
-/** \ingroup flags
- *
- * means the expression should be evaluated before any assignment */
-const unsigned int EvalBeforeAssigningBit = 0x4;
-
-/** \ingroup flags
- *
- * Short version: means the expression might be vectorized
- *
- * Long version: means that the coefficients can be handled by packets
- * and start at a memory location whose alignment meets the requirements
- * of the present CPU architecture for optimized packet access. In the fixed-size
- * case, there is the additional condition that the total size of the coefficients
- * array is a multiple of the packet size, so that it is possible to access all the
- * coefficients by packets. In the dynamic-size case, there is no such condition
- * on the total size, so it might not be possible to access the few last coeffs
- * by packets.
- *
- * \note This bit can be set regardless of whether vectorization is actually enabled.
- * To check for actual vectorizability, see \a ActualPacketAccessBit.
- */
-const unsigned int PacketAccessBit = 0x8;
-
-#ifdef EIGEN_VECTORIZE
-/** \ingroup flags
- *
- * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant
- * is set to the value \a PacketAccessBit.
- *
- * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant
- * is set to the value 0.
- */
-const unsigned int ActualPacketAccessBit = PacketAccessBit;
-#else
-const unsigned int ActualPacketAccessBit = 0x0;
-#endif
-
-/** \ingroup flags
- *
- * Short version: means the expression can be seen as a 1D vector.
- *
- * Long version: means that one can access the coefficients
- * of this expression by coeff(int), and coeffRef(int) in the case of an lvalue expression. These
- * index-based access methods are guaranteed
- * to not have to do any runtime computation of a (row, col)-pair from the index, so that it
- * is guaranteed that whenever it is available, index-based access is at least as fast as
- * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.
- *
- * If both PacketAccessBit and LinearAccessBit are set, then the
- * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of an
- * lvalue expression.
- *
- * Typically, all vector expressions have the LinearAccessBit, but there is one exception:
- * Product expressions don't have it, because it would be troublesome for vectorization, even when the
- * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but
- * not index-based packet access, so they don't have the LinearAccessBit.
- */
-const unsigned int LinearAccessBit = 0x10;
-
-/** \ingroup flags
- *
- * Means that the underlying array of coefficients can be directly accessed. This means two things.
- * First, references to the coefficients must be available through coeffRef(int, int). This rules out read-only
- * expressions whose coefficients are computed on demand by coeff(int, int). Second, the memory layout of the
- * array of coefficients must be exactly the natural one suggested by rows(), cols(), stride(), and the RowMajorBit.
- * This rules out expressions such as DiagonalCoeffs, whose coefficients, though referenceable, do not have
- * such a regular memory layout.
- */
-const unsigned int DirectAccessBit = 0x20;
-
-/** \ingroup flags
- *
- * means the first coefficient packet is guaranteed to be aligned */
-const unsigned int AlignedBit = 0x40;
-
-/** \ingroup flags
- *
- * means all diagonal coefficients are equal to 0 */
-const unsigned int ZeroDiagBit = 0x80;
-
-/** \ingroup flags
- *
- * means all diagonal coefficients are equal to 1 */
-const unsigned int UnitDiagBit = 0x100;
-
-/** \ingroup flags
- *
- * means the matrix is selfadjoint (M=M*). */
-const unsigned int SelfAdjointBit = 0x200;
-
-/** \ingroup flags
- *
- * means the strictly lower triangular part is 0 */
-const unsigned int UpperTriangularBit = 0x400;
-
-/** \ingroup flags
- *
- * means the strictly upper triangular part is 0 */
-const unsigned int LowerTriangularBit = 0x800;
-
-/** \ingroup flags
- *
- * means the expression includes sparse matrices and the sparse path has to be taken. */
-const unsigned int SparseBit = 0x1000;
-
-// list of flags that are inherited by default
-const unsigned int HereditaryBits = RowMajorBit
- | EvalBeforeNestingBit
- | EvalBeforeAssigningBit
- | SparseBit;
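
Because the flags are ordinary compile-time bitmasks, expression properties can be queried with plain bitwise tests. A hypothetical trait (not part of Eigen) might look like:

    template<typename Derived>
    struct is_row_major_example
    {
        enum { ret = (ei_traits<Derived>::Flags & RowMajorBit) ? 1 : 0 };
    };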
-
-// Possible values for the Mode parameter of part() and of extract()
-const unsigned int UpperTriangular = UpperTriangularBit;
-const unsigned int StrictlyUpperTriangular = UpperTriangularBit | ZeroDiagBit;
-const unsigned int LowerTriangular = LowerTriangularBit;
-const unsigned int StrictlyLowerTriangular = LowerTriangularBit | ZeroDiagBit;
-const unsigned int SelfAdjoint = SelfAdjointBit;
-
-// additional possible values for the Mode parameter of extract()
-const unsigned int UnitUpperTriangular = UpperTriangularBit | UnitDiagBit;
-const unsigned int UnitLowerTriangular = LowerTriangularBit | UnitDiagBit;
-const unsigned int Diagonal = UpperTriangular | LowerTriangular;
-
-enum { Aligned, Unaligned };
-enum { ForceAligned, AsRequested };
-enum { ConditionalJumpCost = 5 };
-enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
-enum DirectionType { Vertical, Horizontal };
-enum ProductEvaluationMode { NormalProduct, CacheFriendlyProduct, DiagonalProduct, SparseTimeSparseProduct, SparseTimeDenseProduct, DenseTimeSparseProduct };
-
-enum {
- /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment
- * and good size */
- InnerVectorization,
- /** \internal Vectorization path using a single loop plus scalar loops for the
- * unaligned boundaries */
- LinearVectorization,
- /** \internal Generic vectorization path using one vectorized loop per row/column with some
- * scalar loops to handle the unaligned boundaries */
- SliceVectorization,
- NoVectorization
-};
-
-enum {
- NoUnrolling,
- InnerUnrolling,
- CompleteUnrolling
-};
-
-enum {
- ColMajor = 0,
- RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
- /** \internal Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be
- requested to be aligned) */
- DontAlign = 0,
- /** \internal Align the matrix itself if it is vectorizable fixed-size */
- AutoAlign = 0x2
-};
-
-enum {
- IsDense = 0,
- IsSparse = SparseBit,
- NoDirectAccess = 0,
- HasDirectAccess = DirectAccessBit
-};
-
-const int EiArch_Generic = 0x0;
-const int EiArch_SSE = 0x1;
-const int EiArch_AltiVec = 0x2;
-
-#if defined EIGEN_VECTORIZE_SSE
- const int EiArch = EiArch_SSE;
-#elif defined EIGEN_VECTORIZE_ALTIVEC
- const int EiArch = EiArch_AltiVec;
-#else
- const int EiArch = EiArch_Generic;
-#endif
-
-#endif // EIGEN_CONSTANTS_H
diff --git a/extern/Eigen2/Eigen/src/Core/util/DisableMSVCWarnings.h b/extern/Eigen2/Eigen/src/Core/util/DisableMSVCWarnings.h
deleted file mode 100644
index 765ddecc53c..00000000000
--- a/extern/Eigen2/Eigen/src/Core/util/DisableMSVCWarnings.h
+++ /dev/null
@@ -1,5 +0,0 @@
-
-#ifdef _MSC_VER
- #pragma warning( push )
- #pragma warning( disable : 4181 4244 4127 4211 4717 )
-#endif
diff --git a/extern/Eigen2/Eigen/src/Core/util/EnableMSVCWarnings.h b/extern/Eigen2/Eigen/src/Core/util/EnableMSVCWarnings.h
deleted file mode 100644
index 8bd61601ebb..00000000000
--- a/extern/Eigen2/Eigen/src/Core/util/EnableMSVCWarnings.h
+++ /dev/null
@@ -1,4 +0,0 @@
-
-#ifdef _MSC_VER
- #pragma warning( pop )
-#endif
diff --git a/extern/Eigen2/Eigen/src/Core/util/ForwardDeclarations.h b/extern/Eigen2/Eigen/src/Core/util/ForwardDeclarations.h
deleted file mode 100644
index a72a40b1bfc..00000000000
--- a/extern/Eigen2/Eigen/src/Core/util/ForwardDeclarations.h
+++ /dev/null
@@ -1,125 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_FORWARDDECLARATIONS_H
-#define EIGEN_FORWARDDECLARATIONS_H
-
-template<typename T> struct ei_traits;
-template<typename T> struct NumTraits;
-
-template<typename _Scalar, int _Rows, int _Cols,
- int _Options = EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION | AutoAlign,
- int _MaxRows = _Rows, int _MaxCols = _Cols> class Matrix;
-
-template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
-template<typename ExpressionType> class NestByValue;
-template<typename ExpressionType> class SwapWrapper;
-template<typename MatrixType> class Minor;
-template<typename MatrixType, int BlockRows=Dynamic, int BlockCols=Dynamic, int PacketAccess=AsRequested,
- int _DirectAccessStatus = ei_traits<MatrixType>::Flags&DirectAccessBit ? DirectAccessBit
- : ei_traits<MatrixType>::Flags&SparseBit> class Block;
-template<typename MatrixType> class Transpose;
-template<typename MatrixType> class Conjugate;
-template<typename NullaryOp, typename MatrixType> class CwiseNullaryOp;
-template<typename UnaryOp, typename MatrixType> class CwiseUnaryOp;
-template<typename BinaryOp, typename Lhs, typename Rhs> class CwiseBinaryOp;
-template<typename Lhs, typename Rhs, int ProductMode> class Product;
-template<typename CoeffsVectorType> class DiagonalMatrix;
-template<typename MatrixType> class DiagonalCoeffs;
-template<typename MatrixType, int PacketAccess = AsRequested> class Map;
-template<typename MatrixType, unsigned int Mode> class Part;
-template<typename MatrixType, unsigned int Mode> class Extract;
-template<typename ExpressionType> class Cwise;
-template<typename ExpressionType> class WithFormat;
-template<typename MatrixType> struct CommaInitializer;
-
-
-template<typename Lhs, typename Rhs> struct ei_product_mode;
-template<typename Lhs, typename Rhs, int ProductMode = ei_product_mode<Lhs,Rhs>::value> struct ProductReturnType;
-
-template<typename Scalar> struct ei_scalar_sum_op;
-template<typename Scalar> struct ei_scalar_difference_op;
-template<typename Scalar> struct ei_scalar_product_op;
-template<typename Scalar> struct ei_scalar_quotient_op;
-template<typename Scalar> struct ei_scalar_opposite_op;
-template<typename Scalar> struct ei_scalar_conjugate_op;
-template<typename Scalar> struct ei_scalar_real_op;
-template<typename Scalar> struct ei_scalar_imag_op;
-template<typename Scalar> struct ei_scalar_abs_op;
-template<typename Scalar> struct ei_scalar_abs2_op;
-template<typename Scalar> struct ei_scalar_sqrt_op;
-template<typename Scalar> struct ei_scalar_exp_op;
-template<typename Scalar> struct ei_scalar_log_op;
-template<typename Scalar> struct ei_scalar_cos_op;
-template<typename Scalar> struct ei_scalar_sin_op;
-template<typename Scalar> struct ei_scalar_pow_op;
-template<typename Scalar> struct ei_scalar_inverse_op;
-template<typename Scalar> struct ei_scalar_square_op;
-template<typename Scalar> struct ei_scalar_cube_op;
-template<typename Scalar, typename NewType> struct ei_scalar_cast_op;
-template<typename Scalar> struct ei_scalar_multiple_op;
-template<typename Scalar> struct ei_scalar_quotient1_op;
-template<typename Scalar> struct ei_scalar_min_op;
-template<typename Scalar> struct ei_scalar_max_op;
-template<typename Scalar> struct ei_scalar_random_op;
-template<typename Scalar> struct ei_scalar_add_op;
-template<typename Scalar> struct ei_scalar_constant_op;
-template<typename Scalar> struct ei_scalar_identity_op;
-
-struct IOFormat;
-
-template<typename Scalar>
-void ei_cache_friendly_product(
- int _rows, int _cols, int depth,
- bool _lhsRowMajor, const Scalar* _lhs, int _lhsStride,
- bool _rhsRowMajor, const Scalar* _rhs, int _rhsStride,
- bool resRowMajor, Scalar* res, int resStride);
-
-// Array module
-template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;
-template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
-template<typename ExpressionType, int Direction> class PartialRedux;
-
-template<typename MatrixType> class LU;
-template<typename MatrixType> class QR;
-template<typename MatrixType> class SVD;
-template<typename MatrixType> class LLT;
-template<typename MatrixType> class LDLT;
-
-// Geometry module:
-template<typename Derived, int _Dim> class RotationBase;
-template<typename Lhs, typename Rhs> class Cross;
-template<typename Scalar> class Quaternion;
-template<typename Scalar> class Rotation2D;
-template<typename Scalar> class AngleAxis;
-template<typename Scalar,int Dim> class Transform;
-template <typename _Scalar, int _AmbientDim> class ParametrizedLine;
-template <typename _Scalar, int _AmbientDim> class Hyperplane;
-template<typename Scalar,int Dim> class Translation;
-template<typename Scalar,int Dim> class Scaling;
-
-// Sparse module:
-template<typename Lhs, typename Rhs, int ProductMode> class SparseProduct;
-
-#endif // EIGEN_FORWARDDECLARATIONS_H
diff --git a/extern/Eigen2/Eigen/src/Core/util/Macros.h b/extern/Eigen2/Eigen/src/Core/util/Macros.h
deleted file mode 100644
index 89b20312a52..00000000000
--- a/extern/Eigen2/Eigen/src/Core/util/Macros.h
+++ /dev/null
@@ -1,273 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_MACROS_H
-#define EIGEN_MACROS_H
-
-#undef minor
-
-#define EIGEN_WORLD_VERSION 2
-#define EIGEN_MAJOR_VERSION 0
-#define EIGEN_MINOR_VERSION 6
-
-#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
- (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
- EIGEN_MINOR_VERSION>=z))))
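
Usage sketch: client code can gate itself on the library version at compile time, e.g.

    // #if EIGEN_VERSION_AT_LEAST(2,0,6)
    //   // rely on behaviour present since Eigen 2.0.6
    // #endif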
-
-// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable 16 byte alignment on all
-// platforms where vectorization might be enabled. In theory we could always enable alignment, but it can be a cause of problems
-// on some platforms, so we just disable it on certain common platforms (compiler+architecture combinations) to avoid these problems.
-#if defined(__GNUC__) && !(defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(__ia64__) || defined(__ppc__))
-#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_ALIGNMENT 1
-#else
-#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_ALIGNMENT 0
-#endif
-
-#if defined(__GNUC__) && (__GNUC__ <= 3)
-#define EIGEN_GCC3_OR_OLDER 1
-#else
-#define EIGEN_GCC3_OR_OLDER 0
-#endif
-
-// FIXME vectorization + alignment is completely disabled with sun studio
-#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_ALIGNMENT && !EIGEN_GCC3_OR_OLDER && !defined(__SUNPRO_CC)
- #define EIGEN_ARCH_WANTS_ALIGNMENT 1
-#else
- #define EIGEN_ARCH_WANTS_ALIGNMENT 0
-#endif
-
-// EIGEN_ALIGN is the true test whether we want to align or not. It takes into account both the user choice to explicitly disable
-// alignment (EIGEN_DONT_ALIGN) and the architecture config (EIGEN_ARCH_WANTS_ALIGNMENT). Henceforth, only EIGEN_ALIGN should be used.
-#if EIGEN_ARCH_WANTS_ALIGNMENT && !defined(EIGEN_DONT_ALIGN)
- #define EIGEN_ALIGN 1
-#else
- #define EIGEN_ALIGN 0
- #ifdef EIGEN_VECTORIZE
- #error "Vectorization enabled, but our platform checks say that we don't do 16 byte alignment on this platform. If you added vectorization for another architecture, you also need to edit this platform check."
- #endif
- #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
- #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
- #endif
-#endif
-
-#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
-#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION RowMajor
-#else
-#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor
-#endif
-
-/** \internal Defines the maximal loop size to enable meta unrolling of loops.
- * Note that the value here is expressed in Eigen's own notion of "number of FLOPS",
- * it does not correspond to the number of iterations or the number of instructions
- */
-#ifndef EIGEN_UNROLLING_LIMIT
-#define EIGEN_UNROLLING_LIMIT 100
-#endif
-
-/** \internal Define the maximal size in Bytes of blocks fitting in CPU cache.
- * The current value is set to generate blocks of 256x256 for float
- *
- * Typically for a single-threaded application you would set that to 25% of the size of your CPU caches in bytes
- */
-#ifndef EIGEN_TUNE_FOR_CPU_CACHE_SIZE
-#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE (sizeof(float)*256*256)
-#endif
-
-// FIXME this should go away quickly
-#ifdef EIGEN_TUNE_FOR_L2_CACHE_SIZE
-#error EIGEN_TUNE_FOR_L2_CACHE_SIZE is now called EIGEN_TUNE_FOR_CPU_CACHE_SIZE.
-#endif
-
-#define USING_PART_OF_NAMESPACE_EIGEN \
-EIGEN_USING_MATRIX_TYPEDEFS \
-using Eigen::Matrix; \
-using Eigen::MatrixBase; \
-using Eigen::ei_random; \
-using Eigen::ei_real; \
-using Eigen::ei_imag; \
-using Eigen::ei_conj; \
-using Eigen::ei_abs; \
-using Eigen::ei_abs2; \
-using Eigen::ei_sqrt; \
-using Eigen::ei_exp; \
-using Eigen::ei_log; \
-using Eigen::ei_sin; \
-using Eigen::ei_cos;
-
-#ifdef NDEBUG
-# ifndef EIGEN_NO_DEBUG
-# define EIGEN_NO_DEBUG
-# endif
-#endif
-
-#ifndef ei_assert
-#ifdef EIGEN_NO_DEBUG
-#define ei_assert(x)
-#else
-#define ei_assert(x) assert(x)
-#endif
-#endif
-
-#ifdef EIGEN_INTERNAL_DEBUGGING
-#define ei_internal_assert(x) ei_assert(x)
-#else
-#define ei_internal_assert(x)
-#endif
-
-#ifdef EIGEN_NO_DEBUG
-#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x
-#else
-#define EIGEN_ONLY_USED_FOR_DEBUG(x)
-#endif
-
-// EIGEN_ALWAYS_INLINE_ATTRIB should be used in the declaration of functions
-// that should be inlined even in debug mode.
-// FIXME with the always_inline attribute,
-// gcc 3.4.x reports the following compilation error:
-// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
-// : function body not available
-#if EIGEN_GNUC_AT_LEAST(4,0)
-#define EIGEN_ALWAYS_INLINE_ATTRIB __attribute__((always_inline))
-#else
-#define EIGEN_ALWAYS_INLINE_ATTRIB
-#endif
-
-// EIGEN_STRONG_INLINE means "inline as much as possible"
-#if (defined _MSC_VER)
-#define EIGEN_STRONG_INLINE __forceinline
-#else
-#define EIGEN_STRONG_INLINE inline
-#endif
-
-#if (defined __GNUC__)
-#define EIGEN_DONT_INLINE __attribute__((noinline))
-#elif (defined _MSC_VER)
-#define EIGEN_DONT_INLINE __declspec(noinline)
-#else
-#define EIGEN_DONT_INLINE
-#endif
-
-#if (defined __GNUC__)
-#define EIGEN_DEPRECATED __attribute__((deprecated))
-#elif (defined _MSC_VER)
-#define EIGEN_DEPRECATED __declspec(deprecated)
-#else
-#define EIGEN_DEPRECATED
-#endif
-
-/* EIGEN_ALIGN_128 forces data to be 16-byte aligned, EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
- * so that vectorization doesn't affect binary compatibility.
- *
- * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
- * vectorized and non-vectorized code.
- */
-#if !EIGEN_ALIGN
-#define EIGEN_ALIGN_128
-#elif (defined __GNUC__)
-#define EIGEN_ALIGN_128 __attribute__((aligned(16)))
-#elif (defined _MSC_VER)
-#define EIGEN_ALIGN_128 __declspec(align(16))
-#else
-#error Please tell me what is the equivalent of __attribute__((aligned(16))) for your compiler
-#endif
-
-#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
- #define EIGEN_RESTRICT
-#endif
-#ifndef EIGEN_RESTRICT
- #define EIGEN_RESTRICT __restrict
-#endif
-
-#ifndef EIGEN_STACK_ALLOCATION_LIMIT
-#define EIGEN_STACK_ALLOCATION_LIMIT 1000000
-#endif
-
-#ifndef EIGEN_DEFAULT_IO_FORMAT
-#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
-#endif
-
-// format used in Eigen's documentation
-// it has to be defined here, as escaping characters in CMake's add_definitions argument seems very problematic.
-#define EIGEN_DOCS_IO_FORMAT IOFormat(3, AlignCols, " ", "\n", "", "")
-
-#define EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
-template<typename OtherDerived> \
-EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::MatrixBase<OtherDerived>& other) \
-{ \
- return Base::operator Op(other.derived()); \
-} \
-EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
-{ \
- return Base::operator Op(other); \
-}
-
-#define EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
-template<typename Other> \
-EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
-{ \
- return Base::operator Op(scalar); \
-}
-
-#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
-EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
-EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
-EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
-EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
-EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
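
These macros exist because C++ name lookup hides the base-class operator= once a derived class declares (or the compiler generates) its own, so each expression class re-exposes them. A hypothetical sketch, assuming the surrounding Eigen2 machinery (traits specialization etc.) is in place:

    // class MyExpression : public Eigen::MatrixBase<MyExpression>
    // {
    //   public:
    //     typedef Eigen::MatrixBase<MyExpression> Base;
    //     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MyExpression) // operator=, +=, -=, *=, /=
    // };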
-
-#define _EIGEN_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
-typedef BaseClass Base; \
-typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
-typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
-typedef typename Base::PacketScalar PacketScalar; \
-typedef typename Eigen::ei_nested<Derived>::type Nested; \
-enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
- ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
- MaxRowsAtCompileTime = Eigen::ei_traits<Derived>::MaxRowsAtCompileTime, \
- MaxColsAtCompileTime = Eigen::ei_traits<Derived>::MaxColsAtCompileTime, \
- Flags = Eigen::ei_traits<Derived>::Flags, \
- CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
- SizeAtCompileTime = Base::SizeAtCompileTime, \
- MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
- IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
-
-#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
-_EIGEN_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::MatrixBase<Derived>)
-
-#define EIGEN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
-#define EIGEN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
-
-// just an empty macro!
-#define EIGEN_EMPTY
-
-// concatenate two tokens
-#define EIGEN_CAT2(a,b) a ## b
-#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
-
-// convert a token to a string
-#define EIGEN_MAKESTRING2(a) #a
-#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
-
-#endif // EIGEN_MACROS_H
diff --git a/extern/Eigen2/Eigen/src/Core/util/Memory.h b/extern/Eigen2/Eigen/src/Core/util/Memory.h
deleted file mode 100644
index 0a43e7f7bf2..00000000000
--- a/extern/Eigen2/Eigen/src/Core/util/Memory.h
+++ /dev/null
@@ -1,387 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_MEMORY_H
-#define EIGEN_MEMORY_H
-
-// FreeBSD 6 seems to have 16-byte aligned malloc
-// See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
-// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
-// See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
-#if defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__)
-#define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
-#else
-#define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
-#endif
-
-#if defined(__APPLE__) || defined(_WIN64) || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
- #define EIGEN_MALLOC_ALREADY_ALIGNED 1
-#else
- #define EIGEN_MALLOC_ALREADY_ALIGNED 0
-#endif
-
-#if ((defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
- #define EIGEN_HAS_POSIX_MEMALIGN 1
-#else
- #define EIGEN_HAS_POSIX_MEMALIGN 0
-#endif
-
-#ifdef EIGEN_VECTORIZE_SSE
- #define EIGEN_HAS_MM_MALLOC 1
-#else
- #define EIGEN_HAS_MM_MALLOC 0
-#endif
-
-/** \internal like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
- * Fast, but wastes 16 additional bytes of memory.
- * Does not throw any exception.
- */
-inline void* ei_handmade_aligned_malloc(size_t size)
-{
- void *original = malloc(size+16);
- void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
- *(reinterpret_cast<void**>(aligned) - 1) = original;
- return aligned;
-}
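
A worked example of the bookkeeping above (numbers chosen for illustration): if malloc returns 0x1008, masking the low four bits gives 0x1000 and adding 16 gives the aligned pointer 0x1010. Since malloc already returns at least pointer-aligned memory, a pointer-sized slot always fits just below the aligned address, and that is where the original pointer is stashed for ei_handmade_aligned_free. A quick sanity check, assuming the two helpers above are in scope:

    #include <cassert>
    #include <cstddef>

    inline void handmade_alignment_check()
    {
        void* p = ei_handmade_aligned_malloc(100);
        assert(reinterpret_cast<std::size_t>(p) % 16 == 0); // always 16-byte aligned
        ei_handmade_aligned_free(p);
    }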
-
-/** \internal frees memory allocated with ei_handmade_aligned_malloc */
-inline void ei_handmade_aligned_free(void *ptr)
-{
- if(ptr)
- free(*(reinterpret_cast<void**>(ptr) - 1));
-}
-
-/** \internal allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment.
- * On allocation error, the returned pointer is null, and if exceptions are enabled then a std::bad_alloc is thrown.
- */
-inline void* ei_aligned_malloc(size_t size)
-{
- #ifdef EIGEN_NO_MALLOC
- ei_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
- #endif
-
- void *result;
- #if !EIGEN_ALIGN
- result = malloc(size);
- #elif EIGEN_MALLOC_ALREADY_ALIGNED
- result = malloc(size);
- #elif EIGEN_HAS_POSIX_MEMALIGN
- if(posix_memalign(&result, 16, size)) result = 0;
- #elif EIGEN_HAS_MM_MALLOC
- result = _mm_malloc(size, 16);
- #elif (defined _MSC_VER)
- result = _aligned_malloc(size, 16);
- #else
- result = ei_handmade_aligned_malloc(size);
- #endif
-
- #ifdef EIGEN_EXCEPTIONS
- if(result == 0)
- throw std::bad_alloc();
- #endif
- return result;
-}
-
-/** allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
- * On allocation error, the returned pointer is null, and if exceptions are enabled then a std::bad_alloc is thrown.
- */
-template<bool Align> inline void* ei_conditional_aligned_malloc(size_t size)
-{
- return ei_aligned_malloc(size);
-}
-
-template<> inline void* ei_conditional_aligned_malloc<false>(size_t size)
-{
- #ifdef EIGEN_NO_MALLOC
- ei_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
- #endif
-
- void *result = malloc(size);
- #ifdef EIGEN_EXCEPTIONS
- if(!result) throw std::bad_alloc();
- #endif
- return result;
-}
-
-/** \internal construct the elements of an array.
- * The \a size parameter gives the number of objects on which to call the constructor of T.
- */
-template<typename T> inline T* ei_construct_elements_of_array(T *ptr, size_t size)
-{
- for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
- return ptr;
-}
-
-/** allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment.
- * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown.
- * The default constructor of T is called.
- */
-template<typename T> inline T* ei_aligned_new(size_t size)
-{
- T *result = reinterpret_cast<T*>(ei_aligned_malloc(sizeof(T)*size));
- return ei_construct_elements_of_array(result, size);
-}
-
-template<typename T, bool Align> inline T* ei_conditional_aligned_new(size_t size)
-{
- T *result = reinterpret_cast<T*>(ei_conditional_aligned_malloc<Align>(sizeof(T)*size));
- return ei_construct_elements_of_array(result, size);
-}
-
-/** \internal free memory allocated with ei_aligned_malloc
- */
-inline void ei_aligned_free(void *ptr)
-{
- #if !EIGEN_ALIGN
- free(ptr);
- #elif EIGEN_MALLOC_ALREADY_ALIGNED
- free(ptr);
- #elif EIGEN_HAS_POSIX_MEMALIGN
- free(ptr);
- #elif EIGEN_HAS_MM_MALLOC
- _mm_free(ptr);
- #elif defined(_MSC_VER)
- _aligned_free(ptr);
- #else
- ei_handmade_aligned_free(ptr);
- #endif
-}
-
-/** \internal free memory allocated with ei_conditional_aligned_malloc
- */
-template<bool Align> inline void ei_conditional_aligned_free(void *ptr)
-{
- ei_aligned_free(ptr);
-}
-
-template<> inline void ei_conditional_aligned_free<false>(void *ptr)
-{
- free(ptr);
-}
-
-/** \internal destruct the elements of an array.
- * The \a size parameter gives the number of objects on which to call the destructor of T.
- */
-template<typename T> inline void ei_destruct_elements_of_array(T *ptr, size_t size)
-{
- // always destruct an array starting from the end.
- while(size) ptr[--size].~T();
-}
-
-/** \internal delete objects constructed with ei_aligned_new
- * The \a size parameter gives the number of objects on which to call the destructor of T.
- */
-template<typename T> inline void ei_aligned_delete(T *ptr, size_t size)
-{
- ei_destruct_elements_of_array<T>(ptr, size);
- ei_aligned_free(ptr);
-}
-
-/** \internal delete objects constructed with ei_conditional_aligned_new
- * The \a size parameter gives the number of objects on which to call the destructor of T.
- */
-template<typename T, bool Align> inline void ei_conditional_aligned_delete(T *ptr, size_t size)
-{
- ei_destruct_elements_of_array<T>(ptr, size);
- ei_conditional_aligned_free<Align>(ptr);
-}
-
-/** \internal \returns the number of elements which have to be skipped such that data are 16 bytes aligned */
-template<typename Scalar>
-inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset)
-{
- typedef typename ei_packet_traits<Scalar>::type Packet;
- const int PacketSize = ei_packet_traits<Scalar>::size;
- const int PacketAlignedMask = PacketSize-1;
- const bool Vectorized = PacketSize>1;
- return Vectorized
- ? std::min<int>( (PacketSize - (int((size_t(ptr)/sizeof(Scalar))) & PacketAlignedMask))
- & PacketAlignedMask, maxOffset)
- : 0;
-}
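
A worked example (illustration): for float, PacketSize is 4 and PacketAlignedMask is 3. With ptr = 0x1008,

    // (size_t(ptr) / sizeof(float)) & 3  =  0x402 & 3  =  2  // floats past a 16-byte boundary
    // (4 - 2) & 3                        =  2                // elements to skip
    // 0x1008 + 2 * sizeof(float)         =  0x1010           // 16-byte aligned, as required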
-
-/** \internal
- * ei_aligned_stack_alloc(SIZE) allocates an aligned buffer of SIZE bytes
- * on the stack if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT.
- * Otherwise the memory is allocated on the heap.
- * Data allocated with ei_aligned_stack_alloc \b must be freed by calling ei_aligned_stack_free(PTR,SIZE).
- * \code
- * float * data = ei_aligned_stack_alloc(float,array.size());
- * // ...
- * ei_aligned_stack_free(data,float,array.size());
- * \endcode
- */
-#ifdef __linux__
- #define ei_aligned_stack_alloc(SIZE) (SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) \
- ? alloca(SIZE) \
- : ei_aligned_malloc(SIZE)
- #define ei_aligned_stack_free(PTR,SIZE) if(SIZE>EIGEN_STACK_ALLOCATION_LIMIT) ei_aligned_free(PTR)
-#else
- #define ei_aligned_stack_alloc(SIZE) ei_aligned_malloc(SIZE)
- #define ei_aligned_stack_free(PTR,SIZE) ei_aligned_free(PTR)
-#endif
-
-#define ei_aligned_stack_new(TYPE,SIZE) ei_construct_elements_of_array(reinterpret_cast<TYPE*>(ei_aligned_stack_alloc(sizeof(TYPE)*SIZE)), SIZE)
-#define ei_aligned_stack_delete(TYPE,PTR,SIZE) do {ei_destruct_elements_of_array<TYPE>(PTR, SIZE); \
- ei_aligned_stack_free(PTR,sizeof(TYPE)*SIZE);} while(0)
-
-
-#if EIGEN_ALIGN
- #ifdef EIGEN_EXCEPTIONS
- #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
- void* operator new(size_t size, const std::nothrow_t&) throw() { \
- try { return Eigen::ei_conditional_aligned_malloc<NeedsToAlign>(size); } \
- catch (...) { return 0; } \
- return 0; \
- }
- #else
- #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
- void* operator new(size_t size, const std::nothrow_t&) throw() { \
- return Eigen::ei_conditional_aligned_malloc<NeedsToAlign>(size); \
- }
- #endif
-
- #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
- void *operator new(size_t size) { \
- return Eigen::ei_conditional_aligned_malloc<NeedsToAlign>(size); \
- } \
- void *operator new[](size_t size) { \
- return Eigen::ei_conditional_aligned_malloc<NeedsToAlign>(size); \
- } \
- void operator delete(void * ptr) throw() { Eigen::ei_conditional_aligned_free<NeedsToAlign>(ptr); } \
- void operator delete[](void * ptr) throw() { Eigen::ei_conditional_aligned_free<NeedsToAlign>(ptr); } \
- /* in-place new and delete. since (as far as we know) there is no actual */ \
- /* memory allocated we can safely let the default implementation handle */ \
- /* this particular case. */ \
- static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
- void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
- /* nothrow-new (returns zero instead of std::bad_alloc) */ \
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
- void operator delete(void *ptr, const std::nothrow_t&) throw() { \
- Eigen::ei_conditional_aligned_free<NeedsToAlign>(ptr); \
- } \
- typedef void ei_operator_new_marker_type;
-#else
- #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
-#endif
-
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
-#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0))
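
Usage sketch (assuming Eigen2): a class holding a fixed-size vectorizable Eigen member opts in so that heap-allocated instances keep that member 16-byte aligned:

    // struct RigidBody  // hypothetical user type
    // {
    //   Eigen::Matrix4f pose;            // requires 16-byte alignment
    //   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    // };
    // RigidBody* body = new RigidBody;   // goes through the aligned operator new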
-
-
-/** \class aligned_allocator
-*
-* \brief STL-compatible allocator to use with 16-byte-aligned types
-*
-* Example:
-* \code
-* // Matrix4f requires 16 bytes alignment:
-* std::map< int, Matrix4f, std::less<int>, aligned_allocator<Matrix4f> > my_map_mat4;
-* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:
-* std::map< int, Vector3f > my_map_vec3;
-* \endcode
-*
-*/
-template<class T>
-class aligned_allocator
-{
-public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef T* pointer;
- typedef const T* const_pointer;
- typedef T& reference;
- typedef const T& const_reference;
- typedef T value_type;
-
- template<class U>
- struct rebind
- {
- typedef aligned_allocator<U> other;
- };
-
- pointer address( reference value ) const
- {
- return &value;
- }
-
- const_pointer address( const_reference value ) const
- {
- return &value;
- }
-
- aligned_allocator() throw()
- {
- }
-
- aligned_allocator( const aligned_allocator& ) throw()
- {
- }
-
- template<class U>
- aligned_allocator( const aligned_allocator<U>& ) throw()
- {
- }
-
- ~aligned_allocator() throw()
- {
- }
-
- size_type max_size() const throw()
- {
- return std::numeric_limits<size_type>::max();
- }
-
- pointer allocate( size_type num, const_pointer* hint = 0 )
- {
- static_cast<void>( hint ); // suppress unused variable warning
- return static_cast<pointer>( ei_aligned_malloc( num * sizeof(T) ) );
- }
-
- void construct( pointer p, const T& value )
- {
- ::new( p ) T( value );
- }
-
- void destroy( pointer p )
- {
- p->~T();
- }
-
- void deallocate( pointer p, size_type /*num*/ )
- {
- ei_aligned_free( p );
- }
-
- bool operator!=(const aligned_allocator<T>& other) const
- { return false; }
-
- bool operator==(const aligned_allocator<T>& other) const
- { return true; }
-};
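
The same applies to sequence containers; a usage sketch assuming Eigen2 types (for std::vector specifically, see also the Eigen/StdVector header, which works around additional std::vector issues):

    // #include <vector>
    // std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > poses(32);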
-
-#endif // EIGEN_MEMORY_H
diff --git a/extern/Eigen2/Eigen/src/Core/util/Meta.h b/extern/Eigen2/Eigen/src/Core/util/Meta.h
deleted file mode 100644
index c65c52ef42f..00000000000
--- a/extern/Eigen2/Eigen/src/Core/util/Meta.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_META_H
-#define EIGEN_META_H
-
-/** \internal
- * \file Meta.h
- * This file contains generic metaprogramming classes which are not specifically related to Eigen.
- * \note In case you wonder: yes, we're aware that Boost already provides all these features;
- * however, we don't want to add a dependency on Boost.
- */
-
-struct ei_meta_true { enum { ret = 1 }; };
-struct ei_meta_false { enum { ret = 0 }; };
-
-template<bool Condition, typename Then, typename Else>
-struct ei_meta_if { typedef Then ret; };
-
-template<typename Then, typename Else>
-struct ei_meta_if <false, Then, Else> { typedef Else ret; };
-
-template<typename T, typename U> struct ei_is_same_type { enum { ret = 0 }; };
-template<typename T> struct ei_is_same_type<T,T> { enum { ret = 1 }; };
-
-template<typename T> struct ei_unref { typedef T type; };
-template<typename T> struct ei_unref<T&> { typedef T type; };
-
-template<typename T> struct ei_unpointer { typedef T type; };
-template<typename T> struct ei_unpointer<T*> { typedef T type; };
-template<typename T> struct ei_unpointer<T*const> { typedef T type; };
-
-template<typename T> struct ei_unconst { typedef T type; };
-template<typename T> struct ei_unconst<const T> { typedef T type; };
-template<typename T> struct ei_unconst<T const &> { typedef T & type; };
-template<typename T> struct ei_unconst<T const *> { typedef T * type; };
-
-template<typename T> struct ei_cleantype { typedef T type; };
-template<typename T> struct ei_cleantype<const T> { typedef typename ei_cleantype<T>::type type; };
-template<typename T> struct ei_cleantype<const T&> { typedef typename ei_cleantype<T>::type type; };
-template<typename T> struct ei_cleantype<T&> { typedef typename ei_cleantype<T>::type type; };
-template<typename T> struct ei_cleantype<const T*> { typedef typename ei_cleantype<T>::type type; };
-template<typename T> struct ei_cleantype<T*> { typedef typename ei_cleantype<T>::type type; };
-
-/** \internal
- * Convenient struct to get the result type of a unary or binary functor.
- *
- * It supports both the current STL mechanism (using the result_type member) as well as
- * upcoming next STL generation (using a templated result member).
- * If none of these members is provided, the type of the first argument is returned as a fallback. FIXME: that fallback is a pretty bad hack.
- */
-template<typename T> struct ei_result_of {};
-
-struct ei_has_none {int a[1];};
-struct ei_has_std_result_type {int a[2];};
-struct ei_has_tr1_result {int a[3];};
-
-template<typename Func, typename ArgType, int SizeOf=sizeof(ei_has_none)>
-struct ei_unary_result_of_select {typedef ArgType type;};
-
-template<typename Func, typename ArgType>
-struct ei_unary_result_of_select<Func, ArgType, sizeof(ei_has_std_result_type)> {typedef typename Func::result_type type;};
-
-template<typename Func, typename ArgType>
-struct ei_unary_result_of_select<Func, ArgType, sizeof(ei_has_tr1_result)> {typedef typename Func::template result<Func(ArgType)>::type type;};
-
-template<typename Func, typename ArgType>
-struct ei_result_of<Func(ArgType)> {
- template<typename T>
- static ei_has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
- template<typename T>
- static ei_has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType)>::type const * = 0);
- static ei_has_none testFunctor(...);
-
- // note that the following indirection is needed for gcc-3.3
- enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
- typedef typename ei_unary_result_of_select<Func, ArgType, FunctorType>::type type;
-};
-
-template<typename Func, typename ArgType0, typename ArgType1, int SizeOf=sizeof(ei_has_none)>
-struct ei_binary_result_of_select {typedef ArgType0 type;};
-
-template<typename Func, typename ArgType0, typename ArgType1>
-struct ei_binary_result_of_select<Func, ArgType0, ArgType1, sizeof(ei_has_std_result_type)>
-{typedef typename Func::result_type type;};
-
-template<typename Func, typename ArgType0, typename ArgType1>
-struct ei_binary_result_of_select<Func, ArgType0, ArgType1, sizeof(ei_has_tr1_result)>
-{typedef typename Func::template result<Func(ArgType0,ArgType1)>::type type;};
-
-template<typename Func, typename ArgType0, typename ArgType1>
-struct ei_result_of<Func(ArgType0,ArgType1)> {
- template<typename T>
- static ei_has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
- template<typename T>
- static ei_has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1)>::type const * = 0);
- static ei_has_none testFunctor(...);
-
- // note that the following indirection is needed for gcc-3.3
- enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
- typedef typename ei_binary_result_of_select<Func, ArgType0, ArgType1, FunctorType>::type type;
-};
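
To make the sizeof-based dispatch above concrete, here is a small sketch. Negate is a hypothetical functor; ei_result_of is an internal helper, reachable here only because Eigen2 defines it inside namespace Eigen. Because Negate declares an STL-style result_type, the first testFunctor overload is selected, and its return type's size picks the matching ei_unary_result_of_select specialization:

    #include <Eigen/Core>
    #include <iostream>

    // Hypothetical functor following the classic STL convention.
    struct Negate
    {
      typedef double result_type;
      double operator()(double x) const { return -x; }
    };

    int main()
    {
      // Resolves to double through the ei_has_std_result_type branch.
      Eigen::ei_result_of<Negate(float)>::type r = Negate()(2.0f);
      std::cout << r << std::endl;   // prints -2
      return 0;
    }
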
-
-/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer.
- * Usage example: \code ei_meta_sqrt<1023>::ret \endcode
- */
-template<int Y,
- int InfX = 0,
- int SupX = ((Y==1) ? 1 : Y/2),
- bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >
- // use ?: instead of || just to shut up a stupid gcc 4.3 warning
-class ei_meta_sqrt
-{
- enum {
- MidX = (InfX+SupX)/2,
- TakeInf = MidX*MidX > Y ? 1 : 0,
- NewInf = int(TakeInf) ? InfX : int(MidX),
- NewSup = int(TakeInf) ? int(MidX) : SupX
- };
- public:
- enum { ret = ei_meta_sqrt<Y,NewInf,NewSup>::ret };
-};
-
-template<int Y, int InfX, int SupX>
-class ei_meta_sqrt<Y, InfX, SupX, true> { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
-
-/** \internal determines whether the product of two numeric types is allowed and what the return type is */
-template<typename T, typename U> struct ei_scalar_product_traits
-{
- // dummy general case where T and U aren't compatible -- not allowed anyway but we catch it elsewhere
- //enum { Cost = NumTraits<T>::MulCost };
- typedef T ReturnType;
-};
-
-template<typename T> struct ei_scalar_product_traits<T,T>
-{
- //enum { Cost = NumTraits<T>::MulCost };
- typedef T ReturnType;
-};
-
-template<typename T> struct ei_scalar_product_traits<T,std::complex<T> >
-{
- //enum { Cost = 2*NumTraits<T>::MulCost };
- typedef std::complex<T> ReturnType;
-};
-
-template<typename T> struct ei_scalar_product_traits<std::complex<T>, T>
-{
- //enum { Cost = 2*NumTraits<T>::MulCost };
- typedef std::complex<T> ReturnType;
-};
-
-// FIXME quick workaround around current limitation of ei_result_of
-template<typename Scalar, typename ArgType0, typename ArgType1>
-struct ei_result_of<ei_scalar_product_op<Scalar>(ArgType0,ArgType1)> {
-typedef typename ei_scalar_product_traits<typename ei_cleantype<ArgType0>::type, typename ei_cleantype<ArgType1>::type>::ReturnType type;
-};
-
-
-
-#endif // EIGEN_META_H
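
A quick sketch of these helpers in action (internal names, shown purely for illustration; they live in namespace Eigen in Eigen2):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      // Compile-time integer square root: 31*31 = 961 <= 1023 < 1024 = 32*32.
      std::cout << Eigen::ei_meta_sqrt<1023>::ret << std::endl;           // 31

      // Compile-time branch plus type-equality check.
      typedef Eigen::ei_meta_if<true, float, double>::ret T;
      std::cout << Eigen::ei_is_same_type<T, float>::ret << std::endl;    // 1
      return 0;
    }
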
diff --git a/extern/Eigen2/Eigen/src/Core/util/XprHelper.h b/extern/Eigen2/Eigen/src/Core/util/XprHelper.h
deleted file mode 100644
index 12d6f9a3a3e..00000000000
--- a/extern/Eigen2/Eigen/src/Core/util/XprHelper.h
+++ /dev/null
@@ -1,219 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_XPRHELPER_H
-#define EIGEN_XPRHELPER_H
-
-// just a workaround because GCC seems to not really like empty structs
-#ifdef __GNUG__
- struct ei_empty_struct{char _ei_dummy_;};
- #define EIGEN_EMPTY_STRUCT : Eigen::ei_empty_struct
-#else
- #define EIGEN_EMPTY_STRUCT
-#endif
-
-//classes inheriting ei_no_assignment_operator don't generate a default operator=.
-class ei_no_assignment_operator
-{
- private:
- ei_no_assignment_operator& operator=(const ei_no_assignment_operator&);
-};
-
-/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around an int variable that
- * can be accessed using value() and setValue().
- * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
- */
-template<int Value> class ei_int_if_dynamic EIGEN_EMPTY_STRUCT
-{
- public:
- ei_int_if_dynamic() {}
- explicit ei_int_if_dynamic(int) {}
- static int value() { return Value; }
- void setValue(int) {}
-};
-
-template<> class ei_int_if_dynamic<Dynamic>
-{
- int m_value;
- ei_int_if_dynamic() {}
- public:
- explicit ei_int_if_dynamic(int value) : m_value(value) {}
- int value() const { return m_value; }
- void setValue(int value) { m_value = value; }
-};
-
-template<typename T> struct ei_functor_traits
-{
- enum
- {
- Cost = 10,
- PacketAccess = false
- };
-};
-
-template<typename T> struct ei_packet_traits
-{
- typedef T type;
- enum {size=1};
-};
-
-template<typename T> struct ei_unpacket_traits
-{
- typedef T type;
- enum {size=1};
-};
-
-template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
-class ei_compute_matrix_flags
-{
- enum {
- row_major_bit = Options&RowMajor ? RowMajorBit : 0,
- inner_max_size = row_major_bit ? MaxCols : MaxRows,
- is_big = inner_max_size == Dynamic,
- is_packet_size_multiple = (Cols*Rows) % ei_packet_traits<Scalar>::size == 0,
- aligned_bit = ((Options&AutoAlign) && (is_big || is_packet_size_multiple)) ? AlignedBit : 0,
- packet_access_bit = ei_packet_traits<Scalar>::size > 1 && aligned_bit ? PacketAccessBit : 0
- };
-
- public:
- enum { ret = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit | aligned_bit };
-};
-
-template<int _Rows, int _Cols> struct ei_size_at_compile_time
-{
- enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
-};
-
-/* ei_eval : the return type of eval(). For matrices, this is just a const reference
- * in order to avoid a useless copy
- */
-
-template<typename T, int Sparseness = ei_traits<T>::Flags&SparseBit> class ei_eval;
-
-template<typename T> struct ei_eval<T,IsDense>
-{
- typedef Matrix<typename ei_traits<T>::Scalar,
- ei_traits<T>::RowsAtCompileTime,
- ei_traits<T>::ColsAtCompileTime,
- AutoAlign | (ei_traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
- ei_traits<T>::MaxRowsAtCompileTime,
- ei_traits<T>::MaxColsAtCompileTime
- > type;
-};
-
-// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
-template<typename _Scalar, int _Rows, int _Cols, int _StorageOrder, int _MaxRows, int _MaxCols>
-struct ei_eval<Matrix<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols>, IsDense>
-{
- typedef const Matrix<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols>& type;
-};
-
-/* ei_plain_matrix_type : the difference from ei_eval is that ei_plain_matrix_type is always a plain matrix type,
- * whereas ei_eval is a const reference in the case of a matrix
- */
-template<typename T> struct ei_plain_matrix_type
-{
- typedef Matrix<typename ei_traits<T>::Scalar,
- ei_traits<T>::RowsAtCompileTime,
- ei_traits<T>::ColsAtCompileTime,
- AutoAlign | (ei_traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
- ei_traits<T>::MaxRowsAtCompileTime,
- ei_traits<T>::MaxColsAtCompileTime
- > type;
-};
-
-/* ei_plain_matrix_type_column_major : same as ei_plain_matrix_type but guaranteed to be column-major
- */
-template<typename T> struct ei_plain_matrix_type_column_major
-{
- typedef Matrix<typename ei_traits<T>::Scalar,
- ei_traits<T>::RowsAtCompileTime,
- ei_traits<T>::ColsAtCompileTime,
- AutoAlign | ColMajor,
- ei_traits<T>::MaxRowsAtCompileTime,
- ei_traits<T>::MaxColsAtCompileTime
- > type;
-};
-
-template<typename T> struct ei_must_nest_by_value { enum { ret = false }; };
-template<typename T> struct ei_must_nest_by_value<NestByValue<T> > { enum { ret = true }; };
-
-/** \internal Determines how a given expression should be nested into another one.
- * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
- * nested into the bigger product expression. The choice is between nesting the expression b+c as-is, or
- * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
- * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
- * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
- *
- * \param T the type of the expression being nested
- * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
- *
- * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c).
- * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it,
- * the Product expression uses: ei_nested<S, 3>::ret, which turns out to be Matrix3d because the internal logic of
- * ei_nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand,
- * since a is of type Matrix3d, the Product expression nests it as ei_nested<Matrix3d, 3>::ret, which turns out to be
- * const Matrix3d&, because the internal logic of ei_nested determined that since a was already a matrix, there was no point
- * in copying it into another matrix.
- */
-template<typename T, int n=1, typename PlainMatrixType = typename ei_eval<T>::type> struct ei_nested
-{
- enum {
- CostEval = (n+1) * int(NumTraits<typename ei_traits<T>::Scalar>::ReadCost),
- CostNoEval = (n-1) * int(ei_traits<T>::CoeffReadCost)
- };
- typedef typename ei_meta_if<
- ei_must_nest_by_value<T>::ret,
- T,
- typename ei_meta_if<
- (int(ei_traits<T>::Flags) & EvalBeforeNestingBit)
- || ( int(CostEval) <= int(CostNoEval) ),
- PlainMatrixType,
- const T&
- >::ret
- >::ret type;
-};
-
-template<unsigned int Flags> struct ei_are_flags_consistent
-{
- enum { ret = !( (Flags&UnitDiagBit && Flags&ZeroDiagBit) )
- };
-};
-
-/** \internal Gives the type of a sub-matrix or sub-vector of a matrix of type \a ExpressionType and size \a Size
- * TODO: could be a good idea to define a big ReturnType struct ??
- */
-template<typename ExpressionType, int RowsOrSize=Dynamic, int Cols=Dynamic> struct BlockReturnType {
- typedef Block<ExpressionType, (ei_traits<ExpressionType>::RowsAtCompileTime == 1 ? 1 : RowsOrSize),
- (ei_traits<ExpressionType>::ColsAtCompileTime == 1 ? 1 : RowsOrSize)> SubVectorType;
- typedef Block<ExpressionType, RowsOrSize, Cols> Type;
-};
-
-template<typename CurrentType, typename NewType> struct ei_cast_return_type
-{
- typedef typename ei_meta_if<ei_is_same_type<CurrentType,NewType>::ret,const CurrentType&,NewType>::ret type;
-};
-
-#endif // EIGEN_XPRHELPER_H
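
As a small illustration of the ei_int_if_dynamic pattern documented above (again an internal helper, used here only to show the fixed/dynamic split):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      // Fixed case: stores nothing; value() just returns the template parameter.
      Eigen::ei_int_if_dynamic<3> fixed;
      std::cout << fixed.value() << std::endl;   // 3

      // Dynamic case: wraps a runtime int that can be updated.
      Eigen::ei_int_if_dynamic<Eigen::Dynamic> dyn(7);
      dyn.setValue(8);
      std::cout << dyn.value() << std::endl;     // 8
      return 0;
    }
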
diff --git a/extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h b/extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h
deleted file mode 100644
index 047152d0b99..00000000000
--- a/extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_ORTHOMETHODS_H
-#define EIGEN_ORTHOMETHODS_H
-
-/** \geometry_module
- *
- * \returns the cross product of \c *this and \a other
- *
- * Here is a very good explanation of cross-product: http://xkcd.com/199/
- */
-template<typename Derived>
-template<typename OtherDerived>
-inline typename MatrixBase<Derived>::PlainMatrixType
-MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
-{
- EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3)
-
- // Note that there is no need for an expression here since the compiler
-  // optimizes such a small temporary very well (even within a complex expression)
- const typename ei_nested<Derived,2>::type lhs(derived());
- const typename ei_nested<OtherDerived,2>::type rhs(other.derived());
- return typename ei_plain_matrix_type<Derived>::type(
- lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1),
- lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2),
- lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)
- );
-}
-
-template<typename Derived, int Size = Derived::SizeAtCompileTime>
-struct ei_unitOrthogonal_selector
-{
- typedef typename ei_plain_matrix_type<Derived>::type VectorType;
- typedef typename ei_traits<Derived>::Scalar Scalar;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- inline static VectorType run(const Derived& src)
- {
- VectorType perp(src.size());
-    /* Let us compute the cross product of *this with a vector
-     * that is not too close to being collinear to *this.
- */
-
- /* unless the x and y coords are both close to zero, we can
- * simply take ( -y, x, 0 ) and normalize it.
- */
- if((!ei_isMuchSmallerThan(src.x(), src.z()))
- || (!ei_isMuchSmallerThan(src.y(), src.z())))
- {
- RealScalar invnm = RealScalar(1)/src.template start<2>().norm();
- perp.coeffRef(0) = -ei_conj(src.y())*invnm;
- perp.coeffRef(1) = ei_conj(src.x())*invnm;
- perp.coeffRef(2) = 0;
- }
- /* if both x and y are close to zero, then the vector is close
-     * to the z-axis, so it's far from collinear to the x-axis for instance.
-     * So we take the cross product with (1,0,0) and normalize it.
- */
- else
- {
- RealScalar invnm = RealScalar(1)/src.template end<2>().norm();
- perp.coeffRef(0) = 0;
- perp.coeffRef(1) = -ei_conj(src.z())*invnm;
- perp.coeffRef(2) = ei_conj(src.y())*invnm;
- }
- if( (Derived::SizeAtCompileTime!=Dynamic && Derived::SizeAtCompileTime>3)
- || (Derived::SizeAtCompileTime==Dynamic && src.size()>3) )
- perp.end(src.size()-3).setZero();
-
- return perp;
- }
-};
-
-template<typename Derived>
-struct ei_unitOrthogonal_selector<Derived,2>
-{
- typedef typename ei_plain_matrix_type<Derived>::type VectorType;
- inline static VectorType run(const Derived& src)
- { return VectorType(-ei_conj(src.y()), ei_conj(src.x())).normalized(); }
-};
-
-/** \returns a unit vector which is orthogonal to \c *this
- *
- * The size of \c *this must be at least 2. If the size is exactly 2,
- * then the returned vector is a counterclockwise 90-degree rotation of \c *this, i.e., (-y,x).normalized().
- *
- * \sa cross()
- */
-template<typename Derived>
-typename MatrixBase<Derived>::PlainMatrixType
-MatrixBase<Derived>::unitOrthogonal() const
-{
- EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- return ei_unitOrthogonal_selector<Derived>::run(derived());
-}
-
-#endif // EIGEN_ORTHOMETHODS_H
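
A minimal usage sketch of the two methods removed here:

    #include <Eigen/Core>
    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      Eigen::Vector3f a(1, 0, 0), b(0, 1, 0);

      // cross() evaluates the closed-form expansion shown above.
      std::cout << a.cross(b).transpose() << std::endl;   // 0 0 1

      // unitOrthogonal() returns a normalized vector orthogonal to a.
      Eigen::Vector3f p = a.unitOrthogonal();
      std::cout << a.dot(p) << std::endl;                 // 0
      return 0;
    }
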
diff --git a/extern/Eigen2/Eigen/src/LU/Inverse.h b/extern/Eigen2/Eigen/src/LU/Inverse.h
deleted file mode 100644
index 3d4d6348949..00000000000
--- a/extern/Eigen2/Eigen/src/LU/Inverse.h
+++ /dev/null
@@ -1,258 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_INVERSE_H
-#define EIGEN_INVERSE_H
-
-/********************************************************************
-*** Part 1 : optimized implementations for fixed-size 2,3,4 cases ***
-********************************************************************/
-
-template<typename MatrixType>
-void ei_compute_inverse_in_size2_case(const MatrixType& matrix, MatrixType* result)
-{
- typedef typename MatrixType::Scalar Scalar;
- const Scalar invdet = Scalar(1) / matrix.determinant();
- result->coeffRef(0,0) = matrix.coeff(1,1) * invdet;
- result->coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
- result->coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
- result->coeffRef(1,1) = matrix.coeff(0,0) * invdet;
-}
-
-template<typename XprType, typename MatrixType>
-bool ei_compute_inverse_in_size2_case_with_check(const XprType& matrix, MatrixType* result)
-{
- typedef typename MatrixType::Scalar Scalar;
- const Scalar det = matrix.determinant();
- if(ei_isMuchSmallerThan(det, matrix.cwise().abs().maxCoeff())) return false;
- const Scalar invdet = Scalar(1) / det;
- result->coeffRef(0,0) = matrix.coeff(1,1) * invdet;
- result->coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
- result->coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
- result->coeffRef(1,1) = matrix.coeff(0,0) * invdet;
- return true;
-}
-
-template<typename MatrixType>
-void ei_compute_inverse_in_size3_case(const MatrixType& matrix, MatrixType* result)
-{
- typedef typename MatrixType::Scalar Scalar;
- const Scalar det_minor00 = matrix.minor(0,0).determinant();
- const Scalar det_minor10 = matrix.minor(1,0).determinant();
- const Scalar det_minor20 = matrix.minor(2,0).determinant();
- const Scalar invdet = Scalar(1) / ( det_minor00 * matrix.coeff(0,0)
- - det_minor10 * matrix.coeff(1,0)
- + det_minor20 * matrix.coeff(2,0) );
- result->coeffRef(0, 0) = det_minor00 * invdet;
- result->coeffRef(0, 1) = -det_minor10 * invdet;
- result->coeffRef(0, 2) = det_minor20 * invdet;
- result->coeffRef(1, 0) = -matrix.minor(0,1).determinant() * invdet;
- result->coeffRef(1, 1) = matrix.minor(1,1).determinant() * invdet;
- result->coeffRef(1, 2) = -matrix.minor(2,1).determinant() * invdet;
- result->coeffRef(2, 0) = matrix.minor(0,2).determinant() * invdet;
- result->coeffRef(2, 1) = -matrix.minor(1,2).determinant() * invdet;
- result->coeffRef(2, 2) = matrix.minor(2,2).determinant() * invdet;
-}
-
-template<typename MatrixType>
-bool ei_compute_inverse_in_size4_case_helper(const MatrixType& matrix, MatrixType* result)
-{
- /* Let's split M into four 2x2 blocks:
- * (P Q)
- * (R S)
- * If P is invertible, with inverse denoted by P_inverse, and if
- * (S - R*P_inverse*Q) is also invertible, then the inverse of M is
- * (P' Q')
- * (R' S')
- * where
- * S' = (S - R*P_inverse*Q)^(-1)
-   * P' = P_inverse + (P_inverse*Q) * S' * (R*P_inverse)
- * Q' = -(P_inverse*Q) * S'
- * R' = -S' * (R*P_inverse)
- */
- typedef Block<MatrixType,2,2> XprBlock22;
- typedef typename MatrixBase<XprBlock22>::PlainMatrixType Block22;
- Block22 P_inverse;
- if(ei_compute_inverse_in_size2_case_with_check(matrix.template block<2,2>(0,0), &P_inverse))
- {
- const Block22 Q = matrix.template block<2,2>(0,2);
- const Block22 P_inverse_times_Q = P_inverse * Q;
- const XprBlock22 R = matrix.template block<2,2>(2,0);
- const Block22 R_times_P_inverse = R * P_inverse;
- const Block22 R_times_P_inverse_times_Q = R_times_P_inverse * Q;
- const XprBlock22 S = matrix.template block<2,2>(2,2);
- const Block22 X = S - R_times_P_inverse_times_Q;
- Block22 Y;
- ei_compute_inverse_in_size2_case(X, &Y);
- result->template block<2,2>(2,2) = Y;
- result->template block<2,2>(2,0) = - Y * R_times_P_inverse;
- const Block22 Z = P_inverse_times_Q * Y;
- result->template block<2,2>(0,2) = - Z;
- result->template block<2,2>(0,0) = P_inverse + Z * R_times_P_inverse;
- return true;
- }
- else
- {
- return false;
- }
-}
-
-template<typename MatrixType>
-void ei_compute_inverse_in_size4_case(const MatrixType& matrix, MatrixType* result)
-{
- if(ei_compute_inverse_in_size4_case_helper(matrix, result))
- {
- // good ! The topleft 2x2 block was invertible, so the 2x2 blocks approach is successful.
- return;
- }
- else
- {
- // rare case: the topleft 2x2 block is not invertible (but the matrix itself is assumed to be).
- // since this is a rare case, we don't need to optimize it. We just want to handle it with little
- // additional code.
- MatrixType m(matrix);
- m.row(0).swap(m.row(2));
- m.row(1).swap(m.row(3));
- if(ei_compute_inverse_in_size4_case_helper(m, result))
- {
- // good, the topleft 2x2 block of m is invertible. Since m is different from matrix in that some
- // rows were permuted, the actual inverse of matrix is derived from the inverse of m by permuting
- // the corresponding columns.
- result->col(0).swap(result->col(2));
- result->col(1).swap(result->col(3));
- }
- else
- {
- // last possible case. Since matrix is assumed to be invertible, this last case has to work.
- // first, undo the swaps previously made
- m.row(0).swap(m.row(2));
- m.row(1).swap(m.row(3));
-      // swap row 0 with whichever of rows 0 and 1 has the biggest first two coeffs (by absolute sum)
- int swap0with = ei_abs(m.coeff(0,0))+ei_abs(m.coeff(0,1))>ei_abs(m.coeff(1,0))+ei_abs(m.coeff(1,1)) ? 0 : 1;
- m.row(0).swap(m.row(swap0with));
-      // swap row 1 with whichever of rows 2 and 3 has the biggest first two coeffs (by absolute sum)
- int swap1with = ei_abs(m.coeff(2,0))+ei_abs(m.coeff(2,1))>ei_abs(m.coeff(3,0))+ei_abs(m.coeff(3,1)) ? 2 : 3;
- m.row(1).swap(m.row(swap1with));
- ei_compute_inverse_in_size4_case_helper(m, result);
- result->col(1).swap(result->col(swap1with));
- result->col(0).swap(result->col(swap0with));
- }
- }
-}
-
-/***********************************************
-*** Part 2 : selector and MatrixBase methods ***
-***********************************************/
-
-template<typename MatrixType, int Size = MatrixType::RowsAtCompileTime>
-struct ei_compute_inverse
-{
- static inline void run(const MatrixType& matrix, MatrixType* result)
- {
- LU<MatrixType> lu(matrix);
- lu.computeInverse(result);
- }
-};
-
-template<typename MatrixType>
-struct ei_compute_inverse<MatrixType, 1>
-{
- static inline void run(const MatrixType& matrix, MatrixType* result)
- {
- typedef typename MatrixType::Scalar Scalar;
- result->coeffRef(0,0) = Scalar(1) / matrix.coeff(0,0);
- }
-};
-
-template<typename MatrixType>
-struct ei_compute_inverse<MatrixType, 2>
-{
- static inline void run(const MatrixType& matrix, MatrixType* result)
- {
- ei_compute_inverse_in_size2_case(matrix, result);
- }
-};
-
-template<typename MatrixType>
-struct ei_compute_inverse<MatrixType, 3>
-{
- static inline void run(const MatrixType& matrix, MatrixType* result)
- {
- ei_compute_inverse_in_size3_case(matrix, result);
- }
-};
-
-template<typename MatrixType>
-struct ei_compute_inverse<MatrixType, 4>
-{
- static inline void run(const MatrixType& matrix, MatrixType* result)
- {
- ei_compute_inverse_in_size4_case(matrix, result);
- }
-};
-
-/** \lu_module
- *
- * Computes the matrix inverse of this matrix.
- *
- * \note This matrix must be invertible, otherwise the result is undefined.
- *
- * \param result Pointer to the matrix in which to store the result.
- *
- * Example: \include MatrixBase_computeInverse.cpp
- * Output: \verbinclude MatrixBase_computeInverse.out
- *
- * \sa inverse()
- */
-template<typename Derived>
-inline void MatrixBase<Derived>::computeInverse(PlainMatrixType *result) const
-{
- ei_assert(rows() == cols());
- EIGEN_STATIC_ASSERT(NumTraits<Scalar>::HasFloatingPoint,NUMERIC_TYPE_MUST_BE_FLOATING_POINT)
- ei_compute_inverse<PlainMatrixType>::run(eval(), result);
-}
-
-/** \lu_module
- *
- * \returns the matrix inverse of this matrix.
- *
- * \note This matrix must be invertible, otherwise the result is undefined.
- *
- * \note This method returns a matrix by value, which can be inefficient. To avoid that overhead,
- * use computeInverse() instead.
- *
- * Example: \include MatrixBase_inverse.cpp
- * Output: \verbinclude MatrixBase_inverse.out
- *
- * \sa computeInverse()
- */
-template<typename Derived>
-inline const typename MatrixBase<Derived>::PlainMatrixType MatrixBase<Derived>::inverse() const
-{
- PlainMatrixType result(rows(), cols());
- computeInverse(&result);
- return result;
-}
-
-#endif // EIGEN_INVERSE_H
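
A minimal usage sketch of the two entry points documented above:

    #include <Eigen/Core>
    #include <Eigen/LU>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d m;
      m << 2, 0,
           0, 4;

      // Return-by-value convenience (costs one extra copy)...
      std::cout << m.inverse() << std::endl;     // diag(0.5, 0.25)

      // ...or the in-place variant that avoids it.
      Eigen::Matrix2d inv;
      m.computeInverse(&inv);
      std::cout << inv << std::endl;
      return 0;
    }
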
diff --git a/extern/Eigen2/Eigen/src/LU/LU.h b/extern/Eigen2/Eigen/src/LU/LU.h
deleted file mode 100644
index 176e76a91a3..00000000000
--- a/extern/Eigen2/Eigen/src/LU/LU.h
+++ /dev/null
@@ -1,541 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_LU_H
-#define EIGEN_LU_H
-
-/** \ingroup LU_Module
- *
- * \class LU
- *
- * \brief LU decomposition of a matrix with complete pivoting, and related features
- *
- * \param MatrixType the type of the matrix of which we are computing the LU decomposition
- *
- * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A
- * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q
- * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal
- * coefficients) of U are sorted in such a way that any zeros are at the end, so that the rank
- * of A is the index of the first zero on the diagonal of U (with indices starting at 0) if any.
- *
- * This decomposition provides the generic approach to solving systems of linear equations, computing
- * the rank, invertibility, inverse, kernel, and determinant.
- *
- * This LU decomposition is very stable and well tested with large matrices. Even exact rank computation
- * works at sizes larger than 1000x1000. However, there are use cases where the SVD decomposition is inherently
- * more stable when dealing with numerically damaged input. For example, computing the kernel is more stable with
- * SVD because the SVD can determine which singular values are negligible while LU has to work at the level of matrix
- * coefficients that are less meaningful in this respect.
- *
- * The data of the LU decomposition can be directly accessed through the methods matrixLU(),
- * permutationP(), permutationQ().
- *
- * As an example, here is how the original matrix can be retrieved:
- * \include class_LU.cpp
- * Output: \verbinclude class_LU.out
- *
- * \sa MatrixBase::lu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse()
- */
-template<typename MatrixType> class LU
-{
- public:
-
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
- typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
- typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
- typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVectorType;
-
- enum { MaxSmallDimAtCompileTime = EIGEN_ENUM_MIN(
- MatrixType::MaxColsAtCompileTime,
- MatrixType::MaxRowsAtCompileTime)
- };
-
- typedef Matrix<typename MatrixType::Scalar,
- MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix" is the number of cols of the original matrix
- // so that the product "matrix * kernel = zero" makes sense
- Dynamic, // we don't know at compile-time the dimension of the kernel
- MatrixType::Options,
- MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
- MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space, whose dimension is the number
- // of columns of the original matrix
- > KernelResultType;
-
- typedef Matrix<typename MatrixType::Scalar,
- MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose dimension is the number
- // of rows of the original matrix
- Dynamic, // we don't know at compile time the dimension of the image (the rank)
- MatrixType::Options,
- MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
- MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns.
- > ImageResultType;
-
- /** Constructor.
- *
- * \param matrix the matrix of which to compute the LU decomposition.
- */
- LU(const MatrixType& matrix);
-
- /** \returns the LU decomposition matrix: the upper-triangular part is U, the
- * unit-lower-triangular part is L (at least for square matrices; in the non-square
- * case, special care is needed, see the documentation of class LU).
- *
- * \sa matrixL(), matrixU()
- */
- inline const MatrixType& matrixLU() const
- {
- return m_lu;
- }
-
- /** \returns a vector of integers, whose size is the number of rows of the matrix being decomposed,
- * representing the P permutation i.e. the permutation of the rows. For its precise meaning,
- * see the examples given in the documentation of class LU.
- *
- * \sa permutationQ()
- */
- inline const IntColVectorType& permutationP() const
- {
- return m_p;
- }
-
- /** \returns a vector of integers, whose size is the number of columns of the matrix being
- * decomposed, representing the Q permutation i.e. the permutation of the columns.
- * For its precise meaning, see the examples given in the documentation of class LU.
- *
- * \sa permutationP()
- */
- inline const IntRowVectorType& permutationQ() const
- {
- return m_q;
- }
-
- /** Computes a basis of the kernel of the matrix, also called the null-space of the matrix.
- *
- * \note This method is only allowed on non-invertible matrices, as determined by
- * isInvertible(). Calling it on an invertible matrix will make an assertion fail.
- *
- * \param result a pointer to the matrix in which to store the kernel. The columns of this
- * matrix will be set to form a basis of the kernel (it will be resized
- * if necessary).
- *
- * Example: \include LU_computeKernel.cpp
- * Output: \verbinclude LU_computeKernel.out
- *
- * \sa kernel(), computeImage(), image()
- */
- template<typename KernelMatrixType>
- void computeKernel(KernelMatrixType *result) const;
-
-    /** Computes a basis of the image of the matrix, also called the column-space or range of the matrix.
- *
- * \note Calling this method on the zero matrix will make an assertion fail.
- *
- * \param result a pointer to the matrix in which to store the image. The columns of this
- * matrix will be set to form a basis of the image (it will be resized
- * if necessary).
- *
- * Example: \include LU_computeImage.cpp
- * Output: \verbinclude LU_computeImage.out
- *
- * \sa image(), computeKernel(), kernel()
- */
- template<typename ImageMatrixType>
- void computeImage(ImageMatrixType *result) const;
-
- /** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix
- * will form a basis of the kernel.
- *
- * \note: this method is only allowed on non-invertible matrices, as determined by
- * isInvertible(). Calling it on an invertible matrix will make an assertion fail.
- *
- * \note: this method returns a matrix by value, which induces some inefficiency.
- * If you prefer to avoid this overhead, use computeKernel() instead.
- *
- * Example: \include LU_kernel.cpp
- * Output: \verbinclude LU_kernel.out
- *
- * \sa computeKernel(), image()
- */
- const KernelResultType kernel() const;
-
- /** \returns the image of the matrix, also called its column-space. The columns of the returned matrix
-      * will form a basis of the image.
- *
- * \note: Calling this method on the zero matrix will make an assertion fail.
- *
- * \note: this method returns a matrix by value, which induces some inefficiency.
- * If you prefer to avoid this overhead, use computeImage() instead.
- *
- * Example: \include LU_image.cpp
- * Output: \verbinclude LU_image.out
- *
- * \sa computeImage(), kernel()
- */
- const ImageResultType image() const;
-
- /** This method finds a solution x to the equation Ax=b, where A is the matrix of which
- * *this is the LU decomposition, if any exists.
- *
- * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
- * the only requirement in order for the equation to make sense is that
- * b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
- * \param result a pointer to the vector or matrix in which to store the solution, if any exists.
- * Resized if necessary, so that result->rows()==A.cols() and result->cols()==b.cols().
- * If no solution exists, *result is left with undefined coefficients.
- *
- * \returns true if any solution exists, false if no solution exists.
- *
-      * \note If more than one solution exists, this method will arbitrarily choose one.
- * If you need a complete analysis of the space of solutions, take the one solution obtained
- * by this method and add to it elements of the kernel, as determined by kernel().
- *
- * Example: \include LU_solve.cpp
- * Output: \verbinclude LU_solve.out
- *
- * \sa MatrixBase::solveTriangular(), kernel(), computeKernel(), inverse(), computeInverse()
- */
- template<typename OtherDerived, typename ResultType>
- bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const;
-
- /** \returns the determinant of the matrix of which
- * *this is the LU decomposition. It has only linear complexity
- * (that is, O(n) where n is the dimension of the square matrix)
- * as the LU decomposition has already been computed.
- *
- * \note This is only for square matrices.
- *
- * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
- * optimized paths.
- *
- * \warning a determinant can be very big or small, so for matrices
- * of large enough dimension, there is a risk of overflow/underflow.
- *
- * \sa MatrixBase::determinant()
- */
- typename ei_traits<MatrixType>::Scalar determinant() const;
-
- /** \returns the rank of the matrix of which *this is the LU decomposition.
- *
- * \note This is computed at the time of the construction of the LU decomposition. This
- * method does not perform any further computation.
- */
- inline int rank() const
- {
- return m_rank;
- }
-
- /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition.
- *
- * \note Since the rank is computed at the time of the construction of the LU decomposition, this
- * method almost does not perform any further computation.
- */
- inline int dimensionOfKernel() const
- {
- return m_lu.cols() - m_rank;
- }
-
- /** \returns true if the matrix of which *this is the LU decomposition represents an injective
- * linear map, i.e. has trivial kernel; false otherwise.
- *
- * \note Since the rank is computed at the time of the construction of the LU decomposition, this
- * method almost does not perform any further computation.
- */
- inline bool isInjective() const
- {
- return m_rank == m_lu.cols();
- }
-
- /** \returns true if the matrix of which *this is the LU decomposition represents a surjective
- * linear map; false otherwise.
- *
- * \note Since the rank is computed at the time of the construction of the LU decomposition, this
- * method almost does not perform any further computation.
- */
- inline bool isSurjective() const
- {
- return m_rank == m_lu.rows();
- }
-
- /** \returns true if the matrix of which *this is the LU decomposition is invertible.
- *
- * \note Since the rank is computed at the time of the construction of the LU decomposition, this
- * method almost does not perform any further computation.
- */
- inline bool isInvertible() const
- {
- return isInjective() && isSurjective();
- }
-
- /** Computes the inverse of the matrix of which *this is the LU decomposition.
- *
- * \param result a pointer to the matrix into which to store the inverse. Resized if needed.
- *
- * \note If this matrix is not invertible, *result is left with undefined coefficients.
- * Use isInvertible() to first determine whether this matrix is invertible.
- *
- * \sa MatrixBase::computeInverse(), inverse()
- */
- inline void computeInverse(MatrixType *result) const
- {
- solve(MatrixType::Identity(m_lu.rows(), m_lu.cols()), result);
- }
-
- /** \returns the inverse of the matrix of which *this is the LU decomposition.
- *
- * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
- * Use isInvertible() to first determine whether this matrix is invertible.
- *
- * \sa computeInverse(), MatrixBase::inverse()
- */
- inline MatrixType inverse() const
- {
- MatrixType result;
- computeInverse(&result);
- return result;
- }
-
- protected:
- const MatrixType& m_originalMatrix;
- MatrixType m_lu;
- IntColVectorType m_p;
- IntRowVectorType m_q;
- int m_det_pq;
- int m_rank;
- RealScalar m_precision;
-};
-
-template<typename MatrixType>
-LU<MatrixType>::LU(const MatrixType& matrix)
- : m_originalMatrix(matrix),
- m_lu(matrix),
- m_p(matrix.rows()),
- m_q(matrix.cols())
-{
- const int size = matrix.diagonal().size();
- const int rows = matrix.rows();
- const int cols = matrix.cols();
-
- // this formula comes from experimenting (see "LU precision tuning" thread on the list)
- // and turns out to be identical to Higham's formula used already in LDLt.
- m_precision = machine_epsilon<Scalar>() * size;
-
- IntColVectorType rows_transpositions(matrix.rows());
- IntRowVectorType cols_transpositions(matrix.cols());
- int number_of_transpositions = 0;
-
- RealScalar biggest = RealScalar(0);
- m_rank = size;
- for(int k = 0; k < size; ++k)
- {
- int row_of_biggest_in_corner, col_of_biggest_in_corner;
- RealScalar biggest_in_corner;
-
- biggest_in_corner = m_lu.corner(Eigen::BottomRight, rows-k, cols-k)
- .cwise().abs()
- .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
- row_of_biggest_in_corner += k;
- col_of_biggest_in_corner += k;
- if(k==0) biggest = biggest_in_corner;
-
- // if the corner is negligible, then we have less than full rank, and we can finish early
- if(ei_isMuchSmallerThan(biggest_in_corner, biggest, m_precision))
- {
- m_rank = k;
- for(int i = k; i < size; i++)
- {
- rows_transpositions.coeffRef(i) = i;
- cols_transpositions.coeffRef(i) = i;
- }
- break;
- }
-
- rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;
- cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;
- if(k != row_of_biggest_in_corner) {
- m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
- ++number_of_transpositions;
- }
- if(k != col_of_biggest_in_corner) {
- m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner));
- ++number_of_transpositions;
- }
- if(k<rows-1)
- m_lu.col(k).end(rows-k-1) /= m_lu.coeff(k,k);
- if(k<size-1)
- for(int col = k + 1; col < cols; ++col)
- m_lu.col(col).end(rows-k-1) -= m_lu.col(k).end(rows-k-1) * m_lu.coeff(k,col);
- }
-
- for(int k = 0; k < matrix.rows(); ++k) m_p.coeffRef(k) = k;
- for(int k = size-1; k >= 0; --k)
- std::swap(m_p.coeffRef(k), m_p.coeffRef(rows_transpositions.coeff(k)));
-
- for(int k = 0; k < matrix.cols(); ++k) m_q.coeffRef(k) = k;
- for(int k = 0; k < size; ++k)
- std::swap(m_q.coeffRef(k), m_q.coeffRef(cols_transpositions.coeff(k)));
-
- m_det_pq = (number_of_transpositions%2) ? -1 : 1;
-}
-
-template<typename MatrixType>
-typename ei_traits<MatrixType>::Scalar LU<MatrixType>::determinant() const
-{
- return Scalar(m_det_pq) * m_lu.diagonal().redux(ei_scalar_product_op<Scalar>());
-}
-
-template<typename MatrixType>
-template<typename KernelMatrixType>
-void LU<MatrixType>::computeKernel(KernelMatrixType *result) const
-{
- ei_assert(!isInvertible());
- const int dimker = dimensionOfKernel(), cols = m_lu.cols();
- result->resize(cols, dimker);
-
- /* Let us use the following lemma:
- *
- * Lemma: If the matrix A has the LU decomposition PAQ = LU,
- * then Ker A = Q(Ker U).
- *
-   * Proof: since P, Q, L are invertible, Ax = 0 <=> U(Q^{-1}x) = 0, i.e. Ker A = Q(Ker U).
- */
-
- /* Thus, all we need to do is to compute Ker U, and then apply Q.
- *
- * U is upper triangular, with eigenvalues sorted so that any zeros appear at the end.
- * Thus, the diagonal of U ends with exactly
-   * dimker zeros. Let us use that to construct dimker linearly
- * independent vectors in Ker U.
- */
-
- Matrix<Scalar, Dynamic, Dynamic, MatrixType::Options,
- MatrixType::MaxColsAtCompileTime, MatrixType::MaxColsAtCompileTime>
- y(-m_lu.corner(TopRight, m_rank, dimker));
-
- m_lu.corner(TopLeft, m_rank, m_rank)
- .template marked<UpperTriangular>()
- .solveTriangularInPlace(y);
-
- for(int i = 0; i < m_rank; ++i) result->row(m_q.coeff(i)) = y.row(i);
- for(int i = m_rank; i < cols; ++i) result->row(m_q.coeff(i)).setZero();
- for(int k = 0; k < dimker; ++k) result->coeffRef(m_q.coeff(m_rank+k), k) = Scalar(1);
-}
-
-template<typename MatrixType>
-const typename LU<MatrixType>::KernelResultType
-LU<MatrixType>::kernel() const
-{
- KernelResultType result(m_lu.cols(), dimensionOfKernel());
- computeKernel(&result);
- return result;
-}
-
-template<typename MatrixType>
-template<typename ImageMatrixType>
-void LU<MatrixType>::computeImage(ImageMatrixType *result) const
-{
- ei_assert(m_rank > 0);
- result->resize(m_originalMatrix.rows(), m_rank);
- for(int i = 0; i < m_rank; ++i)
- result->col(i) = m_originalMatrix.col(m_q.coeff(i));
-}
-
-template<typename MatrixType>
-const typename LU<MatrixType>::ImageResultType
-LU<MatrixType>::image() const
-{
- ImageResultType result(m_originalMatrix.rows(), m_rank);
- computeImage(&result);
- return result;
-}
-
-template<typename MatrixType>
-template<typename OtherDerived, typename ResultType>
-bool LU<MatrixType>::solve(
- const MatrixBase<OtherDerived>& b,
- ResultType *result
-) const
-{
- /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.
- * So we proceed as follows:
- * Step 1: compute c = Pb.
- * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible.
- * Step 3: replace c by the solution x to Ux = c. Check if a solution really exists.
- * Step 4: result = Qc;
- */
-
- const int rows = m_lu.rows(), cols = m_lu.cols();
- ei_assert(b.rows() == rows);
- const int smalldim = std::min(rows, cols);
-
- typename OtherDerived::PlainMatrixType c(b.rows(), b.cols());
-
- // Step 1
- for(int i = 0; i < rows; ++i) c.row(m_p.coeff(i)) = b.row(i);
-
- // Step 2
- m_lu.corner(Eigen::TopLeft,smalldim,smalldim).template marked<UnitLowerTriangular>()
- .solveTriangularInPlace(
- c.corner(Eigen::TopLeft, smalldim, c.cols()));
- if(rows>cols)
- {
- c.corner(Eigen::BottomLeft, rows-cols, c.cols())
- -= m_lu.corner(Eigen::BottomLeft, rows-cols, cols) * c.corner(Eigen::TopLeft, cols, c.cols());
- }
-
- // Step 3
- if(!isSurjective())
- {
-    // is c in the image of U?
- RealScalar biggest_in_c = m_rank>0 ? c.corner(TopLeft, m_rank, c.cols()).cwise().abs().maxCoeff() : 0;
- for(int col = 0; col < c.cols(); ++col)
- for(int row = m_rank; row < c.rows(); ++row)
- if(!ei_isMuchSmallerThan(c.coeff(row,col), biggest_in_c, m_precision))
- return false;
- }
- m_lu.corner(TopLeft, m_rank, m_rank)
- .template marked<UpperTriangular>()
- .solveTriangularInPlace(c.corner(TopLeft, m_rank, c.cols()));
-
- // Step 4
- result->resize(m_lu.cols(), b.cols());
- for(int i = 0; i < m_rank; ++i) result->row(m_q.coeff(i)) = c.row(i);
- for(int i = m_rank; i < m_lu.cols(); ++i) result->row(m_q.coeff(i)).setZero();
- return true;
-}
-
-/** \lu_module
- *
- * \return the LU decomposition of \c *this.
- *
- * \sa class LU
- */
-template<typename Derived>
-inline const LU<typename MatrixBase<Derived>::PlainMatrixType>
-MatrixBase<Derived>::lu() const
-{
- return LU<PlainMatrixType>(eval());
-}
-
-#endif // EIGEN_LU_H
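
To tie the pieces together, a small sketch exercising the rank-revealing API on a classic rank-2 matrix (assuming the default precision threshold classifies the third pivot as negligible, which it does for this example):

    #include <Eigen/Core>
    #include <Eigen/LU>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d A;
      A << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;   // rank 2: row2 = (row1 + row3) / 2

      Eigen::LU<Eigen::Matrix3d> lu(A);
      std::cout << lu.rank()              << std::endl;   // 2
      std::cout << lu.dimensionOfKernel() << std::endl;   // 1
      std::cout << lu.isInvertible()      << std::endl;   // 0

      // Solve Ax = b for a right-hand side inside the image: b = A*(1,1,1)^T.
      Eigen::Vector3d b(6, 15, 24), x;
      if (lu.solve(b, &x))
        std::cout << (A * x).transpose() << std::endl;    // reproduces 6 15 24
      return 0;
    }
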
diff --git a/extern/Eigen2/Eigen/src/QR/EigenSolver.h b/extern/Eigen2/Eigen/src/QR/EigenSolver.h
deleted file mode 100644
index 70f21cebcdb..00000000000
--- a/extern/Eigen2/Eigen/src/QR/EigenSolver.h
+++ /dev/null
@@ -1,722 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_EIGENSOLVER_H
-#define EIGEN_EIGENSOLVER_H
-
-/** \ingroup QR_Module
- * \nonstableyet
- *
- * \class EigenSolver
- *
- * \brief Eigenvalues/eigenvectors solver for non-selfadjoint matrices
- *
- * \param MatrixType the type of the matrix of which we are computing the eigen decomposition
- *
- * Currently it only supports real matrices.
- *
- * \note this code was adapted from JAMA (public domain)
- *
- * \sa MatrixBase::eigenvalues(), SelfAdjointEigenSolver
- */
-template<typename _MatrixType> class EigenSolver
-{
- public:
-
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef std::complex<RealScalar> Complex;
- typedef Matrix<Complex, MatrixType::ColsAtCompileTime, 1> EigenvalueType;
- typedef Matrix<Complex, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> EigenvectorType;
- typedef Matrix<RealScalar, MatrixType::ColsAtCompileTime, 1> RealVectorType;
- typedef Matrix<RealScalar, Dynamic, 1> RealVectorTypeX;
-
- /**
- * \brief Default Constructor.
- *
- * The default constructor is useful in cases in which the user intends to
- * perform decompositions via EigenSolver::compute(const MatrixType&).
- */
- EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false) {}
-
- EigenSolver(const MatrixType& matrix)
- : m_eivec(matrix.rows(), matrix.cols()),
- m_eivalues(matrix.cols()),
- m_isInitialized(false)
- {
- compute(matrix);
- }
-
-
- EigenvectorType eigenvectors(void) const;
-
- /** \returns a real matrix V of pseudo eigenvectors.
- *
- * Let D be the block diagonal matrix with the real eigenvalues in 1x1 blocks,
- * and any complex values u+iv in 2x2 blocks [u v ; -v u]. Then, the matrices D
- * and V satisfy A*V = V*D.
- *
-    * More precisely, if the diagonal matrix of the eigenvalues is:\n
- * \f$
- * \left[ \begin{array}{cccccc}
- * u+iv & & & & & \\
- * & u-iv & & & & \\
- * & & a+ib & & & \\
- * & & & a-ib & & \\
- * & & & & x & \\
- * & & & & & y \\
- * \end{array} \right]
- * \f$ \n
- * then, we have:\n
- * \f$
- * D =\left[ \begin{array}{cccccc}
- * u & v & & & & \\
- * -v & u & & & & \\
- * & & a & b & & \\
- * & & -b & a & & \\
- * & & & & x & \\
- * & & & & & y \\
- * \end{array} \right]
- * \f$
- *
- * \sa pseudoEigenvalueMatrix()
- */
- const MatrixType& pseudoEigenvectors() const
- {
- ei_assert(m_isInitialized && "EigenSolver is not initialized.");
- return m_eivec;
- }
-
- MatrixType pseudoEigenvalueMatrix() const;
-
- /** \returns the eigenvalues as a column vector */
- EigenvalueType eigenvalues() const
- {
- ei_assert(m_isInitialized && "EigenSolver is not initialized.");
- return m_eivalues;
- }
-
- void compute(const MatrixType& matrix);
-
- private:
-
- void orthes(MatrixType& matH, RealVectorType& ort);
- void hqr2(MatrixType& matH);
-
- protected:
- MatrixType m_eivec;
- EigenvalueType m_eivalues;
- bool m_isInitialized;
-};
-
-/** \returns the real block diagonal matrix D of the eigenvalues.
- *
- * See pseudoEigenvectors() for the details.
- */
-template<typename MatrixType>
-MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
-{
- ei_assert(m_isInitialized && "EigenSolver is not initialized.");
- int n = m_eivec.cols();
- MatrixType matD = MatrixType::Zero(n,n);
- for (int i=0; i<n; ++i)
- {
- if (ei_isMuchSmallerThan(ei_imag(m_eivalues.coeff(i)), ei_real(m_eivalues.coeff(i))))
- matD.coeffRef(i,i) = ei_real(m_eivalues.coeff(i));
- else
- {
- matD.template block<2,2>(i,i) << ei_real(m_eivalues.coeff(i)), ei_imag(m_eivalues.coeff(i)),
- -ei_imag(m_eivalues.coeff(i)), ei_real(m_eivalues.coeff(i));
- ++i;
- }
- }
- return matD;
-}
-
-/** \returns the normalized complex eigenvectors as a matrix of column vectors.
- *
- * \sa eigenvalues(), pseudoEigenvectors()
- */
-template<typename MatrixType>
-typename EigenSolver<MatrixType>::EigenvectorType EigenSolver<MatrixType>::eigenvectors(void) const
-{
- ei_assert(m_isInitialized && "EigenSolver is not initialized.");
- int n = m_eivec.cols();
- EigenvectorType matV(n,n);
- for (int j=0; j<n; ++j)
- {
- if (ei_isMuchSmallerThan(ei_abs(ei_imag(m_eivalues.coeff(j))), ei_abs(ei_real(m_eivalues.coeff(j)))))
- {
-      // we have a real eigenvalue
- matV.col(j) = m_eivec.col(j).template cast<Complex>();
- }
- else
- {
- // we have a pair of complex eigenvalues
- for (int i=0; i<n; ++i)
- {
- matV.coeffRef(i,j) = Complex(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1));
- matV.coeffRef(i,j+1) = Complex(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
- }
- matV.col(j).normalize();
- matV.col(j+1).normalize();
- ++j;
- }
- }
- return matV;
-}
-
-template<typename MatrixType>
-void EigenSolver<MatrixType>::compute(const MatrixType& matrix)
-{
- assert(matrix.cols() == matrix.rows());
- int n = matrix.cols();
- m_eivalues.resize(n,1);
-
- MatrixType matH = matrix;
- RealVectorType ort(n);
-
- // Reduce to Hessenberg form.
- orthes(matH, ort);
-
- // Reduce Hessenberg to real Schur form.
- hqr2(matH);
-
- m_isInitialized = true;
-}
-
-// Nonsymmetric reduction to Hessenberg form.
-template<typename MatrixType>
-void EigenSolver<MatrixType>::orthes(MatrixType& matH, RealVectorType& ort)
-{
- // This is derived from the Algol procedures orthes and ortran,
- // by Martin and Wilkinson, Handbook for Auto. Comp.,
- // Vol.ii-Linear Algebra, and the corresponding
- // Fortran subroutines in EISPACK.
-
- int n = m_eivec.cols();
- int low = 0;
- int high = n-1;
-
- for (int m = low+1; m <= high-1; ++m)
- {
- // Scale column.
- RealScalar scale = matH.block(m, m-1, high-m+1, 1).cwise().abs().sum();
- if (scale != 0.0)
- {
- // Compute Householder transformation.
- RealScalar h = 0.0;
- // FIXME could be rewritten, but this one looks better wrt cache
- for (int i = high; i >= m; i--)
- {
- ort.coeffRef(i) = matH.coeff(i,m-1)/scale;
- h += ort.coeff(i) * ort.coeff(i);
- }
- RealScalar g = ei_sqrt(h);
- if (ort.coeff(m) > 0)
- g = -g;
- h = h - ort.coeff(m) * g;
- ort.coeffRef(m) = ort.coeff(m) - g;
-
- // Apply Householder similarity transformation
- // H = (I-u*u'/h)*H*(I-u*u'/h)
- int bSize = high-m+1;
- matH.block(m, m, bSize, n-m) -= ((ort.segment(m, bSize)/h)
- * (ort.segment(m, bSize).transpose() * matH.block(m, m, bSize, n-m)).lazy()).lazy();
-
- matH.block(0, m, high+1, bSize) -= ((matH.block(0, m, high+1, bSize) * ort.segment(m, bSize)).lazy()
- * (ort.segment(m, bSize)/h).transpose()).lazy();
-
- ort.coeffRef(m) = scale*ort.coeff(m);
- matH.coeffRef(m,m-1) = scale*g;
- }
- }
-
- // Accumulate transformations (Algol's ortran).
- m_eivec.setIdentity();
-
- for (int m = high-1; m >= low+1; m--)
- {
- if (matH.coeff(m,m-1) != 0.0)
- {
- ort.segment(m+1, high-m) = matH.col(m-1).segment(m+1, high-m);
-
- int bSize = high-m+1;
- m_eivec.block(m, m, bSize, bSize) += ( (ort.segment(m, bSize) / (matH.coeff(m,m-1) * ort.coeff(m) ) )
- * (ort.segment(m, bSize).transpose() * m_eivec.block(m, m, bSize, bSize)).lazy());
- }
- }
-}
-
-// Complex scalar division.
-template<typename Scalar>
-std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
-{
- Scalar r,d;
- if (ei_abs(yr) > ei_abs(yi))
- {
- r = yi/yr;
- d = yr + r*yi;
- return std::complex<Scalar>((xr + r*xi)/d, (xi - r*xr)/d);
- }
- else
- {
- r = yr/yi;
- d = yi + r*yr;
- return std::complex<Scalar>((r*xr + xi)/d, (r*xi - xr)/d);
- }
-}
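The branching above is the classical Smith algorithm for complex division: dividing through by the larger of |yr| and |yi| keeps the intermediate ratio r bounded by 1 in magnitude, so the result stays finite where the textbook formula, whose denominator is yr*yr + yi*yi, would overflow. For instance (a sketch, not part of the removed file):

    // the naive denominator yr*yr + yi*yi overflows to +inf for yr = yi = 1e200,
    // while cdiv returns the exact result of 1/(1e200 + 1e200*i):
    std::complex<double> q = cdiv(1.0, 0.0, 1e200, 1e200);   // (5e-201, -5e-201)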
-
-
-// Nonsymmetric reduction from Hessenberg to real Schur form.
-template<typename MatrixType>
-void EigenSolver<MatrixType>::hqr2(MatrixType& matH)
-{
- // This is derived from the Algol procedure hqr2,
- // by Martin and Wilkinson, Handbook for Auto. Comp.,
- // Vol.ii-Linear Algebra, and the corresponding
- // Fortran subroutine in EISPACK.
-
- // Initialize
- int nn = m_eivec.cols();
- int n = nn-1;
- int low = 0;
- int high = nn-1;
- Scalar eps = ei_pow(Scalar(2),ei_is_same_type<Scalar,float>::ret ? Scalar(-23) : Scalar(-52));
- Scalar exshift = 0.0;
- Scalar p=0,q=0,r=0,s=0,z=0,t,w,x,y;
-
- // Store roots isolated by balanc and compute matrix norm
- // FIXME to be efficient the following would require a triangular reduction code
- // Scalar norm = matH.upper().cwise().abs().sum() + matH.corner(BottomLeft,n,n).diagonal().cwise().abs().sum();
- Scalar norm = 0.0;
- for (int j = 0; j < nn; ++j)
- {
- // FIXME what's the purpose of the following since the condition is always false
- if ((j < low) || (j > high))
- {
- m_eivalues.coeffRef(j) = Complex(matH.coeff(j,j), 0.0);
- }
- norm += matH.row(j).segment(std::max(j-1,0), nn-std::max(j-1,0)).cwise().abs().sum();
- }
-
- // Outer loop over eigenvalue index
- int iter = 0;
- while (n >= low)
- {
- // Look for single small sub-diagonal element
- int l = n;
- while (l > low)
- {
- s = ei_abs(matH.coeff(l-1,l-1)) + ei_abs(matH.coeff(l,l));
- if (s == 0.0)
- s = norm;
- if (ei_abs(matH.coeff(l,l-1)) < eps * s)
- break;
- l--;
- }
-
- // Check for convergence
- // One root found
- if (l == n)
- {
- matH.coeffRef(n,n) = matH.coeff(n,n) + exshift;
- m_eivalues.coeffRef(n) = Complex(matH.coeff(n,n), 0.0);
- n--;
- iter = 0;
- }
- else if (l == n-1) // Two roots found
- {
- w = matH.coeff(n,n-1) * matH.coeff(n-1,n);
- p = (matH.coeff(n-1,n-1) - matH.coeff(n,n)) * Scalar(0.5);
- q = p * p + w;
- z = ei_sqrt(ei_abs(q));
- matH.coeffRef(n,n) = matH.coeff(n,n) + exshift;
- matH.coeffRef(n-1,n-1) = matH.coeff(n-1,n-1) + exshift;
- x = matH.coeff(n,n);
-
- // Scalar pair
- if (q >= 0)
- {
- if (p >= 0)
- z = p + z;
- else
- z = p - z;
-
- m_eivalues.coeffRef(n-1) = Complex(x + z, 0.0);
- m_eivalues.coeffRef(n) = Complex(z!=0.0 ? x - w / z : m_eivalues.coeff(n-1).real(), 0.0);
-
- x = matH.coeff(n,n-1);
- s = ei_abs(x) + ei_abs(z);
- p = x / s;
- q = z / s;
- r = ei_sqrt(p * p+q * q);
- p = p / r;
- q = q / r;
-
- // Row modification
- for (int j = n-1; j < nn; ++j)
- {
- z = matH.coeff(n-1,j);
- matH.coeffRef(n-1,j) = q * z + p * matH.coeff(n,j);
- matH.coeffRef(n,j) = q * matH.coeff(n,j) - p * z;
- }
-
- // Column modification
- for (int i = 0; i <= n; ++i)
- {
- z = matH.coeff(i,n-1);
- matH.coeffRef(i,n-1) = q * z + p * matH.coeff(i,n);
- matH.coeffRef(i,n) = q * matH.coeff(i,n) - p * z;
- }
-
- // Accumulate transformations
- for (int i = low; i <= high; ++i)
- {
- z = m_eivec.coeff(i,n-1);
- m_eivec.coeffRef(i,n-1) = q * z + p * m_eivec.coeff(i,n);
- m_eivec.coeffRef(i,n) = q * m_eivec.coeff(i,n) - p * z;
- }
- }
- else // Complex pair
- {
- m_eivalues.coeffRef(n-1) = Complex(x + p, z);
- m_eivalues.coeffRef(n) = Complex(x + p, -z);
- }
- n = n - 2;
- iter = 0;
- }
- else // No convergence yet
- {
- // Form shift
- x = matH.coeff(n,n);
- y = 0.0;
- w = 0.0;
- if (l < n)
- {
- y = matH.coeff(n-1,n-1);
- w = matH.coeff(n,n-1) * matH.coeff(n-1,n);
- }
-
- // Wilkinson's original ad hoc shift
- if (iter == 10)
- {
- exshift += x;
- for (int i = low; i <= n; ++i)
- matH.coeffRef(i,i) -= x;
- s = ei_abs(matH.coeff(n,n-1)) + ei_abs(matH.coeff(n-1,n-2));
- x = y = Scalar(0.75) * s;
- w = Scalar(-0.4375) * s * s;
- }
-
- // MATLAB's new ad hoc shift
- if (iter == 30)
- {
- s = Scalar((y - x) / 2.0);
- s = s * s + w;
- if (s > 0)
- {
- s = ei_sqrt(s);
- if (y < x)
- s = -s;
- s = Scalar(x - w / ((y - x) / 2.0 + s));
- for (int i = low; i <= n; ++i)
- matH.coeffRef(i,i) -= s;
- exshift += s;
- x = y = w = Scalar(0.964);
- }
- }
-
- iter = iter + 1; // (Could check iteration count here.)
-
- // Look for two consecutive small sub-diagonal elements
- int m = n-2;
- while (m >= l)
- {
- z = matH.coeff(m,m);
- r = x - z;
- s = y - z;
- p = (r * s - w) / matH.coeff(m+1,m) + matH.coeff(m,m+1);
- q = matH.coeff(m+1,m+1) - z - r - s;
- r = matH.coeff(m+2,m+1);
- s = ei_abs(p) + ei_abs(q) + ei_abs(r);
- p = p / s;
- q = q / s;
- r = r / s;
- if (m == l) {
- break;
- }
- if (ei_abs(matH.coeff(m,m-1)) * (ei_abs(q) + ei_abs(r)) <
- eps * (ei_abs(p) * (ei_abs(matH.coeff(m-1,m-1)) + ei_abs(z) +
- ei_abs(matH.coeff(m+1,m+1)))))
- {
- break;
- }
- m--;
- }
-
- for (int i = m+2; i <= n; ++i)
- {
- matH.coeffRef(i,i-2) = 0.0;
- if (i > m+2)
- matH.coeffRef(i,i-3) = 0.0;
- }
-
- // Double QR step involving rows l:n and columns m:n
- for (int k = m; k <= n-1; ++k)
- {
- int notlast = (k != n-1);
- if (k != m) {
- p = matH.coeff(k,k-1);
- q = matH.coeff(k+1,k-1);
- r = notlast ? matH.coeff(k+2,k-1) : Scalar(0);
- x = ei_abs(p) + ei_abs(q) + ei_abs(r);
- if (x != 0.0)
- {
- p = p / x;
- q = q / x;
- r = r / x;
- }
- }
-
- if (x == 0.0)
- break;
-
- s = ei_sqrt(p * p + q * q + r * r);
-
- if (p < 0)
- s = -s;
-
- if (s != 0)
- {
- if (k != m)
- matH.coeffRef(k,k-1) = -s * x;
- else if (l != m)
- matH.coeffRef(k,k-1) = -matH.coeff(k,k-1);
-
- p = p + s;
- x = p / s;
- y = q / s;
- z = r / s;
- q = q / p;
- r = r / p;
-
- // Row modification
- for (int j = k; j < nn; ++j)
- {
- p = matH.coeff(k,j) + q * matH.coeff(k+1,j);
- if (notlast)
- {
- p = p + r * matH.coeff(k+2,j);
- matH.coeffRef(k+2,j) = matH.coeff(k+2,j) - p * z;
- }
- matH.coeffRef(k,j) = matH.coeff(k,j) - p * x;
- matH.coeffRef(k+1,j) = matH.coeff(k+1,j) - p * y;
- }
-
- // Column modification
- for (int i = 0; i <= std::min(n,k+3); ++i)
- {
- p = x * matH.coeff(i,k) + y * matH.coeff(i,k+1);
- if (notlast)
- {
- p = p + z * matH.coeff(i,k+2);
- matH.coeffRef(i,k+2) = matH.coeff(i,k+2) - p * r;
- }
- matH.coeffRef(i,k) = matH.coeff(i,k) - p;
- matH.coeffRef(i,k+1) = matH.coeff(i,k+1) - p * q;
- }
-
- // Accumulate transformations
- for (int i = low; i <= high; ++i)
- {
- p = x * m_eivec.coeff(i,k) + y * m_eivec.coeff(i,k+1);
- if (notlast)
- {
- p = p + z * m_eivec.coeff(i,k+2);
- m_eivec.coeffRef(i,k+2) = m_eivec.coeff(i,k+2) - p * r;
- }
- m_eivec.coeffRef(i,k) = m_eivec.coeff(i,k) - p;
- m_eivec.coeffRef(i,k+1) = m_eivec.coeff(i,k+1) - p * q;
- }
- } // (s != 0)
- } // k loop
- } // check convergence
- } // while (n >= low)
-
- // Backsubstitute to find vectors of upper triangular form
- if (norm == 0.0)
- {
- return;
- }
-
- for (n = nn-1; n >= 0; n--)
- {
- p = m_eivalues.coeff(n).real();
- q = m_eivalues.coeff(n).imag();
-
- // Scalar vector
- if (q == 0)
- {
- int l = n;
- matH.coeffRef(n,n) = 1.0;
- for (int i = n-1; i >= 0; i--)
- {
- w = matH.coeff(i,i) - p;
- r = (matH.row(i).segment(l,n-l+1) * matH.col(n).segment(l, n-l+1))(0,0);
-
- if (m_eivalues.coeff(i).imag() < 0.0)
- {
- z = w;
- s = r;
- }
- else
- {
- l = i;
- if (m_eivalues.coeff(i).imag() == 0.0)
- {
- if (w != 0.0)
- matH.coeffRef(i,n) = -r / w;
- else
- matH.coeffRef(i,n) = -r / (eps * norm);
- }
- else // Solve real equations
- {
- x = matH.coeff(i,i+1);
- y = matH.coeff(i+1,i);
- q = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
- t = (x * s - z * r) / q;
- matH.coeffRef(i,n) = t;
- if (ei_abs(x) > ei_abs(z))
- matH.coeffRef(i+1,n) = (-r - w * t) / x;
- else
- matH.coeffRef(i+1,n) = (-s - y * t) / z;
- }
-
- // Overflow control
- t = ei_abs(matH.coeff(i,n));
- if ((eps * t) * t > 1)
- matH.col(n).end(nn-i) /= t;
- }
- }
- }
- else if (q < 0) // Complex vector
- {
- std::complex<Scalar> cc;
- int l = n-1;
-
- // Last vector component imaginary so matrix is triangular
- if (ei_abs(matH.coeff(n,n-1)) > ei_abs(matH.coeff(n-1,n)))
- {
- matH.coeffRef(n-1,n-1) = q / matH.coeff(n,n-1);
- matH.coeffRef(n-1,n) = -(matH.coeff(n,n) - p) / matH.coeff(n,n-1);
- }
- else
- {
- cc = cdiv<Scalar>(0.0,-matH.coeff(n-1,n),matH.coeff(n-1,n-1)-p,q);
- matH.coeffRef(n-1,n-1) = ei_real(cc);
- matH.coeffRef(n-1,n) = ei_imag(cc);
- }
- matH.coeffRef(n,n-1) = 0.0;
- matH.coeffRef(n,n) = 1.0;
- for (int i = n-2; i >= 0; i--)
- {
- Scalar ra,sa,vr,vi;
- ra = (matH.block(i,l, 1, n-l+1) * matH.block(l,n-1, n-l+1, 1)).lazy()(0,0);
- sa = (matH.block(i,l, 1, n-l+1) * matH.block(l,n, n-l+1, 1)).lazy()(0,0);
- w = matH.coeff(i,i) - p;
-
- if (m_eivalues.coeff(i).imag() < 0.0)
- {
- z = w;
- r = ra;
- s = sa;
- }
- else
- {
- l = i;
- if (m_eivalues.coeff(i).imag() == 0)
- {
- cc = cdiv(-ra,-sa,w,q);
- matH.coeffRef(i,n-1) = ei_real(cc);
- matH.coeffRef(i,n) = ei_imag(cc);
- }
- else
- {
- // Solve complex equations
- x = matH.coeff(i,i+1);
- y = matH.coeff(i+1,i);
- vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
- vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
- if ((vr == 0.0) && (vi == 0.0))
- vr = eps * norm * (ei_abs(w) + ei_abs(q) + ei_abs(x) + ei_abs(y) + ei_abs(z));
-
- cc= cdiv(x*r-z*ra+q*sa,x*s-z*sa-q*ra,vr,vi);
- matH.coeffRef(i,n-1) = ei_real(cc);
- matH.coeffRef(i,n) = ei_imag(cc);
- if (ei_abs(x) > (ei_abs(z) + ei_abs(q)))
- {
- matH.coeffRef(i+1,n-1) = (-ra - w * matH.coeff(i,n-1) + q * matH.coeff(i,n)) / x;
- matH.coeffRef(i+1,n) = (-sa - w * matH.coeff(i,n) - q * matH.coeff(i,n-1)) / x;
- }
- else
- {
- cc = cdiv(-r-y*matH.coeff(i,n-1),-s-y*matH.coeff(i,n),z,q);
- matH.coeffRef(i+1,n-1) = ei_real(cc);
- matH.coeffRef(i+1,n) = ei_imag(cc);
- }
- }
-
- // Overflow control
- t = std::max(ei_abs(matH.coeff(i,n-1)),ei_abs(matH.coeff(i,n)));
- if ((eps * t) * t > 1)
- matH.block(i, n-1, nn-i, 2) /= t;
-
- }
- }
- }
- }
-
- // Vectors of isolated roots
- for (int i = 0; i < nn; ++i)
- {
- // FIXME again, what's the purpose of this test?
- // in this algo low==0 and high==nn-1, so the condition is never true!
- if (i < low || i > high)
- {
- m_eivec.row(i).end(nn-i) = matH.row(i).end(nn-i);
- }
- }
-
- // Back transformation to get eigenvectors of original matrix
- int bRows = high-low+1;
- for (int j = nn-1; j >= low; j--)
- {
- int bSize = std::min(j,high)-low+1;
- m_eivec.col(j).segment(low, bRows) = (m_eivec.block(low, low, bRows, bSize) * matH.col(j).segment(low, bSize));
- }
-}
-
-#endif // EIGEN_EIGENSOLVER_H
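For reference, a minimal sketch of the complex interface of the solver removed above, checking A*v = lambda*v for the first eigenpair (assuming <Eigen/QR>; the random 4x4 input is arbitrary):

    MatrixXd A = MatrixXd::Random(4,4);
    EigenSolver<MatrixXd> es(A);
    EigenSolver<MatrixXd>::EigenvalueType  lambda = es.eigenvalues();    // complex column vector
    EigenSolver<MatrixXd>::EigenvectorType V      = es.eigenvectors();   // normalized complex eigenvectors
    MatrixXcd Ac = A.cast<std::complex<double> >();
    std::cout << (Ac*V.col(0) - lambda.coeff(0)*V.col(0)).norm() << std::endl;   // ~0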
diff --git a/extern/Eigen2/Eigen/src/QR/HessenbergDecomposition.h b/extern/Eigen2/Eigen/src/QR/HessenbergDecomposition.h
deleted file mode 100644
index 6d0ff794ec2..00000000000
--- a/extern/Eigen2/Eigen/src/QR/HessenbergDecomposition.h
+++ /dev/null
@@ -1,250 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_HESSENBERGDECOMPOSITION_H
-#define EIGEN_HESSENBERGDECOMPOSITION_H
-
-/** \ingroup QR_Module
- * \nonstableyet
- *
- * \class HessenbergDecomposition
- *
- * \brief Reduces a square matrix to Hessenberg form
- *
- * \param MatrixType the type of the matrix of which we are computing the Hessenberg decomposition
- *
- * This class performs a Hessenberg decomposition of a matrix \f$ A \f$ such that:
- * \f$ A = Q H Q^* \f$ where \f$ Q \f$ is unitary and \f$ H \f$ a Hessenberg matrix.
- *
- * \sa class Tridiagonalization, class QR
- */
-template<typename _MatrixType> class HessenbergDecomposition
-{
- public:
-
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<Scalar>::Real RealScalar;
-
- enum {
- Size = MatrixType::RowsAtCompileTime,
- SizeMinusOne = MatrixType::RowsAtCompileTime==Dynamic
- ? Dynamic
- : MatrixType::RowsAtCompileTime-1
- };
-
- typedef Matrix<Scalar, SizeMinusOne, 1> CoeffVectorType;
- typedef Matrix<RealScalar, Size, 1> DiagonalType;
- typedef Matrix<RealScalar, SizeMinusOne, 1> SubDiagonalType;
-
- typedef typename NestByValue<DiagonalCoeffs<MatrixType> >::RealReturnType DiagonalReturnType;
-
- typedef typename NestByValue<DiagonalCoeffs<
- NestByValue<Block<MatrixType,SizeMinusOne,SizeMinusOne> > > >::RealReturnType SubDiagonalReturnType;
-
- /** This constructor initializes a HessenbergDecomposition object for
- * further use with HessenbergDecomposition::compute()
- */
- HessenbergDecomposition(int size = Size==Dynamic ? 2 : Size)
- : m_matrix(size,size), m_hCoeffs(size-1)
- {}
-
- HessenbergDecomposition(const MatrixType& matrix)
- : m_matrix(matrix),
- m_hCoeffs(matrix.cols()-1)
- {
- _compute(m_matrix, m_hCoeffs);
- }
-
- /** Computes or re-computes the Hessenberg decomposition of the matrix \a matrix.
- *
- * This method allows the already allocated data to be reused.
- */
- void compute(const MatrixType& matrix)
- {
- m_matrix = matrix;
- m_hCoeffs.resize(matrix.rows()-1,1);
- _compute(m_matrix, m_hCoeffs);
- }
-
- /** \returns the Householder coefficients that allow the matrix Q
- * to be reconstructed from the packed data.
- *
- * \sa packedMatrix()
- */
- CoeffVectorType householderCoefficients(void) const { return m_hCoeffs; }
-
- /** \returns the internal result of the decomposition.
- *
- * The returned matrix contains the following information:
- * - the upper part and lower sub-diagonal represent the Hessenberg matrix H
- * - the rest of the lower part contains the Householder vectors that, combined with
- * the Householder coefficients returned by householderCoefficients(),
- * allow the matrix Q to be reconstructed as follows:
- * Q = H_{N-1} ... H_1 H_0
- * where the matrices H_i are the Householder transformations:
- * H_i = (I - h_i * v_i * v_i')
- * where h_i == householderCoefficients()[i] and v_i is a Householder vector:
- * v_i = [ 0, ..., 0, 1, M(i+2,i), ..., M(N-1,i) ]
- *
- * See LAPACK for further details on this packed storage.
- */
- const MatrixType& packedMatrix(void) const { return m_matrix; }
-
- MatrixType matrixQ(void) const;
- MatrixType matrixH(void) const;
-
- private:
-
- static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs);
-
- protected:
- MatrixType m_matrix;
- CoeffVectorType m_hCoeffs;
-};
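To make the packed storage described in packedMatrix() concrete, here is a sketch (not part of the removed file) that rebuilds Q by hand from the packed matrix and the Householder coefficients, and compares it against matrixQ(); the 5x5 size is arbitrary:

    int n = 5;
    MatrixXd A = MatrixXd::Random(n,n);
    HessenbergDecomposition<MatrixXd> hd(A);
    MatrixXd M = hd.packedMatrix();
    VectorXd h = hd.householderCoefficients();
    MatrixXd Q = MatrixXd::Identity(n,n);
    for (int i = n-2; i >= 0; --i)             // same accumulation order as matrixQ()
    {
      VectorXd v = VectorXd::Zero(n);
      v[i+1] = 1.0;                            // the Householder vector has an implicit leading 1
      if (n-i-2 > 0)
        v.end(n-i-2) = M.col(i).end(n-i-2);    // remaining entries M(i+2..n-1, i)
      MatrixXd Hi = MatrixXd::Identity(n,n) - h[i] * v * v.transpose();
      Q = Hi * Q;
    }
    std::cout << (Q - hd.matrixQ()).norm() << std::endl;   // ~0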
-
-#ifndef EIGEN_HIDE_HEAVY_CODE
-
-/** \internal
- * Performs a Hessenberg decomposition of \a matA in place.
- *
- * \param matA the input square matrix
- * \param hCoeffs returned Householder coefficients
- *
- * The Hessenberg matrix H is written in the upper part (including the lower
- * sub-diagonal) of \a matA, and the Householder vectors in the remaining
- * lower part.
- *
- * Implemented from Golub's "Matrix Computations" (Householder reduction to
- * Hessenberg form).
- *
- * \sa packedMatrix()
- */
-template<typename MatrixType>
-void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs)
-{
- assert(matA.rows()==matA.cols());
- int n = matA.rows();
- for (int i = 0; i<n-2; ++i)
- {
- // let's consider the vector v = i-th column starting at position i+1
-
- // start of the householder transformation
- // squared norm of the vector v skipping the first element
- RealScalar v1norm2 = matA.col(i).end(n-(i+2)).squaredNorm();
-
- if (ei_isMuchSmallerThan(v1norm2,static_cast<Scalar>(1)))
- {
- hCoeffs.coeffRef(i) = 0.;
- }
- else
- {
- Scalar v0 = matA.col(i).coeff(i+1);
- RealScalar beta = ei_sqrt(ei_abs2(v0)+v1norm2);
- if (ei_real(v0)>=0.)
- beta = -beta;
- matA.col(i).end(n-(i+2)) *= (Scalar(1)/(v0-beta));
- matA.col(i).coeffRef(i+1) = beta;
- Scalar h = (beta - v0) / beta;
- // end of the householder transformation
-
- // Apply similarity transformation to remaining columns,
- // i.e., A = H' A H where H = I - h v v' and v = matA.col(i).end(n-i-1)
- matA.col(i).coeffRef(i+1) = 1;
-
- // first let's do A = H A
- matA.corner(BottomRight,n-i-1,n-i-1) -= ((ei_conj(h) * matA.col(i).end(n-i-1)) *
- (matA.col(i).end(n-i-1).adjoint() * matA.corner(BottomRight,n-i-1,n-i-1))).lazy();
-
- // now let's do A = A H
- matA.corner(BottomRight,n,n-i-1) -= ((matA.corner(BottomRight,n,n-i-1) * matA.col(i).end(n-i-1))
- * (h * matA.col(i).end(n-i-1).adjoint())).lazy();
-
- matA.col(i).coeffRef(i+1) = beta;
- hCoeffs.coeffRef(i) = h;
- }
- }
- if (NumTraits<Scalar>::IsComplex)
- {
- // Householder transformation on the remaining single scalar
- int i = n-2;
- Scalar v0 = matA.coeff(i+1,i);
-
- RealScalar beta = ei_sqrt(ei_abs2(v0));
- if (ei_real(v0)>=0.)
- beta = -beta;
- Scalar h = (beta - v0) / beta;
- hCoeffs.coeffRef(i) = h;
-
- // A = H* A
- matA.corner(BottomRight,n-i-1,n-i) -= ei_conj(h) * matA.corner(BottomRight,n-i-1,n-i);
-
- // A = A H
- matA.col(n-1) -= h * matA.col(n-1);
- }
- else
- {
- hCoeffs.coeffRef(n-2) = 0;
- }
-}
-
-/** reconstructs and returns the matrix Q */
-template<typename MatrixType>
-typename HessenbergDecomposition<MatrixType>::MatrixType
-HessenbergDecomposition<MatrixType>::matrixQ(void) const
-{
- int n = m_matrix.rows();
- MatrixType matQ = MatrixType::Identity(n,n);
- for (int i = n-2; i>=0; i--)
- {
- Scalar tmp = m_matrix.coeff(i+1,i);
- m_matrix.const_cast_derived().coeffRef(i+1,i) = 1;
-
- matQ.corner(BottomRight,n-i-1,n-i-1) -=
- ((m_hCoeffs.coeff(i) * m_matrix.col(i).end(n-i-1)) *
- (m_matrix.col(i).end(n-i-1).adjoint() * matQ.corner(BottomRight,n-i-1,n-i-1)).lazy()).lazy();
-
- m_matrix.const_cast_derived().coeffRef(i+1,i) = tmp;
- }
- return matQ;
-}
-
-#endif // EIGEN_HIDE_HEAVY_CODE
-
-/** constructs and returns the matrix H.
- * Note that the matrix H is equivalent to the upper part of the packed matrix
- * (including the lower sub-diagonal). Therefore, it is often sufficient
- * to use the packed matrix directly instead of creating a new one.
- */
-template<typename MatrixType>
-typename HessenbergDecomposition<MatrixType>::MatrixType
-HessenbergDecomposition<MatrixType>::matrixH(void) const
-{
- // FIXME should this function (and other similar) rather take a matrix as argument
- // and fill it (to avoid temporaries)
- int n = m_matrix.rows();
- MatrixType matH = m_matrix;
- if (n>2)
- matH.corner(BottomLeft,n-2, n-2).template part<LowerTriangular>().setZero();
- return matH;
-}
-
-#endif // EIGEN_HESSENBERGDECOMPOSITION_H
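A minimal usage sketch for the decomposition removed above, checking the defining relation A = Q H Q* on a random matrix (assuming <Eigen/QR>; not part of the removed file):

    MatrixXd A = MatrixXd::Random(6,6);
    HessenbergDecomposition<MatrixXd> hd(A);
    MatrixXd Q = hd.matrixQ();
    MatrixXd H = hd.matrixH();                 // zero below the first sub-diagonal
    std::cout << (A - Q*H*Q.adjoint()).norm() << std::endl;   // ~0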
diff --git a/extern/Eigen2/Eigen/src/QR/QR.h b/extern/Eigen2/Eigen/src/QR/QR.h
deleted file mode 100644
index 90751dd428d..00000000000
--- a/extern/Eigen2/Eigen/src/QR/QR.h
+++ /dev/null
@@ -1,334 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_QR_H
-#define EIGEN_QR_H
-
-/** \ingroup QR_Module
- * \nonstableyet
- *
- * \class QR
- *
- * \brief QR decomposition of a matrix
- *
- * \param MatrixType the type of the matrix of which we are computing the QR decomposition
- *
- * This class performs a QR decomposition using Householder transformations. The result is
- * stored in a compact way compatible with LAPACK.
- *
- * \sa MatrixBase::qr()
- */
-template<typename MatrixType> class QR
-{
- public:
-
- typedef typename MatrixType::Scalar Scalar;
- typedef typename MatrixType::RealScalar RealScalar;
- typedef Block<MatrixType, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixRBlockType;
- typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixTypeR;
- typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType;
-
- /**
- * \brief Default Constructor.
- *
- * The default constructor is useful in cases in which the user intends to
- * perform decompositions via QR::compute(const MatrixType&).
- */
- QR() : m_qr(), m_hCoeffs(), m_isInitialized(false) {}
-
- QR(const MatrixType& matrix)
- : m_qr(matrix.rows(), matrix.cols()),
- m_hCoeffs(matrix.cols()),
- m_isInitialized(false)
- {
- compute(matrix);
- }
-
- /** \deprecated use isInjective()
- * \returns whether or not the matrix is of full rank
- *
- * \note Since the rank is computed only once, i.e. the first time it is needed,
- * subsequent calls to this method perform almost no further computation.
- */
- EIGEN_DEPRECATED bool isFullRank() const
- {
- ei_assert(m_isInitialized && "QR is not initialized.");
- return rank() == m_qr.cols();
- }
-
- /** \returns the rank of the matrix of which *this is the QR decomposition.
- *
- * \note Since the rank is computed only once, i.e. the first time it is needed,
- * subsequent calls to this method perform almost no further computation.
- */
- int rank() const;
-
- /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition.
- *
- * \note Since the rank is computed only once, i.e. the first time it is needed,
- * subsequent calls to this method perform almost no further computation.
- */
- inline int dimensionOfKernel() const
- {
- ei_assert(m_isInitialized && "QR is not initialized.");
- return m_qr.cols() - rank();
- }
-
- /** \returns true if the matrix of which *this is the QR decomposition represents an injective
- * linear map, i.e. has trivial kernel; false otherwise.
- *
- * \note Since the rank is computed only once, i.e. the first time it is needed,
- * subsequent calls to this method perform almost no further computation.
- */
- inline bool isInjective() const
- {
- ei_assert(m_isInitialized && "QR is not initialized.");
- return rank() == m_qr.cols();
- }
-
- /** \returns true if the matrix of which *this is the QR decomposition represents a surjective
- * linear map; false otherwise.
- *
- * \note Since the rank is computed only once, i.e. the first time it is needed,
- * subsequent calls to this method perform almost no further computation.
- */
- inline bool isSurjective() const
- {
- ei_assert(m_isInitialized && "QR is not initialized.");
- return rank() == m_qr.rows();
- }
-
- /** \returns true if the matrix of which *this is the QR decomposition is invertible.
- *
- * \note Since the rank is computed only once, i.e. the first time it is needed,
- * subsequent calls to this method perform almost no further computation.
- */
- inline bool isInvertible() const
- {
- ei_assert(m_isInitialized && "QR is not initialized.");
- return isInjective() && isSurjective();
- }
-
- /** \returns a read-only expression of the matrix R of the actual QR decomposition */
- const Part<NestByValue<MatrixRBlockType>, UpperTriangular>
- matrixR(void) const
- {
- ei_assert(m_isInitialized && "QR is not initialized.");
- int cols = m_qr.cols();
- return MatrixRBlockType(m_qr, 0, 0, cols, cols).nestByValue().template part<UpperTriangular>();
- }
-
- /** This method finds a solution x to the equation Ax=b, where A is the matrix of which
- * *this is the QR decomposition, if any exists.
- *
- * \param b the right-hand-side of the equation to solve.
- *
- * \param result a pointer to the vector/matrix in which to store the solution, if any exists.
- * Resized if necessary, so that result->rows()==A.cols() and result->cols()==b.cols().
- * If no solution exists, *result is left with undefined coefficients.
- *
- * \returns true if any solution exists, false if no solution exists.
- *
- * \note If more than one solution exists, this method will arbitrarily choose one.
- * If you need a complete analysis of the space of solutions, take the one solution obtained
- * by this method and add to it elements of the kernel, as determined by kernel().
- *
- * \note The case where b is a matrix is not yet implemented. Also, this
- * code is space inefficient.
- *
- * Example: \include QR_solve.cpp
- * Output: \verbinclude QR_solve.out
- *
- * \sa MatrixBase::solveTriangular(), kernel(), computeKernel(), inverse(), computeInverse()
- */
- template<typename OtherDerived, typename ResultType>
- bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const;
-
- MatrixType matrixQ(void) const;
-
- void compute(const MatrixType& matrix);
-
- protected:
- MatrixType m_qr;
- VectorType m_hCoeffs;
- mutable int m_rank;
- mutable bool m_rankIsUptodate;
- bool m_isInitialized;
-};
-
-/** \returns the rank of the matrix of which *this is the QR decomposition. */
-template<typename MatrixType>
-int QR<MatrixType>::rank() const
-{
- ei_assert(m_isInitialized && "QR is not initialized.");
- if (!m_rankIsUptodate)
- {
- RealScalar maxCoeff = m_qr.diagonal().cwise().abs().maxCoeff();
- int n = m_qr.cols();
- m_rank = 0;
- while(m_rank<n && !ei_isMuchSmallerThan(m_qr.diagonal().coeff(m_rank), maxCoeff))
- ++m_rank;
- m_rankIsUptodate = true;
- }
- return m_rank;
-}
-
-#ifndef EIGEN_HIDE_HEAVY_CODE
-
-template<typename MatrixType>
-void QR<MatrixType>::compute(const MatrixType& matrix)
-{
- m_rankIsUptodate = false;
- m_qr = matrix;
- m_hCoeffs.resize(matrix.cols());
-
- int rows = matrix.rows();
- int cols = matrix.cols();
- RealScalar eps2 = precision<RealScalar>()*precision<RealScalar>();
-
- for (int k = 0; k < cols; ++k)
- {
- int remainingSize = rows-k;
-
- RealScalar beta;
- Scalar v0 = m_qr.col(k).coeff(k);
-
- if (remainingSize==1)
- {
- if (NumTraits<Scalar>::IsComplex)
- {
- // Householder transformation on the remaining single scalar
- beta = ei_abs(v0);
- if (ei_real(v0)>0)
- beta = -beta;
- m_qr.coeffRef(k,k) = beta;
- m_hCoeffs.coeffRef(k) = (beta - v0) / beta;
- }
- else
- {
- m_hCoeffs.coeffRef(k) = 0;
- }
- }
- else if ((beta=m_qr.col(k).end(remainingSize-1).squaredNorm())>eps2)
- // FIXME what about ei_imag(v0) ??
- {
- // form k-th Householder vector
- beta = ei_sqrt(ei_abs2(v0)+beta);
- if (ei_real(v0)>=0.)
- beta = -beta;
- m_qr.col(k).end(remainingSize-1) /= v0-beta;
- m_qr.coeffRef(k,k) = beta;
- Scalar h = m_hCoeffs.coeffRef(k) = (beta - v0) / beta;
-
- // apply the Householder transformation (I - h v v') to remaining columns, i.e.,
- // R <- (I - h v v') * R where v = [1,m_qr(k+1,k), m_qr(k+2,k), ...]
- int remainingCols = cols - k -1;
- if (remainingCols>0)
- {
- m_qr.coeffRef(k,k) = Scalar(1);
- m_qr.corner(BottomRight, remainingSize, remainingCols) -= ei_conj(h) * m_qr.col(k).end(remainingSize)
- * (m_qr.col(k).end(remainingSize).adjoint() * m_qr.corner(BottomRight, remainingSize, remainingCols));
- m_qr.coeffRef(k,k) = beta;
- }
- }
- else
- {
- m_hCoeffs.coeffRef(k) = 0;
- }
- }
- m_isInitialized = true;
-}
-
-template<typename MatrixType>
-template<typename OtherDerived, typename ResultType>
-bool QR<MatrixType>::solve(
- const MatrixBase<OtherDerived>& b,
- ResultType *result
-) const
-{
- ei_assert(m_isInitialized && "QR is not initialized.");
- const int rows = m_qr.rows();
- ei_assert(b.rows() == rows);
- result->resize(rows, b.cols());
-
- // TODO(keir): There is almost certainly a faster way to multiply by
- // Q^T without explicitly forming matrixQ(). Investigate.
- *result = matrixQ().transpose()*b;
-
- if(!isSurjective())
- {
- // is the result in the image of R?
- RealScalar biggest_in_res = result->corner(TopLeft, m_rank, result->cols()).cwise().abs().maxCoeff();
- for(int col = 0; col < result->cols(); ++col)
- for(int row = m_rank; row < result->rows(); ++row)
- if(!ei_isMuchSmallerThan(result->coeff(row,col), biggest_in_res))
- return false;
- }
- m_qr.corner(TopLeft, m_rank, m_rank)
- .template marked<UpperTriangular>()
- .solveTriangularInPlace(result->corner(TopLeft, m_rank, result->cols()));
-
- return true;
-}
-
-/** \returns the matrix Q */
-template<typename MatrixType>
-MatrixType QR<MatrixType>::matrixQ() const
-{
- ei_assert(m_isInitialized && "QR is not initialized.");
- // compute the product Q_0 Q_1 ... Q_n-1,
- // where Q_k is the k-th Householder transformation I - h_k v_k v_k'
- // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
- int rows = m_qr.rows();
- int cols = m_qr.cols();
- MatrixType res = MatrixType::Identity(rows, cols);
- for (int k = cols-1; k >= 0; k--)
- {
- // to make the computation of the transformation easier, let's temporarily
- // overwrite m_qr(k,k) such that the end of m_qr.col(k) is exactly our Householder vector.
- Scalar beta = m_qr.coeff(k,k);
- m_qr.const_cast_derived().coeffRef(k,k) = 1;
- int endLength = rows-k;
- res.corner(BottomRight,endLength, cols-k) -= ((m_hCoeffs.coeff(k) * m_qr.col(k).end(endLength))
- * (m_qr.col(k).end(endLength).adjoint() * res.corner(BottomRight,endLength, cols-k)).lazy()).lazy();
- m_qr.const_cast_derived().coeffRef(k,k) = beta;
- }
- return res;
-}
-
-#endif // EIGEN_HIDE_HEAVY_CODE
-
-/** \return the QR decomposition of \c *this.
- *
- * \sa class QR
- */
-template<typename Derived>
-const QR<typename MatrixBase<Derived>::PlainMatrixType>
-MatrixBase<Derived>::qr() const
-{
- return QR<PlainMatrixType>(eval());
-}
-
-
-#endif // EIGEN_QR_H
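A minimal sketch of how the QR class removed above was used to solve a square system (assuming <Eigen/QR>; a random matrix has full rank with probability 1; not part of the removed file):

    MatrixXd A = MatrixXd::Random(4,4);
    VectorXd b = VectorXd::Random(4);
    QR<MatrixXd> qr(A);
    VectorXd x;
    if (qr.solve(b, &x))
      std::cout << (A*x - b).norm() << std::endl;   // ~0 when a solution exists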
diff --git a/extern/Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h b/extern/Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h
deleted file mode 100644
index 70984efab9d..00000000000
--- a/extern/Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h
+++ /dev/null
@@ -1,402 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
-#define EIGEN_SELFADJOINTEIGENSOLVER_H
-
-/** \qr_module \ingroup QR_Module
- * \nonstableyet
- *
- * \class SelfAdjointEigenSolver
- *
- * \brief Eigenvalue/eigenvector solver for selfadjoint matrices
- *
- * \param MatrixType the type of the matrix of which we are computing the eigendecomposition
- *
- * \note MatrixType must be an actual Matrix type, it can't be an expression type.
- *
- * \sa MatrixBase::eigenvalues(), class EigenSolver
- */
-template<typename _MatrixType> class SelfAdjointEigenSolver
-{
- public:
-
- enum {Size = _MatrixType::RowsAtCompileTime };
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef std::complex<RealScalar> Complex;
- typedef Matrix<RealScalar, MatrixType::ColsAtCompileTime, 1> RealVectorType;
- typedef Matrix<RealScalar, Dynamic, 1> RealVectorTypeX;
- typedef Tridiagonalization<MatrixType> TridiagonalizationType;
-
- SelfAdjointEigenSolver()
- : m_eivec(int(Size), int(Size)),
- m_eivalues(int(Size))
- {
- ei_assert(Size!=Dynamic);
- }
-
- SelfAdjointEigenSolver(int size)
- : m_eivec(size, size),
- m_eivalues(size)
- {}
-
- /** Constructor computing the eigenvalues of the selfadjoint matrix \a matrix,
- * as well as the eigenvectors if \a computeEigenvectors is true.
- *
- * \sa compute(MatrixType,bool), SelfAdjointEigenSolver(MatrixType,MatrixType,bool)
- */
- SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
- : m_eivec(matrix.rows(), matrix.cols()),
- m_eivalues(matrix.cols())
- {
- compute(matrix, computeEigenvectors);
- }
-
- /** Constructor computing the eigenvalues of the generalized eigenproblem
- * \f$ Ax = \lambda B x \f$ with \a matA the selfadjoint matrix \f$ A \f$
- * and \a matB the positive definite matrix \f$ B \f$ . The eigenvectors
- * are computed if \a computeEigenvectors is true.
- *
- * \sa compute(MatrixType,MatrixType,bool), SelfAdjointEigenSolver(MatrixType,bool)
- */
- SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
- : m_eivec(matA.rows(), matA.cols()),
- m_eivalues(matA.cols())
- {
- compute(matA, matB, computeEigenvectors);
- }
-
- void compute(const MatrixType& matrix, bool computeEigenvectors = true);
-
- void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true);
-
- /** \returns the computed eigenvectors as a matrix of column vectors */
- MatrixType eigenvectors(void) const
- {
- #ifndef NDEBUG
- ei_assert(m_eigenvectorsOk);
- #endif
- return m_eivec;
- }
-
- /** \returns the computed eigenvalues */
- RealVectorType eigenvalues(void) const { return m_eivalues; }
-
- /** \returns the positive square root of the matrix
- *
- * \note the matrix itself must be positive semidefinite in order for this to make sense.
- */
- MatrixType operatorSqrt() const
- {
- return m_eivec * m_eivalues.cwise().sqrt().asDiagonal() * m_eivec.adjoint();
- }
-
- /** \returns the positive inverse square root of the matrix
- *
- * \note the matrix itself must be positive definite in order for this to make sense.
- */
- MatrixType operatorInverseSqrt() const
- {
- return m_eivec * m_eivalues.cwise().inverse().cwise().sqrt().asDiagonal() * m_eivec.adjoint();
- }
-
-
- protected:
- MatrixType m_eivec;
- RealVectorType m_eivalues;
- #ifndef NDEBUG
- bool m_eigenvectorsOk;
- #endif
-};
-
-#ifndef EIGEN_HIDE_HEAVY_CODE
-
-// from Golub's "Matrix Computations", algorithm 5.1.3
-template<typename Scalar>
-static void ei_givens_rotation(Scalar a, Scalar b, Scalar& c, Scalar& s)
-{
- if (b==0)
- {
- c = 1; s = 0;
- }
- else if (ei_abs(b)>ei_abs(a))
- {
- Scalar t = -a/b;
- s = Scalar(1)/ei_sqrt(1+t*t);
- c = s * t;
- }
- else
- {
- Scalar t = -b/a;
- c = Scalar(1)/ei_sqrt(1+t*t);
- s = c * t;
- }
-}
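With the convention used above, the returned pair (c, s) satisfies s*a + c*b == 0, so applying G' with G = [c s; -s c] from the left zeroes the second entry of (a, b). A quick check with arbitrary values (not part of the removed file):

    double a = 3.0, b = 4.0, c, s;
    ei_givens_rotation(a, b, c, s);             // here c == -0.6, s == 0.8
    std::cout << s*a + c*b << std::endl;        // 0 up to roundoff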
-
-/** \internal
- *
- * \qr_module
- *
- * Performs a QR step on a tridiagonal symmetric matrix represented as a
- * pair of two vectors \a diag and \a subdiag.
- *
- * \param diag the diagonal of the input selfadjoint tridiagonal matrix
- * \param subdiag the sub-diagonal of the input matrix
- * \param start index of the first row/column of the active (unreduced) block
- * \param end index of the last row/column of the active block
- * \param matrixQ pointer to the matrix of eigenvectors to update (may be null)
- * \param n size of the matrix
- *
- * For compilation efficiency reasons, this procedure does not use Eigen
- * expressions for its arguments.
- *
- * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
- * "implicit symmetric QR step with Wilkinson shift"
- */
-template<typename RealScalar, typename Scalar>
-static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n);
-
-/** Computes the eigenvalues of the selfadjoint matrix \a matrix,
- * as well as the eigenvectors if \a computeEigenvectors is true.
- *
- * \sa SelfAdjointEigenSolver(MatrixType,bool), compute(MatrixType,MatrixType,bool)
- */
-template<typename MatrixType>
-void SelfAdjointEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
-{
- #ifndef NDEBUG
- m_eigenvectorsOk = computeEigenvectors;
- #endif
- assert(matrix.cols() == matrix.rows());
- int n = matrix.cols();
- m_eivalues.resize(n,1);
-
- if(n==1)
- {
- m_eivalues.coeffRef(0,0) = ei_real(matrix.coeff(0,0));
- m_eivec.setOnes();
- return;
- }
-
- m_eivec = matrix;
-
- // FIXME, should tridiag be a local variable of this function or an attribute of SelfAdjointEigenSolver ?
- // the latter avoids multiple memory allocation when the same SelfAdjointEigenSolver is used multiple times...
- // (same for diag and subdiag)
- RealVectorType& diag = m_eivalues;
- typename TridiagonalizationType::SubDiagonalType subdiag(n-1);
- TridiagonalizationType::decomposeInPlace(m_eivec, diag, subdiag, computeEigenvectors);
-
- int end = n-1;
- int start = 0;
- while (end>0)
- {
- for (int i = start; i<end; ++i)
- if (ei_isMuchSmallerThan(ei_abs(subdiag[i]),(ei_abs(diag[i])+ei_abs(diag[i+1]))))
- subdiag[i] = 0;
-
- // find the largest unreduced block
- while (end>0 && subdiag[end-1]==0)
- end--;
- if (end<=0)
- break;
- start = end - 1;
- while (start>0 && subdiag[start-1]!=0)
- start--;
-
- ei_tridiagonal_qr_step(diag.data(), subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
- }
-
- // Sort eigenvalues and corresponding vectors.
- // TODO make the sort optional?
- // TODO use a better sort algorithm!
- for (int i = 0; i < n-1; ++i)
- {
- int k;
- m_eivalues.segment(i,n-i).minCoeff(&k);
- if (k > 0)
- {
- std::swap(m_eivalues[i], m_eivalues[k+i]);
- m_eivec.col(i).swap(m_eivec.col(k+i));
- }
- }
-}
-
-/** Computes the eigenvalues of the generalized eigenproblem
- * \f$ Ax = \lambda B x \f$ with \a matA the selfadjoint matrix \f$ A \f$
- * and \a matB the positive definite matrix \f$ B \f$ . The eigenvectors
- * are computed if \a computeEigenvectors is true.
- *
- * \sa SelfAdjointEigenSolver(MatrixType,MatrixType,bool), compute(MatrixType,bool)
- */
-template<typename MatrixType>
-void SelfAdjointEigenSolver<MatrixType>::
-compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors)
-{
- ei_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows());
-
- // Compute the Cholesky decomposition of matB = L L'
- LLT<MatrixType> cholB(matB);
-
- // compute C = inv(L) A inv(L')
- MatrixType matC = matA;
- cholB.matrixL().solveTriangularInPlace(matC);
- // FIXME since we currently do not support A * inv(L'), let's do (inv(L) A')' :
- matC = matC.adjoint().eval();
- cholB.matrixL().template marked<LowerTriangular>().solveTriangularInPlace(matC);
- matC = matC.adjoint().eval();
- // this version works too:
-// matC = matC.transpose();
-// cholB.matrixL().conjugate().template marked<LowerTriangular>().solveTriangularInPlace(matC);
-// matC = matC.transpose();
- // FIXME: this should work: (currently it only does for small matrices)
-// Transpose<MatrixType> trMatC(matC);
-// cholB.matrixL().conjugate().eval().template marked<LowerTriangular>().solveTriangularInPlace(trMatC);
-
- compute(matC, computeEigenvectors);
-
- if (computeEigenvectors)
- {
- // transform back the eigenvectors: evecs = inv(U) * evecs
- cholB.matrixL().adjoint().template marked<UpperTriangular>().solveTriangularInPlace(m_eivec);
- for (int i=0; i<m_eivec.cols(); ++i)
- m_eivec.col(i) = m_eivec.col(i).normalized();
- }
-}
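A sketch of the generalized problem this overload solves, checking A v = lambda B v for the first eigenpair (A symmetric and B positive definite are built arbitrarily; not part of the removed file):

    MatrixXd A = MatrixXd::Random(4,4);  A = (A + A.transpose()).eval();   // selfadjoint A
    MatrixXd B = MatrixXd::Random(4,4);
    B = (B*B.transpose() + 4.0*MatrixXd::Identity(4,4)).eval();            // positive definite B
    SelfAdjointEigenSolver<MatrixXd> es(A, B);
    VectorXd lambda = es.eigenvalues();
    MatrixXd V = es.eigenvectors();
    std::cout << (A*V.col(0) - lambda[0]*(B*V.col(0))).norm() << std::endl;   // ~0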
-
-#endif // EIGEN_HIDE_HEAVY_CODE
-
-/** \qr_module
- *
- * \returns a vector listing the eigenvalues of this matrix.
- */
-template<typename Derived>
-inline Matrix<typename NumTraits<typename ei_traits<Derived>::Scalar>::Real, ei_traits<Derived>::ColsAtCompileTime, 1>
-MatrixBase<Derived>::eigenvalues() const
-{
- ei_assert(Flags&SelfAdjointBit);
- return SelfAdjointEigenSolver<typename Derived::PlainMatrixType>(eval(),false).eigenvalues();
-}
-
-template<typename Derived, bool IsSelfAdjoint>
-struct ei_operatorNorm_selector
-{
- static inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
- operatorNorm(const MatrixBase<Derived>& m)
- {
- // FIXME if it is really guaranteed that the eigenvalues are already sorted,
- // then we don't need to compute a maxCoeff() here; comparing the 1st and last ones is enough.
- return m.eigenvalues().cwise().abs().maxCoeff();
- }
-};
-
-template<typename Derived> struct ei_operatorNorm_selector<Derived, false>
-{
- static inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
- operatorNorm(const MatrixBase<Derived>& m)
- {
- typename Derived::PlainMatrixType m_eval(m);
- // FIXME if it is really guaranteed that the eigenvalues are already sorted,
- // then we don't need to compute a maxCoeff() here; comparing the 1st and last ones is enough.
- return ei_sqrt(
- (m_eval*m_eval.adjoint())
- .template marked<SelfAdjoint>()
- .eigenvalues()
- .maxCoeff()
- );
- }
-};
-
-/** \qr_module
- *
- * \returns the matrix norm of this matrix.
- */
-template<typename Derived>
-inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
-MatrixBase<Derived>::operatorNorm() const
-{
- return ei_operatorNorm_selector<Derived, Flags&SelfAdjointBit>
- ::operatorNorm(derived());
-}
-
-#ifndef EIGEN_EXTERN_INSTANTIATIONS
-template<typename RealScalar, typename Scalar>
-static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n)
-{
- RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
- RealScalar e2 = ei_abs2(subdiag[end-1]);
- RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * ei_sqrt(td*td + e2));
- RealScalar x = diag[start] - mu;
- RealScalar z = subdiag[start];
-
- for (int k = start; k < end; ++k)
- {
- RealScalar c, s;
- ei_givens_rotation(x, z, c, s);
-
- // do T = G' T G
- RealScalar sdk = s * diag[k] + c * subdiag[k];
- RealScalar dkp1 = s * subdiag[k] + c * diag[k+1];
-
- diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k+1]);
- diag[k+1] = s * sdk + c * dkp1;
- subdiag[k] = c * sdk - s * dkp1;
-
- if (k > start)
- subdiag[k - 1] = c * subdiag[k-1] - s * z;
-
- x = subdiag[k];
-
- if (k < end - 1)
- {
- z = -s * subdiag[k+1];
- subdiag[k + 1] = c * subdiag[k+1];
- }
-
- // apply the givens rotation to the unit matrix Q = Q * G
- // G only modifies the two columns k and k+1
- if (matrixQ)
- {
- #ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
- #else
- int kn = k*n;
- int kn1 = (k+1)*n;
- #endif
- // let's do the product manually to avoid the need for temporaries...
- for (int i=0; i<n; ++i)
- {
- #ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
- Scalar matrixQ_i_k = matrixQ[i*n+k];
- matrixQ[i*n+k] = c * matrixQ_i_k - s * matrixQ[i*n+k+1];
- matrixQ[i*n+k+1] = s * matrixQ_i_k + c * matrixQ[i*n+k+1];
- #else
- Scalar matrixQ_i_k = matrixQ[i+kn];
- matrixQ[i+kn] = c * matrixQ_i_k - s * matrixQ[i+kn1];
- matrixQ[i+kn1] = s * matrixQ_i_k + c * matrixQ[i+kn1];
- #endif
- }
- }
- }
-}
-#endif
-
-#endif // EIGEN_SELFADJOINTEIGENSOLVER_H
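A minimal sketch of the basic interface of the solver removed above, including the operatorSqrt() helper (assuming <Eigen/QR>; the 2x2 symmetric positive definite input is arbitrary):

    Matrix2d A;
    A << 2, 1,
         1, 2;                                   // eigenvalues 1 and 3
    SelfAdjointEigenSolver<Matrix2d> es(A);
    std::cout << es.eigenvalues().transpose() << std::endl;   // 1 3 (sorted ascending)
    Matrix2d S = es.operatorSqrt();
    std::cout << (S*S - A).norm() << std::endl;               // ~0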
diff --git a/extern/Eigen2/Eigen/src/QR/Tridiagonalization.h b/extern/Eigen2/Eigen/src/QR/Tridiagonalization.h
deleted file mode 100644
index 9ea39be717c..00000000000
--- a/extern/Eigen2/Eigen/src/QR/Tridiagonalization.h
+++ /dev/null
@@ -1,431 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_TRIDIAGONALIZATION_H
-#define EIGEN_TRIDIAGONALIZATION_H
-
-/** \ingroup QR_Module
- * \nonstableyet
- *
- * \class Tridiagonalization
- *
- * \brief Tridiagonal decomposition of a selfadjoint matrix
- *
- * \param MatrixType the type of the matrix of which we are performing the tridiagonalization
- *
- * This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that:
- * \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix.
- *
- * \sa MatrixBase::tridiagonalize()
- */
-template<typename _MatrixType> class Tridiagonalization
-{
- public:
-
- typedef _MatrixType MatrixType;
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<Scalar>::Real RealScalar;
- typedef typename ei_packet_traits<Scalar>::type Packet;
-
- enum {
- Size = MatrixType::RowsAtCompileTime,
- SizeMinusOne = MatrixType::RowsAtCompileTime==Dynamic
- ? Dynamic
- : MatrixType::RowsAtCompileTime-1,
- PacketSize = ei_packet_traits<Scalar>::size
- };
-
- typedef Matrix<Scalar, SizeMinusOne, 1> CoeffVectorType;
- typedef Matrix<RealScalar, Size, 1> DiagonalType;
- typedef Matrix<RealScalar, SizeMinusOne, 1> SubDiagonalType;
-
- typedef typename NestByValue<DiagonalCoeffs<MatrixType> >::RealReturnType DiagonalReturnType;
-
- typedef typename NestByValue<DiagonalCoeffs<
- NestByValue<Block<MatrixType,SizeMinusOne,SizeMinusOne> > > >::RealReturnType SubDiagonalReturnType;
-
- /** This constructor initializes a Tridiagonalization object for
- * further use with Tridiagonalization::compute()
- */
- Tridiagonalization(int size = Size==Dynamic ? 2 : Size)
- : m_matrix(size,size), m_hCoeffs(size-1)
- {}
-
- Tridiagonalization(const MatrixType& matrix)
- : m_matrix(matrix),
- m_hCoeffs(matrix.cols()-1)
- {
- _compute(m_matrix, m_hCoeffs);
- }
-
- /** Computes or re-computes the tridiagonalization of the matrix \a matrix.
- *
- * This method allows the already allocated data to be reused.
- */
- void compute(const MatrixType& matrix)
- {
- m_matrix = matrix;
- m_hCoeffs.resize(matrix.rows()-1, 1);
- _compute(m_matrix, m_hCoeffs);
- }
-
- /** \returns the Householder coefficients that allow the matrix Q
- * to be reconstructed from the packed data.
- *
- * \sa packedMatrix()
- */
- inline CoeffVectorType householderCoefficients(void) const { return m_hCoeffs; }
-
- /** \returns the internal result of the decomposition.
- *
- * The returned matrix contains the following information:
- * - the strict upper part is equal to the input matrix A
- * - the diagonal and lower sub-diagonal represent the tridiagonal symmetric matrix (real).
- * - the rest of the lower part contains the Householder vectors that, combined with
- * the Householder coefficients returned by householderCoefficients(),
- * allow the matrix Q to be reconstructed as follows:
- * Q = H_{N-1} ... H_1 H_0
- * where the matrices H_i are the Householder transformations:
- * H_i = (I - h_i * v_i * v_i')
- * where h_i == householderCoefficients()[i] and v_i is a Householder vector:
- * v_i = [ 0, ..., 0, 1, M(i+2,i), ..., M(N-1,i) ]
- *
- * See LAPACK for further details on this packed storage.
- */
- inline const MatrixType& packedMatrix(void) const { return m_matrix; }
-
- MatrixType matrixQ(void) const;
- MatrixType matrixT(void) const;
- const DiagonalReturnType diagonal(void) const;
- const SubDiagonalReturnType subDiagonal(void) const;
-
- static void decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ = true);
-
- private:
-
- static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs);
-
- static void _decomposeInPlace3x3(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ = true);
-
- protected:
- MatrixType m_matrix;
- CoeffVectorType m_hCoeffs;
-};
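Before the accessors are implemented below, a minimal usage sketch checking the defining relation A = Q T Q* on a random selfadjoint matrix (assuming <Eigen/QR>; not part of the removed file):

    MatrixXd A = MatrixXd::Random(5,5);  A = (A + A.transpose()).eval();   // selfadjoint input
    Tridiagonalization<MatrixXd> tri(A);
    MatrixXd Q = tri.matrixQ();
    MatrixXd T = tri.matrixT();            // real symmetric tridiagonal
    std::cout << (A - Q*T*Q.adjoint()).norm() << std::endl;   // ~0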
-
-/** \returns an expression of the diagonal vector */
-template<typename MatrixType>
-const typename Tridiagonalization<MatrixType>::DiagonalReturnType
-Tridiagonalization<MatrixType>::diagonal(void) const
-{
- return m_matrix.diagonal().nestByValue().real();
-}
-
-/** \returns an expression of the sub-diagonal vector */
-template<typename MatrixType>
-const typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
-Tridiagonalization<MatrixType>::subDiagonal(void) const
-{
- int n = m_matrix.rows();
- return Block<MatrixType,SizeMinusOne,SizeMinusOne>(m_matrix, 1, 0, n-1,n-1)
- .nestByValue().diagonal().nestByValue().real();
-}
-
-/** constructs and returns the tridiagonal matrix T.
- * Note that the matrix T is equivalent to the diagonal and sub-diagonal of the packed matrix.
- * Therefore, it is often sufficient to use the packed matrix directly, or the vector
- * expressions returned by diagonal() and subDiagonal(), instead of creating a new matrix.
- */
-template<typename MatrixType>
-typename Tridiagonalization<MatrixType>::MatrixType
-Tridiagonalization<MatrixType>::matrixT(void) const
-{
- // FIXME should this function (and other similar ones) rather take a matrix as argument
- // and fill it ? (to avoid temporaries)
- int n = m_matrix.rows();
- MatrixType matT = m_matrix;
- matT.corner(TopRight,n-1, n-1).diagonal() = subDiagonal().template cast<Scalar>().conjugate();
- if (n>2)
- {
- matT.corner(TopRight,n-2, n-2).template part<UpperTriangular>().setZero();
- matT.corner(BottomLeft,n-2, n-2).template part<LowerTriangular>().setZero();
- }
- return matT;
-}
-
-#ifndef EIGEN_HIDE_HEAVY_CODE
-
-/** \internal
- * Performs a tridiagonal decomposition of \a matA in place.
- *
- * \param matA the input selfadjoint matrix
- * \param hCoeffs returned Householder coefficients
- *
- * The result is written in the lower triangular part of \a matA.
- *
- * Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
- *
- * \sa packedMatrix()
- */
-template<typename MatrixType>
-void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs)
-{
- assert(matA.rows()==matA.cols());
- int n = matA.rows();
-// std::cerr << matA << "\n\n";
- for (int i = 0; i<n-2; ++i)
- {
- // let's consider the vector v = i-th column starting at position i+1
-
- // start of the householder transformation
- // squared norm of the vector v skipping the first element
- RealScalar v1norm2 = matA.col(i).end(n-(i+2)).squaredNorm();
-
- // FIXME comparing against 1
- if (ei_isMuchSmallerThan(v1norm2,static_cast<Scalar>(1)))
- {
- hCoeffs.coeffRef(i) = 0.;
- }
- else
- {
- Scalar v0 = matA.col(i).coeff(i+1);
- RealScalar beta = ei_sqrt(ei_abs2(v0)+v1norm2);
- if (ei_real(v0)>=0.)
- beta = -beta;
- matA.col(i).end(n-(i+2)) *= (Scalar(1)/(v0-beta));
- matA.col(i).coeffRef(i+1) = beta;
- Scalar h = (beta - v0) / beta;
- // end of the householder transformation
-
- // Apply similarity transformation to remaining columns,
- // i.e., A = H' A H where H = I - h v v' and v = matA.col(i).end(n-i-1)
-
- matA.col(i).coeffRef(i+1) = 1;
-
- /* This is the initial algorithm, which minimizes operation counts and maximizes
- * the use of Eigen's expressions. Unfortunately, the first matrix-vector product
- * using Part<LowerTriangular|Selfadjoint> is very slow */
- #ifdef EIGEN_NEVER_DEFINED
- // matrix - vector product
- hCoeffs.end(n-i-1) = (matA.corner(BottomRight,n-i-1,n-i-1).template part<LowerTriangular|SelfAdjoint>()
- * (h * matA.col(i).end(n-i-1))).lazy();
- // simple axpy
- hCoeffs.end(n-i-1) += (h * Scalar(-0.5) * matA.col(i).end(n-i-1).dot(hCoeffs.end(n-i-1)))
- * matA.col(i).end(n-i-1);
- // rank-2 update
- //Block<MatrixType,Dynamic,1> B(matA,i+1,i,n-i-1,1);
- matA.corner(BottomRight,n-i-1,n-i-1).template part<LowerTriangular>() -=
- (matA.col(i).end(n-i-1) * hCoeffs.end(n-i-1).adjoint()).lazy()
- + (hCoeffs.end(n-i-1) * matA.col(i).end(n-i-1).adjoint()).lazy();
- #endif
- /* end initial algorithm */
-
- /* If we still want to minimize the operation count (i.e., perform operations on the lower part only)
- * then we could provide the following algorithm for the selfadjoint-vector product. However, a full
- * matrix-vector product is still faster (at least for dynamic sizes that are not too small; small
- * matrices were not checked). The algo performs block matrix-vector and transposed matrix-vector products. */
- #ifdef EIGEN_NEVER_DEFINED
- int n4 = (std::max(0,n-4)/4)*4;
- hCoeffs.end(n-i-1).setZero();
- for (int b=i+1; b<n4; b+=4)
- {
- // the ?x4 part:
- hCoeffs.end(b-4) +=
- Block<MatrixType,Dynamic,4>(matA,b+4,b,n-b-4,4) * matA.template block<4,1>(b,i);
- // the respective transposed part:
- Block<CoeffVectorType,4,1>(hCoeffs, b, 0, 4,1) +=
- Block<MatrixType,Dynamic,4>(matA,b+4,b,n-b-4,4).adjoint() * Block<MatrixType,Dynamic,1>(matA,b+4,i,n-b-4,1);
- // the 4x4 block diagonal:
- Block<CoeffVectorType,4,1>(hCoeffs, b, 0, 4,1) +=
- (Block<MatrixType,4,4>(matA,b,b,4,4).template part<LowerTriangular|SelfAdjoint>()
- * (h * Block<MatrixType,4,1>(matA,b,i,4,1))).lazy();
- }
- #endif
- // todo: handle the remaining part
- /* end optimized selfadjoint - vector product */
-
- /* Another interesting note: the above rank-2 update is much slower than the following hand-written loop.
- * After an analysis of the ASM, it seems GCC (4.2) generates poor code because of the Block. Moreover,
- * if we remove the specialization of Block for Matrix then it is even worse, much worse! */
- #ifdef EIGEN_NEVER_DEFINED
- for (int j1=i+1; j1<n; ++j1)
- for (int i1=j1; i1<n; ++i1)
- matA.coeffRef(i1,j1) -= matA.coeff(i1,i)*ei_conj(hCoeffs.coeff(j1-1))
- + hCoeffs.coeff(i1-1)*ei_conj(matA.coeff(j1,i));
- #endif
-    /* end hand-written partial rank-2 update */
-
-    /* The current fastest implementation: the full matrix is used, with no "optimization" to use/compute
-     * only half of the matrix. The inner col -= alpha X + beta Y is vectorized by hand such that accesses
-     * to col are always aligned. Once we support that in Assign, the algorithm could be rewritten as
-     * a single compact expression; this code is therefore a good benchmark for when we do. */
-
- // let's use the end of hCoeffs to store temporary values:
- hCoeffs.end(n-i-1) = (matA.corner(BottomRight,n-i-1,n-i-1) * (h * matA.col(i).end(n-i-1))).lazy();
-    // FIXME in the above expression a temporary is created because of the scalar multiplication by h
-
- hCoeffs.end(n-i-1) += (h * Scalar(-0.5) * matA.col(i).end(n-i-1).dot(hCoeffs.end(n-i-1)))
- * matA.col(i).end(n-i-1);
-
- const Scalar* EIGEN_RESTRICT pb = &matA.coeffRef(0,i);
- const Scalar* EIGEN_RESTRICT pa = (&hCoeffs.coeffRef(0)) - 1;
- for (int j1=i+1; j1<n; ++j1)
- {
- int starti = i+1;
- int alignedEnd = starti;
- if (PacketSize>1)
- {
- int alignedStart = (starti) + ei_alignmentOffset(&matA.coeffRef(starti,j1), n-starti);
- alignedEnd = alignedStart + ((n-alignedStart)/PacketSize)*PacketSize;
-
- for (int i1=starti; i1<alignedStart; ++i1)
- matA.coeffRef(i1,j1) -= matA.coeff(i1,i)*ei_conj(hCoeffs.coeff(j1-1))
- + hCoeffs.coeff(i1-1)*ei_conj(matA.coeff(j1,i));
-
- Packet tmp0 = ei_pset1(hCoeffs.coeff(j1-1));
- Packet tmp1 = ei_pset1(matA.coeff(j1,i));
- Scalar* pc = &matA.coeffRef(0,j1);
- for (int i1=alignedStart ; i1<alignedEnd; i1+=PacketSize)
- ei_pstore(pc+i1,ei_psub(ei_pload(pc+i1),
- ei_padd(ei_pmul(tmp0, ei_ploadu(pb+i1)),
- ei_pmul(tmp1, ei_ploadu(pa+i1)))));
- }
- for (int i1=alignedEnd; i1<n; ++i1)
- matA.coeffRef(i1,j1) -= matA.coeff(i1,i)*ei_conj(hCoeffs.coeff(j1-1))
- + hCoeffs.coeff(i1-1)*ei_conj(matA.coeff(j1,i));
- }
- /* end optimized implementation */
-
- // note: at that point matA(i+1,i+1) is the (i+1)-th element of the final diagonal
- // note: the sequence of the beta values leads to the subdiagonal entries
- matA.col(i).coeffRef(i+1) = beta;
-
- hCoeffs.coeffRef(i) = h;
- }
- }
- if (NumTraits<Scalar>::IsComplex)
- {
- // Householder transformation on the remaining single scalar
- int i = n-2;
- Scalar v0 = matA.col(i).coeff(i+1);
- RealScalar beta = ei_abs(v0);
- if (ei_real(v0)>=0.)
- beta = -beta;
- matA.col(i).coeffRef(i+1) = beta;
- if(ei_isMuchSmallerThan(beta, Scalar(1))) hCoeffs.coeffRef(i) = Scalar(0);
- else hCoeffs.coeffRef(i) = (beta - v0) / beta;
- }
- else
- {
- hCoeffs.coeffRef(n-2) = 0;
- }
-}
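
For reference, each iteration of the loop above performs the following update (a compact restatement of the code, in Golub & Van Loan's notation, with v the part of the i-th column below the diagonal):

    \beta = -\,\mathrm{sign}(\Re v_0)\,\sqrt{|v_0|^2 + \|v_{1:}\|^2},\qquad
    h = \frac{\beta - v_0}{\beta},\qquad
    v \leftarrow \frac{v}{v_0 - \beta},\quad v_0 \leftarrow 1,

    p = h\,A\,v,\qquad
    w = p - \frac{h}{2}\,(v^{*}p)\,v,\qquad
    A \leftarrow A - v\,w^{*} - w\,v^{*}.

With H = I - h\,v v^{*} this is exactly the similarity transformation A \leftarrow H^{*} A H; \beta becomes the subdiagonal entry and h is stored in hCoeffs.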
-
-/** reconstructs and returns the matrix Q */
-template<typename MatrixType>
-typename Tridiagonalization<MatrixType>::MatrixType
-Tridiagonalization<MatrixType>::matrixQ(void) const
-{
- int n = m_matrix.rows();
- MatrixType matQ = MatrixType::Identity(n,n);
- for (int i = n-2; i>=0; i--)
- {
- Scalar tmp = m_matrix.coeff(i+1,i);
- m_matrix.const_cast_derived().coeffRef(i+1,i) = 1;
-
- matQ.corner(BottomRight,n-i-1,n-i-1) -=
- ((m_hCoeffs.coeff(i) * m_matrix.col(i).end(n-i-1)) *
- (m_matrix.col(i).end(n-i-1).adjoint() * matQ.corner(BottomRight,n-i-1,n-i-1)).lazy()).lazy();
-
- m_matrix.const_cast_derived().coeffRef(i+1,i) = tmp;
- }
- return matQ;
-}
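
Equivalently, the loop above accumulates the stored Householder reflectors onto the identity from the last to the first, so that (restating the code)

    Q = H_0 H_1 \cdots H_{n-2},\qquad H_i = I - h_i\, v_i v_i^{*},

where v_i is the part of the i-th packed column below the diagonal with its first entry set to 1; in the real case h_{n-2} = 0, so the last factor is the identity.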
-
-/** Performs a full decomposition in place */
-template<typename MatrixType>
-void Tridiagonalization<MatrixType>::decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
-{
- int n = mat.rows();
- ei_assert(mat.cols()==n && diag.size()==n && subdiag.size()==n-1);
- if (n==3 && (!NumTraits<Scalar>::IsComplex) )
- {
- _decomposeInPlace3x3(mat, diag, subdiag, extractQ);
- }
- else
- {
- Tridiagonalization tridiag(mat);
- diag = tridiag.diagonal();
- subdiag = tridiag.subDiagonal();
- if (extractQ)
- mat = tridiag.matrixQ();
- }
-}
-
-/** \internal
- * Optimized path for 3x3 matrices.
- * Especially useful for plane fitting.
- */
-template<typename MatrixType>
-void Tridiagonalization<MatrixType>::_decomposeInPlace3x3(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
-{
- diag[0] = ei_real(mat(0,0));
- RealScalar v1norm2 = ei_abs2(mat(0,2));
- if (ei_isMuchSmallerThan(v1norm2, RealScalar(1)))
- {
- diag[1] = ei_real(mat(1,1));
- diag[2] = ei_real(mat(2,2));
- subdiag[0] = ei_real(mat(0,1));
- subdiag[1] = ei_real(mat(1,2));
- if (extractQ)
- mat.setIdentity();
- }
- else
- {
- RealScalar beta = ei_sqrt(ei_abs2(mat(0,1))+v1norm2);
- RealScalar invBeta = RealScalar(1)/beta;
- Scalar m01 = mat(0,1) * invBeta;
- Scalar m02 = mat(0,2) * invBeta;
- Scalar q = RealScalar(2)*m01*mat(1,2) + m02*(mat(2,2) - mat(1,1));
- diag[1] = ei_real(mat(1,1) + m02*q);
- diag[2] = ei_real(mat(2,2) - m02*q);
- subdiag[0] = beta;
- subdiag[1] = ei_real(mat(1,2) - m01 * q);
- if (extractQ)
- {
- mat(0,0) = 1;
- mat(0,1) = 0;
- mat(0,2) = 0;
- mat(1,0) = 0;
- mat(1,1) = m01;
- mat(1,2) = m02;
- mat(2,0) = 0;
- mat(2,1) = m02;
- mat(2,2) = -m01;
- }
- }
-}
-
-#endif // EIGEN_HIDE_HEAVY_CODE
-
-#endif // EIGEN_TRIDIAGONALIZATION_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h b/extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h
deleted file mode 100644
index dfd9c787ae9..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h
+++ /dev/null
@@ -1,236 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_CHOLMODSUPPORT_H
-#define EIGEN_CHOLMODSUPPORT_H
-
-template<typename Scalar, typename CholmodType>
-void ei_cholmod_configure_matrix(CholmodType& mat)
-{
- if (ei_is_same_type<Scalar,float>::ret)
- {
- mat.xtype = CHOLMOD_REAL;
- mat.dtype = 1;
- }
- else if (ei_is_same_type<Scalar,double>::ret)
- {
- mat.xtype = CHOLMOD_REAL;
- mat.dtype = 0;
- }
- else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
- {
- mat.xtype = CHOLMOD_COMPLEX;
- mat.dtype = 1;
- }
- else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
- {
- mat.xtype = CHOLMOD_COMPLEX;
- mat.dtype = 0;
- }
- else
- {
- ei_assert(false && "Scalar type not supported by CHOLMOD");
- }
-}
-
-template<typename Derived>
-cholmod_sparse SparseMatrixBase<Derived>::asCholmodMatrix()
-{
- typedef typename Derived::Scalar Scalar;
- cholmod_sparse res;
- res.nzmax = nonZeros();
-  res.nrow = rows();
- res.ncol = cols();
- res.p = derived()._outerIndexPtr();
- res.i = derived()._innerIndexPtr();
- res.x = derived()._valuePtr();
- res.xtype = CHOLMOD_REAL;
- res.itype = CHOLMOD_INT;
- res.sorted = 1;
- res.packed = 1;
- res.dtype = 0;
- res.stype = -1;
-
- ei_cholmod_configure_matrix<Scalar>(res);
-
- if (Derived::Flags & SelfAdjoint)
- {
- if (Derived::Flags & UpperTriangular)
- res.stype = 1;
- else if (Derived::Flags & LowerTriangular)
- res.stype = -1;
- else
- res.stype = 0;
- }
- else
- res.stype = 0;
-
- return res;
-}
-
-template<typename Derived>
-cholmod_dense ei_cholmod_map_eigen_to_dense(MatrixBase<Derived>& mat)
-{
- EIGEN_STATIC_ASSERT((ei_traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
- typedef typename Derived::Scalar Scalar;
-
- cholmod_dense res;
- res.nrow = mat.rows();
- res.ncol = mat.cols();
- res.nzmax = res.nrow * res.ncol;
- res.d = mat.derived().stride();
- res.x = mat.derived().data();
- res.z = 0;
-
- ei_cholmod_configure_matrix<Scalar>(res);
-
- return res;
-}
-
-template<typename Scalar, int Flags>
-MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(cholmod_sparse& cm)
-{
- m_innerSize = cm.nrow;
- m_outerSize = cm.ncol;
- m_outerIndex = reinterpret_cast<int*>(cm.p);
- m_innerIndices = reinterpret_cast<int*>(cm.i);
- m_values = reinterpret_cast<Scalar*>(cm.x);
- m_nnz = m_outerIndex[cm.ncol];
-}
-
-template<typename MatrixType>
-class SparseLLT<MatrixType,Cholmod> : public SparseLLT<MatrixType>
-{
- protected:
- typedef SparseLLT<MatrixType> Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::RealScalar RealScalar;
- using Base::MatrixLIsDirty;
- using Base::SupernodalFactorIsDirty;
- using Base::m_flags;
- using Base::m_matrix;
- using Base::m_status;
-
- public:
-
- SparseLLT(int flags = 0)
- : Base(flags), m_cholmodFactor(0)
- {
- cholmod_start(&m_cholmod);
- }
-
- SparseLLT(const MatrixType& matrix, int flags = 0)
- : Base(flags), m_cholmodFactor(0)
- {
- cholmod_start(&m_cholmod);
- compute(matrix);
- }
-
- ~SparseLLT()
- {
- if (m_cholmodFactor)
- cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
- cholmod_finish(&m_cholmod);
- }
-
- inline const typename Base::CholMatrixType& matrixL(void) const;
-
- template<typename Derived>
- void solveInPlace(MatrixBase<Derived> &b) const;
-
- void compute(const MatrixType& matrix);
-
- protected:
- mutable cholmod_common m_cholmod;
- cholmod_factor* m_cholmodFactor;
-};
-
-template<typename MatrixType>
-void SparseLLT<MatrixType,Cholmod>::compute(const MatrixType& a)
-{
- if (m_cholmodFactor)
- {
- cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
- m_cholmodFactor = 0;
- }
-
- cholmod_sparse A = const_cast<MatrixType&>(a).asCholmodMatrix();
- m_cholmod.supernodal = CHOLMOD_AUTO;
-  // TODO: note that the two branches below are currently identical
- if (m_flags&IncompleteFactorization)
- {
- m_cholmod.nmethods = 1;
- m_cholmod.method[0].ordering = CHOLMOD_NATURAL;
- m_cholmod.postorder = 0;
- }
- else
- {
- m_cholmod.nmethods = 1;
- m_cholmod.method[0].ordering = CHOLMOD_NATURAL;
- m_cholmod.postorder = 0;
- }
- m_cholmod.final_ll = 1;
- m_cholmodFactor = cholmod_analyze(&A, &m_cholmod);
- cholmod_factorize(&A, m_cholmodFactor, &m_cholmod);
-
- m_status = (m_status & ~SupernodalFactorIsDirty) | MatrixLIsDirty;
-}
-
-template<typename MatrixType>
-inline const typename SparseLLT<MatrixType>::CholMatrixType&
-SparseLLT<MatrixType,Cholmod>::matrixL() const
-{
- if (m_status & MatrixLIsDirty)
- {
- ei_assert(!(m_status & SupernodalFactorIsDirty));
-
- cholmod_sparse* cmRes = cholmod_factor_to_sparse(m_cholmodFactor, &m_cholmod);
- const_cast<typename Base::CholMatrixType&>(m_matrix) = MappedSparseMatrix<Scalar>(*cmRes);
- free(cmRes);
-
- m_status = (m_status & ~MatrixLIsDirty);
- }
- return m_matrix;
-}
-
-template<typename MatrixType>
-template<typename Derived>
-void SparseLLT<MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
-{
- const int size = m_cholmodFactor->n;
- ei_assert(size==b.rows());
-
- // this uses Eigen's triangular sparse solver
-// if (m_status & MatrixLIsDirty)
-// matrixL();
-// Base::solveInPlace(b);
-  // as long as our own triangular sparse solver is not fully optimized,
-  // let's use CHOLMOD's:
- cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(b);
- cholmod_dense* x = cholmod_solve(CHOLMOD_LDLt, m_cholmodFactor, &cdb, &m_cholmod);
- b = Matrix<typename Base::Scalar,Dynamic,1>::Map(reinterpret_cast<typename Base::Scalar*>(x->x),b.rows());
- cholmod_free_dense(&x, &m_cholmod);
-}
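
A minimal usage sketch of the CHOLMOD-backed factorization defined above; this example is not part of the original source, and it assumes an Eigen2 build with CHOLMOD support enabled and linked, and a matrix holding the lower triangular part of a symmetric positive definite operator:

    #include <Eigen/Sparse>  // assumption: pulls in SparseLLT and the Cholmod backend

    // Overwrite b with the solution of A x = b using the specialization above.
    void solveSpd(const Eigen::SparseMatrix<double>& A, Eigen::VectorXd& b)
    {
      // the constructor calls compute(): cholmod_analyze + cholmod_factorize
      Eigen::SparseLLT<Eigen::SparseMatrix<double>, Eigen::Cholmod> llt(A);
      // forwards to cholmod_solve(CHOLMOD_LDLt, ...) as shown above
      llt.solveInPlace(b);
    }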
-
-#endif // EIGEN_CHOLMODSUPPORT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h b/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h
deleted file mode 100644
index d908e315f3b..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h
+++ /dev/null
@@ -1,330 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_RANDOMSETTER_H
-#define EIGEN_RANDOMSETTER_H
-
-/** Represents a std::map
- *
- * \see RandomSetter
- */
-template<typename Scalar> struct StdMapTraits
-{
- typedef int KeyType;
- typedef std::map<KeyType,Scalar> Type;
- enum {
- IsSorted = 1
- };
-
- static void setInvalidKey(Type&, const KeyType&) {}
-};
-
-#ifdef EIGEN_UNORDERED_MAP_SUPPORT
-/** Represents a std::unordered_map
- *
- * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file
 - * yourself, making sure that unordered_map is defined in the std namespace.
- *
 - * For instance, with a recent version of gcc you can either enable the C++0x standard (-std=c++0x) or do:
- * \code
- * #include <tr1/unordered_map>
- * #define EIGEN_UNORDERED_MAP_SUPPORT
- * namespace std {
- * using std::tr1::unordered_map;
- * }
- * \endcode
- *
- * \see RandomSetter
- */
-template<typename Scalar> struct StdUnorderedMapTraits
-{
- typedef int KeyType;
- typedef std::unordered_map<KeyType,Scalar> Type;
- enum {
- IsSorted = 0
- };
-
- static void setInvalidKey(Type&, const KeyType&) {}
-};
-#endif // EIGEN_UNORDERED_MAP_SUPPORT
-
-#ifdef _DENSE_HASH_MAP_H_
-/** Represents a google::dense_hash_map
- *
- * \see RandomSetter
- */
-template<typename Scalar> struct GoogleDenseHashMapTraits
-{
- typedef int KeyType;
- typedef google::dense_hash_map<KeyType,Scalar> Type;
- enum {
- IsSorted = 0
- };
-
- static void setInvalidKey(Type& map, const KeyType& k)
- { map.set_empty_key(k); }
-};
-#endif
-
-#ifdef _SPARSE_HASH_MAP_H_
-/** Represents a google::sparse_hash_map
- *
- * \see RandomSetter
- */
-template<typename Scalar> struct GoogleSparseHashMapTraits
-{
- typedef int KeyType;
- typedef google::sparse_hash_map<KeyType,Scalar> Type;
- enum {
- IsSorted = 0
- };
-
- static void setInvalidKey(Type&, const KeyType&) {}
-};
-#endif
-
-/** \class RandomSetter
- *
 - * \brief The RandomSetter is a wrapper object allowing one to set/update a sparse matrix with random access
- *
- * \param SparseMatrixType the type of the sparse matrix we are updating
- * \param MapTraits a traits class representing the map implementation used for the temporary sparse storage.
- * Its default value depends on the system.
 - * \param OuterPacketBits defines the number of rows (or columns) managed by a single map object,
 - *                        as a power-of-two exponent.
- *
- * This class temporarily represents a sparse matrix object using a generic map implementation allowing for
- * efficient random access. The conversion from the compressed representation to a hash_map object is performed
 - * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy
 - * suggests the use of nested blocks, as in this example:
- *
- * \code
- * SparseMatrix<double> m(rows,cols);
- * {
- * RandomSetter<SparseMatrix<double> > w(m);
- * // don't use m but w instead with read/write random access to the coefficients:
- * for(;;)
 - *     w(rand(),rand()) = rand();
- * }
- * // when w is deleted, the data are copied back to m
- * // and m is ready to use.
- * \endcode
- *
 - * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would
 - * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter
 - * uses multiple hash_maps, each representing 2^OuterPacketBits columns or rows according to the storage order.
 - * To reach optimal performance, this value should be adjusted according to the average number of nonzeros
 - * per row/column.
- *
- * The possible values for the template parameter MapTraits are:
- * - \b StdMapTraits: corresponds to std::map. (does not perform very well)
- * - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC)
- * - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption)
- * - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance)
- *
 - * The default map implementation depends on availability, and the preferred order is:
 - * GoogleSparseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits.
 - *
 - * For performance and memory consumption reasons it is highly recommended to use one of
 - * Google's hash_map implementations. To enable support for them, you have two options:
 - *  - \#include <google/dense_hash_map> yourself \b before the Eigen/Sparse header
 - *  - define EIGEN_GOOGLEHASH_SUPPORT
 - * In the latter case the inclusion of <google/dense_hash_map> is done for you.
- *
- * \see http://code.google.com/p/google-sparsehash/
- */
-template<typename SparseMatrixType,
- template <typename T> class MapTraits =
-#if defined _DENSE_HASH_MAP_H_
- GoogleDenseHashMapTraits
-#elif defined _HASH_MAP
- GnuHashMapTraits
-#else
- StdMapTraits
-#endif
- ,int OuterPacketBits = 6>
-class RandomSetter
-{
- typedef typename ei_traits<SparseMatrixType>::Scalar Scalar;
- struct ScalarWrapper
- {
- ScalarWrapper() : value(0) {}
- Scalar value;
- };
- typedef typename MapTraits<ScalarWrapper>::KeyType KeyType;
- typedef typename MapTraits<ScalarWrapper>::Type HashMapType;
- static const int OuterPacketMask = (1 << OuterPacketBits) - 1;
- enum {
- SwapStorage = 1 - MapTraits<ScalarWrapper>::IsSorted,
- TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0,
- SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor,
- IsUpperTriangular = SparseMatrixType::Flags & UpperTriangularBit,
- IsLowerTriangular = SparseMatrixType::Flags & LowerTriangularBit
- };
-
- public:
-
- /** Constructs a random setter object from the sparse matrix \a target
- *
-     * Note that the initial values of \a target are imported. If you want to re-set
- * a sparse matrix from scratch, then you must set it to zero first using the
- * setZero() function.
- */
- inline RandomSetter(SparseMatrixType& target)
- : mp_target(&target)
- {
- const int outerSize = SwapStorage ? target.innerSize() : target.outerSize();
- const int innerSize = SwapStorage ? target.outerSize() : target.innerSize();
- m_outerPackets = outerSize >> OuterPacketBits;
- if (outerSize&OuterPacketMask)
- m_outerPackets += 1;
- m_hashmaps = new HashMapType[m_outerPackets];
- // compute number of bits needed to store inner indices
- int aux = innerSize - 1;
- m_keyBitsOffset = 0;
- while (aux)
- {
- ++m_keyBitsOffset;
- aux = aux >> 1;
- }
- KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset));
- for (int k=0; k<m_outerPackets; ++k)
- MapTraits<ScalarWrapper>::setInvalidKey(m_hashmaps[k],ik);
-
- // insert current coeffs
- for (int j=0; j<mp_target->outerSize(); ++j)
- for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it)
- (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value();
- }
-
-    /** Destructor, which writes the accumulated data back to the target sparse matrix */
- ~RandomSetter()
- {
- KeyType keyBitsMask = (1<<m_keyBitsOffset)-1;
- if (!SwapStorage) // also means the map is sorted
- {
- mp_target->startFill(nonZeros());
- for (int k=0; k<m_outerPackets; ++k)
- {
- const int outerOffset = (1<<OuterPacketBits) * k;
- typename HashMapType::iterator end = m_hashmaps[k].end();
- for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
- {
- const int outer = (it->first >> m_keyBitsOffset) + outerOffset;
- const int inner = it->first & keyBitsMask;
- mp_target->fill(TargetRowMajor ? outer : inner, TargetRowMajor ? inner : outer) = it->second.value;
- }
- }
- mp_target->endFill();
- }
- else
- {
- VectorXi positions(mp_target->outerSize());
- positions.setZero();
- // pass 1
- for (int k=0; k<m_outerPackets; ++k)
- {
- typename HashMapType::iterator end = m_hashmaps[k].end();
- for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
- {
- const int outer = it->first & keyBitsMask;
- ++positions[outer];
- }
- }
- // prefix sum
- int count = 0;
- for (int j=0; j<mp_target->outerSize(); ++j)
- {
- int tmp = positions[j];
- mp_target->_outerIndexPtr()[j] = count;
- positions[j] = count;
- count += tmp;
- }
- mp_target->_outerIndexPtr()[mp_target->outerSize()] = count;
- mp_target->resizeNonZeros(count);
- // pass 2
- for (int k=0; k<m_outerPackets; ++k)
- {
- const int outerOffset = (1<<OuterPacketBits) * k;
- typename HashMapType::iterator end = m_hashmaps[k].end();
- for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
- {
- const int inner = (it->first >> m_keyBitsOffset) + outerOffset;
- const int outer = it->first & keyBitsMask;
- // sorted insertion
-          // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients;
-          // moreover, those 2^OuterPacketBits coeffs are likely to be sparse, and so only a
-          // small fraction of them has to be sorted, whence the following simple procedure:
- int posStart = mp_target->_outerIndexPtr()[outer];
- int i = (positions[outer]++) - 1;
- while ( (i >= posStart) && (mp_target->_innerIndexPtr()[i] > inner) )
- {
- mp_target->_valuePtr()[i+1] = mp_target->_valuePtr()[i];
- mp_target->_innerIndexPtr()[i+1] = mp_target->_innerIndexPtr()[i];
- --i;
- }
- mp_target->_innerIndexPtr()[i+1] = inner;
- mp_target->_valuePtr()[i+1] = it->second.value;
- }
- }
- }
- delete[] m_hashmaps;
- }
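
The "prefix sum" pass above is the standard counts-to-offsets conversion used to build compressed sparse storage. A self-contained illustration of just that step (plain C++, not Eigen code):

    #include <cstddef>
    #include <vector>

    // Turn per-outer-vector nonzero counts into starting offsets
    // (an exclusive prefix sum), as in the destructor above.
    std::vector<int> countsToOffsets(const std::vector<int>& counts)
    {
      std::vector<int> offsets(counts.size() + 1);
      int count = 0;
      for (std::size_t j = 0; j < counts.size(); ++j)
      {
        offsets[j] = count;             // outer vector j starts at offset 'count'
        count += counts[j];
      }
      offsets[counts.size()] = count;   // total number of nonzeros
      return offsets;
    }
    // e.g. counts {2,0,3}  ->  offsets {0,2,2,5}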
-
- /** \returns a reference to the coefficient at given coordinates \a row, \a col */
- Scalar& operator() (int row, int col)
- {
- ei_assert(((!IsUpperTriangular) || (row<=col)) && "Invalid access to an upper triangular matrix");
-      ei_assert(((!IsLowerTriangular) || (col<=row)) && "Invalid access to a lower triangular matrix");
- const int outer = SetterRowMajor ? row : col;
- const int inner = SetterRowMajor ? col : row;
- const int outerMajor = outer >> OuterPacketBits; // index of the packet/map
- const int outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
- const KeyType key = (KeyType(outerMinor)<<m_keyBitsOffset) | inner;
- return m_hashmaps[outerMajor][key].value;
- }
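
To make the bit packing above concrete, here is a self-contained sketch with illustrative values: OuterPacketBits = 6 and a key-bits offset of 10, which is what the constructor computes for an inner size of 1000 (not Eigen code):

    #include <cassert>

    int main()
    {
      const int OuterPacketBits = 6;                   // 64 inner vectors per map
      const int OuterPacketMask = (1 << OuterPacketBits) - 1;
      const int keyBitsOffset   = 10;                  // enough bits for innerSize = 1000

      const int outer = 130, inner = 7;                // e.g. column 130, row 7 (col-major)
      const int outerMajor = outer >> OuterPacketBits; // which map: 130 / 64 = 2
      const int outerMinor = outer & OuterPacketMask;  // inner vector in that map: 130 % 64 = 2
      const int key = (outerMinor << keyBitsOffset) | inner; // 2*1024 + 7 = 2055

      assert(outerMajor == 2 && outerMinor == 2 && key == 2055);
      return 0;
    }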
-
- /** \returns the number of non zero coefficients
- *
-     * \note Depending on the underlying map/hash_map implementation,
- * this function might be quite expensive.
- */
- int nonZeros() const
- {
- int nz = 0;
- for (int k=0; k<m_outerPackets; ++k)
- nz += m_hashmaps[k].size();
- return nz;
- }
-
-
- protected:
-
- HashMapType* m_hashmaps;
- SparseMatrixType* mp_target;
- int m_outerPackets;
- unsigned char m_keyBitsOffset;
-};
-
-#endif // EIGEN_RANDOMSETTER_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h
deleted file mode 100644
index ac285ec1aa3..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h
+++ /dev/null
@@ -1,178 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSE_CWISE_H
-#define EIGEN_SPARSE_CWISE_H
-
-/** \internal
 - * convenient macro to define the return type of a cwise binary operation */
-#define EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(OP) \
- CwiseBinaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType, OtherDerived>
-
-#define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
- SparseCwiseBinaryOp< \
- ei_scalar_product_op< \
- typename ei_scalar_product_traits< \
- typename ei_traits<ExpressionType>::Scalar, \
- typename ei_traits<OtherDerived>::Scalar \
- >::ReturnType \
- >, \
- ExpressionType, \
- OtherDerived \
- >
-
-/** \internal
 - * convenient macro to define the return type of a cwise unary operation */
-#define EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(OP) \
- SparseCwiseUnaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType>
-
-/** \internal
 - * convenient macro to define the return type of a cwise comparison to a scalar */
-/*#define EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(OP) \
- CwiseBinaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType, \
- NestByValue<typename ExpressionType::ConstantReturnType> >*/
-
-template<typename ExpressionType> class SparseCwise
-{
- public:
-
- typedef typename ei_traits<ExpressionType>::Scalar Scalar;
- typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
- ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
- typedef CwiseUnaryOp<ei_scalar_add_op<Scalar>, ExpressionType> ScalarAddReturnType;
-
- inline SparseCwise(const ExpressionType& matrix) : m_matrix(matrix) {}
-
- /** \internal */
- inline const ExpressionType& _expression() const { return m_matrix; }
-
- template<typename OtherDerived>
- const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
- operator*(const SparseMatrixBase<OtherDerived> &other) const;
-
- template<typename OtherDerived>
- const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
- operator*(const MatrixBase<OtherDerived> &other) const;
-
-// template<typename OtherDerived>
-// const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
-// operator/(const SparseMatrixBase<OtherDerived> &other) const;
-//
-// template<typename OtherDerived>
-// const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
-// operator/(const MatrixBase<OtherDerived> &other) const;
-
- template<typename OtherDerived>
- const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)
- min(const SparseMatrixBase<OtherDerived> &other) const;
-
- template<typename OtherDerived>
- const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)
- max(const SparseMatrixBase<OtherDerived> &other) const;
-
- const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) abs() const;
- const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) abs2() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op) square() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op) cube() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op) inverse() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op) sqrt() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op) exp() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op) log() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op) cos() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op) sin() const;
-// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op) pow(const Scalar& exponent) const;
-
- template<typename OtherDerived>
- inline ExpressionType& operator*=(const SparseMatrixBase<OtherDerived> &other);
-
-// template<typename OtherDerived>
-// inline ExpressionType& operator/=(const SparseMatrixBase<OtherDerived> &other);
-
- /*
- template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)
- operator<(const MatrixBase<OtherDerived>& other) const;
-
- template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)
- operator<=(const MatrixBase<OtherDerived>& other) const;
-
- template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)
- operator>(const MatrixBase<OtherDerived>& other) const;
-
- template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)
- operator>=(const MatrixBase<OtherDerived>& other) const;
-
- template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)
- operator==(const MatrixBase<OtherDerived>& other) const;
-
- template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)
- operator!=(const MatrixBase<OtherDerived>& other) const;
-
- // comparisons to a scalar value
- const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)
- operator<(Scalar s) const;
-
- const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)
- operator<=(Scalar s) const;
-
- const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)
- operator>(Scalar s) const;
-
- const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)
- operator>=(Scalar s) const;
-
- const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)
- operator==(Scalar s) const;
-
- const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)
- operator!=(Scalar s) const;
- */
-
-  // allows extending SparseCwise outside Eigen
- #ifdef EIGEN_SPARSE_CWISE_PLUGIN
- #include EIGEN_SPARSE_CWISE_PLUGIN
- #endif
-
- protected:
- ExpressionTypeNested m_matrix;
-
- private:
- SparseCwise& operator=(const SparseCwise&);
-};
-
-template<typename Derived>
-inline const SparseCwise<Derived>
-SparseMatrixBase<Derived>::cwise() const
-{
- return derived();
-}
-
-template<typename Derived>
-inline SparseCwise<Derived>
-SparseMatrixBase<Derived>::cwise()
-{
- return derived();
-}
-
-#endif // EIGEN_SPARSE_CWISE_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h
deleted file mode 100644
index da9746e2099..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h
+++ /dev/null
@@ -1,453 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
-#define EIGEN_SPARSE_CWISE_BINARY_OP_H
-
-// Here we have to handle 3 cases:
-// 1 - sparse op dense
-// 2 - dense op sparse
-// 3 - sparse op sparse
-// We also need to implement a 4th iterator for:
-// 4 - dense op dense
-// Finally, we also need to distinguish between the product and other operations:
-// configuration returned mode
-// 1 - sparse op dense product sparse
-// generic dense
-// 2 - dense op sparse product sparse
-// generic dense
-// 3 - sparse op sparse product sparse
-// generic sparse
-// 4 - dense op dense product dense
-// generic dense
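
For the sparse op sparse case, the generic inner iterator below walks the two sorted index sequences with a two-pointer union merge, substituting zero for a missing entry. A self-contained sketch of that scheme (plain C++, not the actual iterator):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Union-merge of two sorted (index, value) sequences, applying 'op' to
    // aligned entries and zero-filling the missing side -- the scheme of the
    // generic sparse-sparse InnerIterator below.
    template<typename Op>
    std::vector<std::pair<int,double> >
    mergeSparse(const std::vector<std::pair<int,double> >& a,
                const std::vector<std::pair<int,double> >& b, Op op)
    {
      std::vector<std::pair<int,double> > res;
      std::size_t i = 0, j = 0;
      while (i < a.size() || j < b.size())
      {
        if (i < a.size() && j < b.size() && a[i].first == b[j].first)
        { res.push_back(std::make_pair(a[i].first, op(a[i].second, b[j].second))); ++i; ++j; }
        else if (j == b.size() || (i < a.size() && a[i].first < b[j].first))
        { res.push_back(std::make_pair(a[i].first, op(a[i].second, 0.0))); ++i; }
        else
        { res.push_back(std::make_pair(b[j].first, op(0.0, b[j].second))); ++j; }
      }
      return res;
    }

For the product functor only the aligned entries survive (the others are multiplied by zero), hence the dedicated intersection-only specialization further below.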
-
-template<typename BinaryOp, typename Lhs, typename Rhs>
-struct ei_traits<SparseCwiseBinaryOp<BinaryOp, Lhs, Rhs> >
-{
- typedef typename ei_result_of<
- BinaryOp(
- typename Lhs::Scalar,
- typename Rhs::Scalar
- )
- >::type Scalar;
- typedef typename Lhs::Nested LhsNested;
- typedef typename Rhs::Nested RhsNested;
- typedef typename ei_unref<LhsNested>::type _LhsNested;
- typedef typename ei_unref<RhsNested>::type _RhsNested;
- enum {
- LhsCoeffReadCost = _LhsNested::CoeffReadCost,
- RhsCoeffReadCost = _RhsNested::CoeffReadCost,
- LhsFlags = _LhsNested::Flags,
- RhsFlags = _RhsNested::Flags,
- RowsAtCompileTime = Lhs::RowsAtCompileTime,
- ColsAtCompileTime = Lhs::ColsAtCompileTime,
- MaxRowsAtCompileTime = Lhs::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = Lhs::MaxColsAtCompileTime,
- Flags = (int(LhsFlags) | int(RhsFlags)) & HereditaryBits,
- CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + ei_functor_traits<BinaryOp>::Cost
- };
-};
-
-template<typename BinaryOp, typename Lhs, typename Rhs>
-class SparseCwiseBinaryOp : ei_no_assignment_operator,
- public SparseMatrixBase<SparseCwiseBinaryOp<BinaryOp, Lhs, Rhs> >
-{
- public:
-
- class InnerIterator;
-
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseCwiseBinaryOp)
- typedef typename ei_traits<SparseCwiseBinaryOp>::LhsNested LhsNested;
- typedef typename ei_traits<SparseCwiseBinaryOp>::RhsNested RhsNested;
- typedef typename ei_unref<LhsNested>::type _LhsNested;
- typedef typename ei_unref<RhsNested>::type _RhsNested;
-
- EIGEN_STRONG_INLINE SparseCwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp())
- : m_lhs(lhs), m_rhs(rhs), m_functor(func)
- {
- EIGEN_STATIC_ASSERT((_LhsNested::Flags&RowMajorBit)==(_RhsNested::Flags&RowMajorBit),
- BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER)
- EIGEN_STATIC_ASSERT((ei_functor_allows_mixing_real_and_complex<BinaryOp>::ret
- ? int(ei_is_same_type<typename Lhs::RealScalar, typename Rhs::RealScalar>::ret)
- : int(ei_is_same_type<typename Lhs::Scalar, typename Rhs::Scalar>::ret)),
- YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
- // require the sizes to match
- EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
- ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
- }
-
- EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_lhs.cols(); }
-
- EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
- EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
- EIGEN_STRONG_INLINE const BinaryOp& functor() const { return m_functor; }
-
- protected:
- const LhsNested m_lhs;
- const RhsNested m_rhs;
- const BinaryOp m_functor;
-};
-
-template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
- int _LhsStorageMode = int(Lhs::Flags) & SparseBit,
- int _RhsStorageMode = int(Rhs::Flags) & SparseBit>
-class ei_sparse_cwise_binary_op_inner_iterator_selector;
-
-template<typename BinaryOp, typename Lhs, typename Rhs>
-class SparseCwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator
- : public ei_sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs, typename SparseCwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator>
-{
- public:
- typedef ei_sparse_cwise_binary_op_inner_iterator_selector<
- BinaryOp,Lhs,Rhs, InnerIterator> Base;
-
- EIGEN_STRONG_INLINE InnerIterator(const SparseCwiseBinaryOp& binOp, int outer)
- : Base(binOp,outer)
- {}
- private:
- InnerIterator& operator=(const InnerIterator&);
-};
-
-/***************************************************************************
-* Implementation of inner-iterators
-***************************************************************************/
-
-// template<typename T> struct ei_func_is_conjunction { enum { ret = false }; };
-// template<typename T> struct ei_func_is_conjunction<ei_scalar_product_op<T> > { enum { ret = true }; };
-
-// TODO generalize the ei_scalar_product_op specialization to all conjunctions if any !
-
-// sparse - sparse (generic)
-template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
-class ei_sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, IsSparse, IsSparse>
-{
- typedef SparseCwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
- typedef typename ei_traits<CwiseBinaryXpr>::Scalar Scalar;
- typedef typename ei_traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
- typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
- typedef typename _LhsNested::InnerIterator LhsIterator;
- typedef typename _RhsNested::InnerIterator RhsIterator;
- public:
-
- EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
- : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
- {
- this->operator++();
- }
-
- EIGEN_STRONG_INLINE Derived& operator++()
- {
- if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
- {
- m_id = m_lhsIter.index();
- m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
- ++m_lhsIter;
- ++m_rhsIter;
- }
- else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
- {
- m_id = m_lhsIter.index();
- m_value = m_functor(m_lhsIter.value(), Scalar(0));
- ++m_lhsIter;
- }
- else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
- {
- m_id = m_rhsIter.index();
- m_value = m_functor(Scalar(0), m_rhsIter.value());
- ++m_rhsIter;
- }
- else
- {
- m_id = -1;
- }
- return *static_cast<Derived*>(this);
- }
-
- EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
-
- EIGEN_STRONG_INLINE int index() const { return m_id; }
- EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); }
-
- EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
-
- protected:
- LhsIterator m_lhsIter;
- RhsIterator m_rhsIter;
- const BinaryOp& m_functor;
- Scalar m_value;
- int m_id;
-
- private:
- ei_sparse_cwise_binary_op_inner_iterator_selector& operator=(const ei_sparse_cwise_binary_op_inner_iterator_selector&);
-};
-
-// sparse - sparse (product)
-template<typename T, typename Lhs, typename Rhs, typename Derived>
-class ei_sparse_cwise_binary_op_inner_iterator_selector<ei_scalar_product_op<T>, Lhs, Rhs, Derived, IsSparse, IsSparse>
-{
- typedef ei_scalar_product_op<T> BinaryFunc;
- typedef SparseCwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
- typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename ei_traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
- typedef typename _LhsNested::InnerIterator LhsIterator;
- typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
- typedef typename _RhsNested::InnerIterator RhsIterator;
- public:
-
- EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
- : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
- {
- while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
- {
- if (m_lhsIter.index() < m_rhsIter.index())
- ++m_lhsIter;
- else
- ++m_rhsIter;
- }
- }
-
- EIGEN_STRONG_INLINE Derived& operator++()
- {
- ++m_lhsIter;
- ++m_rhsIter;
- while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
- {
- if (m_lhsIter.index() < m_rhsIter.index())
- ++m_lhsIter;
- else
- ++m_rhsIter;
- }
- return *static_cast<Derived*>(this);
- }
-
- EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
-
- EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); }
-
- EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
-
- protected:
- LhsIterator m_lhsIter;
- RhsIterator m_rhsIter;
- const BinaryFunc& m_functor;
-
- private:
- ei_sparse_cwise_binary_op_inner_iterator_selector& operator=(const ei_sparse_cwise_binary_op_inner_iterator_selector&);
-};
-
-// sparse - dense (product)
-template<typename T, typename Lhs, typename Rhs, typename Derived>
-class ei_sparse_cwise_binary_op_inner_iterator_selector<ei_scalar_product_op<T>, Lhs, Rhs, Derived, IsSparse, IsDense>
-{
- typedef ei_scalar_product_op<T> BinaryFunc;
- typedef SparseCwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
- typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename ei_traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
- typedef typename ei_traits<CwiseBinaryXpr>::RhsNested RhsNested;
- typedef typename _LhsNested::InnerIterator LhsIterator;
- enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
- public:
-
- EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
- : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
- {}
-
- EIGEN_STRONG_INLINE Derived& operator++()
- {
- ++m_lhsIter;
- return *static_cast<Derived*>(this);
- }
-
- EIGEN_STRONG_INLINE Scalar value() const
- { return m_functor(m_lhsIter.value(),
- m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
-
- EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); }
-
- EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
-
- protected:
- const RhsNested m_rhs;
- LhsIterator m_lhsIter;
- const BinaryFunc m_functor;
- const int m_outer;
-
- private:
- ei_sparse_cwise_binary_op_inner_iterator_selector& operator=(const ei_sparse_cwise_binary_op_inner_iterator_selector&);
-};
-
-// dense - sparse (product)
-template<typename T, typename Lhs, typename Rhs, typename Derived>
-class ei_sparse_cwise_binary_op_inner_iterator_selector<ei_scalar_product_op<T>, Lhs, Rhs, Derived, IsDense, IsSparse>
-{
- typedef ei_scalar_product_op<T> BinaryFunc;
- typedef SparseCwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
- typedef typename CwiseBinaryXpr::Scalar Scalar;
- typedef typename ei_traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
- typedef typename _RhsNested::InnerIterator RhsIterator;
- enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
- public:
-
- EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer)
- : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
- {}
-
- EIGEN_STRONG_INLINE Derived& operator++()
- {
- ++m_rhsIter;
- return *static_cast<Derived*>(this);
- }
-
- EIGEN_STRONG_INLINE Scalar value() const
- { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
-
- EIGEN_STRONG_INLINE int index() const { return m_rhsIter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_rhsIter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_rhsIter.col(); }
-
- EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
-
- protected:
- const CwiseBinaryXpr& m_xpr;
- RhsIterator m_rhsIter;
- const BinaryFunc& m_functor;
- const int m_outer;
-};
-
-
-/***************************************************************************
-* Implementation of SparseMatrixBase and SparseCwise functions/operators
-***************************************************************************/
-
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const SparseCwiseBinaryOp<ei_scalar_difference_op<typename ei_traits<Derived>::Scalar>,
- Derived, OtherDerived>
-SparseMatrixBase<Derived>::operator-(const SparseMatrixBase<OtherDerived> &other) const
-{
- return SparseCwiseBinaryOp<ei_scalar_difference_op<Scalar>,
- Derived, OtherDerived>(derived(), other.derived());
-}
-
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
-SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
-{
- return *this = derived() - other.derived();
-}
-
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const SparseCwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
-SparseMatrixBase<Derived>::operator+(const SparseMatrixBase<OtherDerived> &other) const
-{
- return SparseCwiseBinaryOp<ei_scalar_sum_op<Scalar>, Derived, OtherDerived>(derived(), other.derived());
-}
-
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE Derived &
-SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
-{
- return *this = derived() + other.derived();
-}
-
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
-SparseCwise<ExpressionType>::operator*(const SparseMatrixBase<OtherDerived> &other) const
-{
- return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
-}
-
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
-SparseCwise<ExpressionType>::operator*(const MatrixBase<OtherDerived> &other) const
-{
- return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
-}
-
-// template<typename ExpressionType>
-// template<typename OtherDerived>
-// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
-// SparseCwise<ExpressionType>::operator/(const SparseMatrixBase<OtherDerived> &other) const
-// {
-// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived());
-// }
-//
-// template<typename ExpressionType>
-// template<typename OtherDerived>
-// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
-// SparseCwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
-// {
-// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived());
-// }
-
-template<typename ExpressionType>
-template<typename OtherDerived>
-inline ExpressionType& SparseCwise<ExpressionType>::operator*=(const SparseMatrixBase<OtherDerived> &other)
-{
- return m_matrix.const_cast_derived() = _expression() * other.derived();
-}
-
-// template<typename ExpressionType>
-// template<typename OtherDerived>
-// inline ExpressionType& SparseCwise<ExpressionType>::operator/=(const SparseMatrixBase<OtherDerived> &other)
-// {
-// return m_matrix.const_cast_derived() = *this / other;
-// }
-
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)
-SparseCwise<ExpressionType>::min(const SparseMatrixBase<OtherDerived> &other) const
-{
- return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)(_expression(), other.derived());
-}
-
-template<typename ExpressionType>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)
-SparseCwise<ExpressionType>::max(const SparseMatrixBase<OtherDerived> &other) const
-{
- return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)(_expression(), other.derived());
-}
-
-// template<typename Derived>
-// template<typename CustomBinaryOp, typename OtherDerived>
-// EIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
-// SparseMatrixBase<Derived>::binaryExpr(const SparseMatrixBase<OtherDerived> &other, const CustomBinaryOp& func) const
-// {
-// return CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>(derived(), other.derived(), func);
-// }
-
-#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h
deleted file mode 100644
index 2ed7a15579f..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h
+++ /dev/null
@@ -1,186 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
-#define EIGEN_SPARSE_CWISE_UNARY_OP_H
-
-template<typename UnaryOp, typename MatrixType>
-struct ei_traits<SparseCwiseUnaryOp<UnaryOp, MatrixType> > : ei_traits<MatrixType>
-{
- typedef typename ei_result_of<
- UnaryOp(typename MatrixType::Scalar)
- >::type Scalar;
- typedef typename MatrixType::Nested MatrixTypeNested;
- typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
- enum {
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost + ei_functor_traits<UnaryOp>::Cost
- };
-};
-
-template<typename UnaryOp, typename MatrixType>
-class SparseCwiseUnaryOp : ei_no_assignment_operator,
- public SparseMatrixBase<SparseCwiseUnaryOp<UnaryOp, MatrixType> >
-{
- public:
-
- class InnerIterator;
-// typedef typename ei_unref<LhsNested>::type _LhsNested;
-
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseCwiseUnaryOp)
-
- inline SparseCwiseUnaryOp(const MatrixType& mat, const UnaryOp& func = UnaryOp())
- : m_matrix(mat), m_functor(func) {}
-
- EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); }
-
-// EIGEN_STRONG_INLINE const typename MatrixType::Nested& _matrix() const { return m_matrix; }
-// EIGEN_STRONG_INLINE const UnaryOp& _functor() const { return m_functor; }
-
- protected:
- const typename MatrixType::Nested m_matrix;
- const UnaryOp m_functor;
-};
-
-
-template<typename UnaryOp, typename MatrixType>
-class SparseCwiseUnaryOp<UnaryOp,MatrixType>::InnerIterator
-{
- typedef typename SparseCwiseUnaryOp::Scalar Scalar;
- typedef typename ei_traits<SparseCwiseUnaryOp>::_MatrixTypeNested _MatrixTypeNested;
- typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
- public:
-
- EIGEN_STRONG_INLINE InnerIterator(const SparseCwiseUnaryOp& unaryOp, int outer)
- : m_iter(unaryOp.m_matrix,outer), m_functor(unaryOp.m_functor)
- {}
-
- EIGEN_STRONG_INLINE InnerIterator& operator++()
- { ++m_iter; return *this; }
-
- EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
-
- EIGEN_STRONG_INLINE int index() const { return m_iter.index(); }
- EIGEN_STRONG_INLINE int row() const { return m_iter.row(); }
- EIGEN_STRONG_INLINE int col() const { return m_iter.col(); }
-
- EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
-
- protected:
- MatrixTypeIterator m_iter;
- const UnaryOp m_functor;
-
- private:
- InnerIterator& operator=(const InnerIterator&);
-};
-
-template<typename Derived>
-template<typename CustomUnaryOp>
-EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<CustomUnaryOp, Derived>
-SparseMatrixBase<Derived>::unaryExpr(const CustomUnaryOp& func) const
-{
- return SparseCwiseUnaryOp<CustomUnaryOp, Derived>(derived(), func);
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<ei_scalar_opposite_op<typename ei_traits<Derived>::Scalar>,Derived>
-SparseMatrixBase<Derived>::operator-() const
-{
- return derived();
-}
-
-template<typename ExpressionType>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op)
-SparseCwise<ExpressionType>::abs() const
-{
- return _expression();
-}
-
-template<typename ExpressionType>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op)
-SparseCwise<ExpressionType>::abs2() const
-{
- return _expression();
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE typename SparseMatrixBase<Derived>::ConjugateReturnType
-SparseMatrixBase<Derived>::conjugate() const
-{
- return ConjugateReturnType(derived());
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::RealReturnType
-SparseMatrixBase<Derived>::real() const { return derived(); }
-
-template<typename Derived>
-EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::ImagReturnType
-SparseMatrixBase<Derived>::imag() const { return derived(); }
-
-template<typename Derived>
-template<typename NewType>
-EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<ei_scalar_cast_op<typename ei_traits<Derived>::Scalar, NewType>, Derived>
-SparseMatrixBase<Derived>::cast() const
-{
- return derived();
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<ei_scalar_multiple_op<typename ei_traits<Derived>::Scalar>, Derived>
-SparseMatrixBase<Derived>::operator*(const Scalar& scalar) const
-{
- return SparseCwiseUnaryOp<ei_scalar_multiple_op<Scalar>, Derived>
- (derived(), ei_scalar_multiple_op<Scalar>(scalar));
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE const SparseCwiseUnaryOp<ei_scalar_quotient1_op<typename ei_traits<Derived>::Scalar>, Derived>
-SparseMatrixBase<Derived>::operator/(const Scalar& scalar) const
-{
- return SparseCwiseUnaryOp<ei_scalar_quotient1_op<Scalar>, Derived>
- (derived(), ei_scalar_quotient1_op<Scalar>(scalar));
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-SparseMatrixBase<Derived>::operator*=(const Scalar& other)
-{
- for (int j=0; j<outerSize(); ++j)
- for (typename Derived::InnerIterator i(derived(),j); i; ++i)
- i.valueRef() *= other;
- return derived();
-}
-
-template<typename Derived>
-EIGEN_STRONG_INLINE Derived&
-SparseMatrixBase<Derived>::operator/=(const Scalar& other)
-{
- for (int j=0; j<outerSize(); ++j)
- for (typename Derived::InnerIterator i(derived(),j); i; ++i)
- i.valueRef() /= other;
- return derived();
-}
-
-#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h b/extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h
deleted file mode 100644
index 9b7432a8216..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h
+++ /dev/null
@@ -1,159 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
-#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
-
-// the product of a diagonal matrix with a sparse matrix can easily be
-// implemented using expression templates. We have two very different cases:
-// 1 - diag * row-major sparse
-// => each inner vector <=> scalar * sparse vector product
-// => so we can reuse CwiseUnaryOp::InnerIterator
-// 2 - diag * col-major sparse
-// => each inner vector <=> dense vector * sparse vector cwise product
-// => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
-// for that particular case
-// The two other cases are symmetric.
-
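
The case analysis above rests on a simple identity: for D = diag(d), row i of D*S is d[i] times row i of S, which is why each inner vector of the product reduces to a scalar-times-sparse-vector expression. A self-contained check of that identity on plain dense arrays, independent of Eigen:

```cpp
// Verifies (diag(d) * S)(i,j) == d[i] * S(i,j) on a small dense example.
#include <cassert>

int main()
{
  const int n = 3;
  double d[n] = {2, 3, 5};
  double S[n][n] = {{1, 0, 4}, {0, 7, 0}, {9, 0, 2}};
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j)
    {
      double scaledRow = d[i] * S[i][j];             // scalar * (row of S)
      double full = 0;
      for (int k = 0; k < n; ++k)
        full += (i == k ? d[i] : 0.0) * S[k][j];     // explicit diag(d) * S
      assert(scaledRow == full);
    }
  return 0;
}
```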
-template<typename Lhs, typename Rhs>
-struct ei_traits<SparseDiagonalProduct<Lhs, Rhs> > : ei_traits<SparseProduct<Lhs, Rhs, DiagonalProduct> >
-{
- typedef typename ei_cleantype<Lhs>::type _Lhs;
- typedef typename ei_cleantype<Rhs>::type _Rhs;
- enum {
- SparseFlags = ((int(_Lhs::Flags)&Diagonal)==Diagonal) ? int(_Rhs::Flags) : int(_Lhs::Flags),
- Flags = SparseBit | (SparseFlags&RowMajorBit)
- };
-};
-
-enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
-class ei_sparse_diagonal_product_inner_iterator_selector;
-
-template<typename LhsNested, typename RhsNested>
-class SparseDiagonalProduct : public SparseMatrixBase<SparseDiagonalProduct<LhsNested,RhsNested> >, ei_no_assignment_operator
-{
- typedef typename ei_traits<SparseDiagonalProduct>::_LhsNested _LhsNested;
- typedef typename ei_traits<SparseDiagonalProduct>::_RhsNested _RhsNested;
-
- enum {
- LhsMode = (_LhsNested::Flags&Diagonal)==Diagonal ? SDP_IsDiagonal
- : (_LhsNested::Flags&RowMajorBit) ? SDP_IsSparseRowMajor : SDP_IsSparseColMajor,
- RhsMode = (_RhsNested::Flags&Diagonal)==Diagonal ? SDP_IsDiagonal
- : (_RhsNested::Flags&RowMajorBit) ? SDP_IsSparseRowMajor : SDP_IsSparseColMajor
- };
-
- public:
-
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseDiagonalProduct)
-
- typedef ei_sparse_diagonal_product_inner_iterator_selector
- <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
-
- template<typename Lhs, typename Rhs>
- EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
- : m_lhs(lhs), m_rhs(rhs)
- {
- ei_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
- }
-
- EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); }
-
- EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
- EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
- protected:
- LhsNested m_lhs;
- RhsNested m_rhs;
-};
-
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class ei_sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
- : public SparseCwiseUnaryOp<ei_scalar_multiple_op<typename Lhs::Scalar>,Rhs>::InnerIterator
-{
- typedef typename SparseCwiseUnaryOp<ei_scalar_multiple_op<typename Lhs::Scalar>,Rhs>::InnerIterator Base;
- public:
- inline ei_sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, int outer)
- : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
- {}
-};
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class ei_sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
- : public SparseCwiseBinaryOp<
- ei_scalar_product_op<typename Lhs::Scalar>,
- SparseInnerVectorSet<Rhs,1>,
- typename Lhs::_CoeffsVectorType>::InnerIterator
-{
- typedef typename SparseCwiseBinaryOp<
- ei_scalar_product_op<typename Lhs::Scalar>,
- SparseInnerVectorSet<Rhs,1>,
- typename Lhs::_CoeffsVectorType>::InnerIterator Base;
- public:
- inline ei_sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, int outer)
- : Base(expr.rhs().innerVector(outer).cwise() * expr.lhs().diagonal(), 0)
- {}
- private:
- ei_sparse_diagonal_product_inner_iterator_selector& operator=(const ei_sparse_diagonal_product_inner_iterator_selector&);
-};
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class ei_sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
- : public SparseCwiseUnaryOp<ei_scalar_multiple_op<typename Rhs::Scalar>,Lhs>::InnerIterator
-{
- typedef typename SparseCwiseUnaryOp<ei_scalar_multiple_op<typename Rhs::Scalar>,Lhs>::InnerIterator Base;
- public:
- inline ei_sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, int outer)
- : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
- {}
-};
-
-template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
-class ei_sparse_diagonal_product_inner_iterator_selector
-<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
- : public SparseCwiseBinaryOp<
- ei_scalar_product_op<typename Rhs::Scalar>,
- SparseInnerVectorSet<Lhs,1>,
- NestByValue<Transpose<typename Rhs::_CoeffsVectorType> > >::InnerIterator
-{
- typedef typename SparseCwiseBinaryOp<
- ei_scalar_product_op<typename Rhs::Scalar>,
- SparseInnerVectorSet<Lhs,1>,
- NestByValue<Transpose<typename Rhs::_CoeffsVectorType> > >::InnerIterator Base;
- public:
- inline ei_sparse_diagonal_product_inner_iterator_selector(
- const SparseDiagonalProductType& expr, int outer)
- : Base(expr.lhs().innerVector(outer).cwise() * expr.rhs().diagonal().transpose().nestByValue(), 0)
- {}
-};
-
-#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
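
A usage sketch for the class removed above. It assumes Eigen2's `asDiagonal()` expression, whose `Diagonal` flag is what makes `ei_sparse_product_mode` (in the SparseProduct.h hunk further below) select `SparseDiagonalProduct`; treat the exact overload routing as an assumption rather than a confirmed guarantee of this hunk:

```cpp
// Hedged sketch: assumes d.asDiagonal() participates in sparse products.
#include <Eigen/Sparse>
using namespace Eigen;

void scaleColumns(SparseMatrix<double>& S, const VectorXd& d)
{
  // S * diag(d): column j of the result is d[j] * S.col(j). For a
  // col-major S this hits the (SDP_IsSparseColMajor, SDP_IsDiagonal)
  // iterator specialization above.
  SparseMatrix<double> scaled = S * d.asDiagonal();
  S.swap(scaled);
}
```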
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h b/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h
deleted file mode 100644
index 315ec4af39f..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSE_FLAGGED_H
-#define EIGEN_SPARSE_FLAGGED_H
-
-template<typename ExpressionType, unsigned int Added, unsigned int Removed>
-struct ei_traits<SparseFlagged<ExpressionType, Added, Removed> > : ei_traits<ExpressionType>
-{
- enum { Flags = (ExpressionType::Flags | Added) & ~Removed };
-};
-
-template<typename ExpressionType, unsigned int Added, unsigned int Removed> class SparseFlagged
- : public SparseMatrixBase<SparseFlagged<ExpressionType, Added, Removed> >
-{
- public:
-
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseFlagged)
- class InnerIterator;
- class ReverseInnerIterator;
-
- typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
- ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
-
- inline SparseFlagged(const ExpressionType& matrix) : m_matrix(matrix) {}
-
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
-
- // FIXME should we keep them?
- inline Scalar& coeffRef(int row, int col)
- { return m_matrix.const_cast_derived().coeffRef(col, row); }
-
- inline const Scalar coeff(int row, int col) const
- { return m_matrix.coeff(col, row); }
-
- inline const Scalar coeff(int index) const
- { return m_matrix.coeff(index); }
-
- inline Scalar& coeffRef(int index)
- { return m_matrix.const_cast_derived().coeffRef(index); }
-
- protected:
- ExpressionTypeNested m_matrix;
-
- private:
- SparseFlagged& operator=(const SparseFlagged&);
-};
-
-template<typename ExpressionType, unsigned int Added, unsigned int Removed>
- class SparseFlagged<ExpressionType,Added,Removed>::InnerIterator : public ExpressionType::InnerIterator
-{
- public:
- EIGEN_STRONG_INLINE InnerIterator(const SparseFlagged& xpr, int outer)
- : ExpressionType::InnerIterator(xpr.m_matrix, outer)
- {}
-
- private:
- InnerIterator& operator=(const InnerIterator&);
-};
-
-template<typename ExpressionType, unsigned int Added, unsigned int Removed>
- class SparseFlagged<ExpressionType,Added,Removed>::ReverseInnerIterator : public ExpressionType::ReverseInnerIterator
-{
- public:
-
- EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseFlagged& xpr, int outer)
- : ExpressionType::ReverseInnerIterator(xpr.m_matrix, outer)
- {}
-};
-
-template<typename Derived>
-template<unsigned int Added>
-inline const SparseFlagged<Derived, Added, 0>
-SparseMatrixBase<Derived>::marked() const
-{
- return derived();
-}
-
-#endif // EIGEN_SPARSE_FLAGGED_H
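
`marked()` only relabels flags; no data is copied. A minimal sketch, assuming Eigen2's `LowerTriangular` constant is a flag bit suitable for the `Added` parameter:

```cpp
#include <Eigen/Sparse>
using namespace Eigen;

void lowerView(const SparseMatrix<double>& A)
{
  typedef SparseFlagged<SparseMatrix<double>, LowerTriangular, 0> Lower;
  Lower lower = A.marked<LowerTriangular>();  // no copy; only Flags change

  double sum = 0;
  for (int j = 0; j < lower.outerSize(); ++j)
    for (Lower::InnerIterator it(lower, j); it; ++it)
      sum += it.value();  // same coefficients as A; only the flags differ
  (void)sum;
}
```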
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h b/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h
deleted file mode 100644
index a1bac4d084d..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h
+++ /dev/null
@@ -1,346 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-/*
-
-NOTE: the _symbolic and _numeric functions have been adapted from
- the LDL library:
-
-LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
-
-LDL License:
-
- Your use or distribution of LDL or any modified version of
- LDL implies that you agree to this License.
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
- USA
-
- Permission is hereby granted to use or copy this program under the
- terms of the GNU LGPL, provided that the Copyright, this License,
- and the Availability of the original version is retained on all copies.
- User documentation of any code that uses this code or any modified
- version of this code must cite the Copyright, this License, the
- Availability note, and "Used by permission." Permission to modify
- the code and to distribute modified code is granted, provided the
- Copyright, this License, and the Availability note are retained,
- and a notice that the code was modified is included.
- */
-
-#ifndef EIGEN_SPARSELDLT_H
-#define EIGEN_SPARSELDLT_H
-
-/** \ingroup Sparse_Module
- *
- * \class SparseLDLT
- *
- * \brief LDLT Cholesky decomposition of a sparse matrix and associated features
- *
- * \param MatrixType the type of the matrix of which we are computing the LDLT Cholesky decomposition
- *
- * \sa class LDLT, class LLT
- */
-template<typename MatrixType, int Backend = DefaultBackend>
-class SparseLDLT
-{
- protected:
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef SparseMatrix<Scalar,LowerTriangular|UnitDiagBit> CholMatrixType;
- typedef Matrix<Scalar,MatrixType::ColsAtCompileTime,1> VectorType;
-
- enum {
- SupernodalFactorIsDirty = 0x10000,
- MatrixLIsDirty = 0x20000
- };
-
- public:
-
- /** Creates a dummy LDLT factorization object with flags \a flags. */
- SparseLDLT(int flags = 0)
- : m_flags(flags), m_status(0)
- {
- ei_assert((MatrixType::Flags&RowMajorBit)==0);
- m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
- }
-
- /** Creates an LDLT object and computes the respective factorization of \a matrix using
- * flags \a flags. */
- SparseLDLT(const MatrixType& matrix, int flags = 0)
- : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0)
- {
- ei_assert((MatrixType::Flags&RowMajorBit)==0);
- m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
- compute(matrix);
- }
-
- /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
- *
- * Setting a value greater than zero speeds up computation, and yields an incomplete
- * factorization with fewer nonzero coefficients. Such approximate factors are especially
- * useful to initialize an iterative solver.
- *
- * \warning if precision is greater than zero, the LDLT factorization is not guaranteed to succeed
- * even if the matrix is positive definite.
- *
- * Note that the exact meaning of this parameter might depend on the actual
- * backend. Moreover, not all backends support this feature.
- *
- * \sa precision() */
- void setPrecision(RealScalar v) { m_precision = v; }
-
- /** \returns the current precision.
- *
- * \sa setPrecision() */
- RealScalar precision() const { return m_precision; }
-
- /** Sets the flags. Possible values are:
- * - CompleteFactorization
- * - IncompleteFactorization
- * - MemoryEfficient (hint to use the memory most efficient method offered by the backend)
- * - SupernodalMultifrontal (implies a complete factorization if supported by the backend,
- * overloads the MemoryEfficient flags)
- * - SupernodalLeftLooking (implies a complete factorization if supported by the backend,
- * overloads the MemoryEfficient flags)
- *
- * \sa flags() */
- void setFlags(int f) { m_flags = f; }
- /** \returns the current flags */
- int flags() const { return m_flags; }
-
- /** Computes/re-computes the LDLT factorization */
- void compute(const MatrixType& matrix);
-
- /** Perform a symbolic factorization */
- void _symbolic(const MatrixType& matrix);
- /** Perform the actual factorization using the previously
- * computed symbolic factorization */
- bool _numeric(const MatrixType& matrix);
-
- /** \returns the lower triangular matrix L */
- inline const CholMatrixType& matrixL(void) const { return m_matrix; }
-
- /** \returns the coefficients of the diagonal matrix D */
- inline VectorType vectorD(void) const { return m_diag; }
-
- template<typename Derived>
- bool solveInPlace(MatrixBase<Derived> &b) const;
-
- /** \returns true if the factorization succeeded */
- inline bool succeeded(void) const { return m_succeeded; }
-
- protected:
- CholMatrixType m_matrix;
- VectorType m_diag;
- VectorXi m_parent; // elimination tree
- VectorXi m_nonZerosPerCol;
-// VectorXi m_w; // workspace
- RealScalar m_precision;
- int m_flags;
- mutable int m_status;
- bool m_succeeded;
-};
-
-/** Computes / recomputes the LDLT decomposition of matrix \a a
- * using the default algorithm.
- */
-template<typename MatrixType, int Backend>
-void SparseLDLT<MatrixType,Backend>::compute(const MatrixType& a)
-{
- _symbolic(a);
- m_succeeded = _numeric(a);
-}
-
-template<typename MatrixType, int Backend>
-void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
-{
- assert(a.rows()==a.cols());
- const int size = a.rows();
- m_matrix.resize(size, size);
- m_parent.resize(size);
- m_nonZerosPerCol.resize(size);
- int * tags = ei_aligned_stack_new(int, size);
-
- const int* Ap = a._outerIndexPtr();
- const int* Ai = a._innerIndexPtr();
- int* Lp = m_matrix._outerIndexPtr();
- const int* P = 0;
- int* Pinv = 0;
-
- if (P)
- {
- /* If P is present then compute Pinv, the inverse of P */
- for (int k = 0; k < size; ++k)
- Pinv[P[k]] = k;
- }
- for (int k = 0; k < size; ++k)
- {
- /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
- m_parent[k] = -1; /* parent of k is not yet known */
- tags[k] = k; /* mark node k as visited */
- m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
- int kk = P ? P[k] : k; /* kth original, or permuted, column */
- int p2 = Ap[kk+1];
- for (int p = Ap[kk]; p < p2; ++p)
- {
- /* A (i,k) is nonzero (original or permuted A) */
- int i = Pinv ? Pinv[Ai[p]] : Ai[p];
- if (i < k)
- {
- /* follow path from i to root of etree, stop at flagged node */
- for (; tags[i] != k; i = m_parent[i])
- {
- /* find parent of i if not yet determined */
- if (m_parent[i] == -1)
- m_parent[i] = k;
- ++m_nonZerosPerCol[i]; /* L (k,i) is nonzero */
- tags[i] = k; /* mark i as visited */
- }
- }
- }
- }
- /* construct Lp index array from m_nonZerosPerCol column counts */
- Lp[0] = 0;
- for (int k = 0; k < size; ++k)
- Lp[k+1] = Lp[k] + m_nonZerosPerCol[k];
-
- m_matrix.resizeNonZeros(Lp[size]);
- ei_aligned_stack_delete(int, tags, size);
-}
-
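
The reachability argument in `_symbolic` is easiest to see on a tiny example. A self-contained program that runs the same loop on a hard-coded 4x4 "arrow" pattern (dense last row/column), printing the elimination tree and the per-column counts it produces:

```cpp
// Standalone illustration of the symbolic pass above.
#include <cstdio>

int main()
{
  const int n = 4;
  // Upper-triangular pattern of A in compressed column storage.
  int Ap[n + 1] = {0, 1, 2, 3, 7};
  int Ai[7]     = {0, 1, 2, 0, 1, 2, 3};

  int parent[n], nnzPerCol[n], tags[n];
  for (int k = 0; k < n; ++k)
  {
    parent[k] = -1;     // parent of k not yet known
    tags[k] = k;        // mark node k as visited
    nnzPerCol[k] = 0;
    for (int p = Ap[k]; p < Ap[k + 1]; ++p)
    {
      int i = Ai[p];
      if (i < k)
        for (; tags[i] != k; i = parent[i])   // walk towards the root
        {
          if (parent[i] == -1) parent[i] = k;
          ++nnzPerCol[i];                     // L(k,i) will be nonzero
          tags[i] = k;
        }
    }
  }
  for (int k = 0; k < n; ++k)
    std::printf("col %d: parent=%d, strictly-lower nnz in L=%d\n",
                k, parent[k], nnzPerCol[k]);
  // Prints parents 3,3,3,-1: every column hangs off the dense last column.
  return 0;
}
```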
-template<typename MatrixType, int Backend>
-bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
-{
- assert(a.rows()==a.cols());
- const int size = a.rows();
- assert(m_parent.size()==size);
- assert(m_nonZerosPerCol.size()==size);
-
- const int* Ap = a._outerIndexPtr();
- const int* Ai = a._innerIndexPtr();
- const Scalar* Ax = a._valuePtr();
- const int* Lp = m_matrix._outerIndexPtr();
- int* Li = m_matrix._innerIndexPtr();
- Scalar* Lx = m_matrix._valuePtr();
- m_diag.resize(size);
-
- Scalar * y = ei_aligned_stack_new(Scalar, size);
- int * pattern = ei_aligned_stack_new(int, size);
- int * tags = ei_aligned_stack_new(int, size);
-
- const int* P = 0;
- const int* Pinv = 0;
- bool ok = true;
-
- for (int k = 0; k < size; ++k)
- {
- /* compute nonzero pattern of kth row of L, in topological order */
- y[k] = 0.0; /* Y(0:k) is now all zero */
- int top = size; /* stack for pattern is empty */
- tags[k] = k; /* mark node k as visited */
- m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
- int kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */
- int p2 = Ap[kk+1];
- for (int p = Ap[kk]; p < p2; ++p)
- {
- int i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */
- if (i <= k)
- {
- y[i] += Ax[p]; /* scatter A(i,k) into Y (sum duplicates) */
- int len;
- for (len = 0; tags[i] != k; i = m_parent[i])
- {
- pattern[len++] = i; /* L(k,i) is nonzero */
- tags[i] = k; /* mark i as visited */
- }
- while (len > 0)
- pattern[--top] = pattern[--len];
- }
- }
- /* compute numerical values kth row of L (a sparse triangular solve) */
- m_diag[k] = y[k]; /* get D(k,k) and clear Y(k) */
- y[k] = 0.0;
- for (; top < size; ++top)
- {
- int i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
- Scalar yi = y[i]; /* get and clear Y(i) */
- y[i] = 0.0;
- int p2 = Lp[i] + m_nonZerosPerCol[i];
- int p;
- for (p = Lp[i]; p < p2; ++p)
- y[Li[p]] -= Lx[p] * yi;
- Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */
- m_diag[k] -= l_ki * yi;
- Li[p] = k; /* store L(k,i) in column form of L */
- Lx[p] = l_ki;
- ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
- }
- if (m_diag[k] == 0.0)
- {
- ok = false; /* failure, D(k,k) is zero */
- break;
- }
- }
-
- ei_aligned_stack_delete(Scalar, y, size);
- ei_aligned_stack_delete(int, pattern, size);
- ei_aligned_stack_delete(int, tags, size);
-
- return ok; /* success, diagonal of D is all nonzero */
-}
-
-/** Computes b = L^-T D^-1 L^-1 b */
-template<typename MatrixType, int Backend>
-template<typename Derived>
-bool SparseLDLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
-{
- const int size = m_matrix.rows();
- ei_assert(size==b.rows());
- if (!m_succeeded)
- return false;
-
- if (m_matrix.nonZeros()>0) // otherwise L==I
- m_matrix.solveTriangularInPlace(b);
- b = b.cwise() / m_diag;
- // FIXME should be .adjoint() but it fails to compile...
-
- if (m_matrix.nonZeros()>0) // otherwise L==I
- m_matrix.transpose().solveTriangularInPlace(b);
-
- return true;
-}
-
-#endif // EIGEN_SPARSELDLT_H
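
An end-to-end usage sketch for the class removed above (Eigen2-era API; the helper name `solveSPD` is hypothetical):

```cpp
#include <Eigen/Sparse>
using namespace Eigen;

bool solveSPD(const SparseMatrix<double>& A, VectorXd& b)
{
  SparseLDLT<SparseMatrix<double> > ldlt(A);  // runs _symbolic() then _numeric()
  if (!ldlt.succeeded())
    return false;               // a zero pivot appeared on D's diagonal
  return ldlt.solveInPlace(b);  // b <- L^-T D^-1 L^-1 b
}
```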
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h b/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h
deleted file mode 100644
index e7c314c2cad..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h
+++ /dev/null
@@ -1,205 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSELLT_H
-#define EIGEN_SPARSELLT_H
-
-/** \ingroup Sparse_Module
- *
- * \class SparseLLT
- *
- * \brief LLT Cholesky decomposition of a sparse matrix and associated features
- *
- * \param MatrixType the type of the matrix of which we are computing the LLT Cholesky decomposition
- *
- * \sa class LLT, class LDLT
- */
-template<typename MatrixType, int Backend = DefaultBackend>
-class SparseLLT
-{
- protected:
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef SparseMatrix<Scalar,LowerTriangular> CholMatrixType;
-
- enum {
- SupernodalFactorIsDirty = 0x10000,
- MatrixLIsDirty = 0x20000
- };
-
- public:
-
- /** Creates a dummy LLT factorization object with flags \a flags. */
- SparseLLT(int flags = 0)
- : m_flags(flags), m_status(0)
- {
- m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
- }
-
- /** Creates an LLT object and computes the respective factorization of \a matrix using
- * flags \a flags. */
- SparseLLT(const MatrixType& matrix, int flags = 0)
- : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0)
- {
- m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
- compute(matrix);
- }
-
- /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
- *
- * Setting a value greater than zero speeds up computation, and yields an incomplete
- * factorization with fewer nonzero coefficients. Such approximate factors are especially
- * useful to initialize an iterative solver.
- *
- * \warning if precision is greater than zero, the LLT factorization is not guaranteed to succeed
- * even if the matrix is positive definite.
- *
- * Note that the exact meaning of this parameter might depend on the actual
- * backend. Moreover, not all backends support this feature.
- *
- * \sa precision() */
- void setPrecision(RealScalar v) { m_precision = v; }
-
- /** \returns the current precision.
- *
- * \sa setPrecision() */
- RealScalar precision() const { return m_precision; }
-
- /** Sets the flags. Possible values are:
- * - CompleteFactorization
- * - IncompleteFactorization
- * - MemoryEfficient (hint to use the memory most efficient method offered by the backend)
- * - SupernodalMultifrontal (implies a complete factorization if supported by the backend,
- * overloads the MemoryEfficient flags)
- * - SupernodalLeftLooking (implies a complete factorization if supported by the backend,
- * overloads the MemoryEfficient flags)
- *
- * \sa flags() */
- void setFlags(int f) { m_flags = f; }
- /** \returns the current flags */
- int flags() const { return m_flags; }
-
- /** Computes/re-computes the LLT factorization */
- void compute(const MatrixType& matrix);
-
- /** \returns the lower triangular matrix L */
- inline const CholMatrixType& matrixL(void) const { return m_matrix; }
-
- template<typename Derived>
- bool solveInPlace(MatrixBase<Derived> &b) const;
-
- /** \returns true if the factorization succeeded */
- inline bool succeeded(void) const { return m_succeeded; }
-
- protected:
- CholMatrixType m_matrix;
- RealScalar m_precision;
- int m_flags;
- mutable int m_status;
- bool m_succeeded;
-};
-
-/** Computes / recomputes the LLT decomposition of matrix \a a
- * using the default algorithm.
- */
-template<typename MatrixType, int Backend>
-void SparseLLT<MatrixType,Backend>::compute(const MatrixType& a)
-{
- assert(a.rows()==a.cols());
- const int size = a.rows();
- m_matrix.resize(size, size);
-
- // allocate a temporary vector for accumulations
- AmbiVector<Scalar> tempVector(size);
- RealScalar density = a.nonZeros()/RealScalar(size*size);
-
- // TODO estimate the number of non zeros
- m_matrix.startFill(a.nonZeros()*2);
- for (int j = 0; j < size; ++j)
- {
- Scalar x = ei_real(a.coeff(j,j));
-
- // TODO better estimate of the density !
- tempVector.init(density>0.001? IsDense : IsSparse);
- tempVector.setBounds(j+1,size);
- tempVector.setZero();
- // init with current matrix a
- {
- typename MatrixType::InnerIterator it(a,j);
- ++it; // skip diagonal element
- for (; it; ++it)
- tempVector.coeffRef(it.index()) = it.value();
- }
- for (int k=0; k<j+1; ++k)
- {
- typename CholMatrixType::InnerIterator it(m_matrix, k);
- while (it && it.index()<j)
- ++it;
- if (it && it.index()==j)
- {
- Scalar y = it.value();
- x -= ei_abs2(y);
- ++it; // skip j-th element, and process remaining column coefficients
- tempVector.restart();
- for (; it; ++it)
- {
- tempVector.coeffRef(it.index()) -= it.value() * y;
- }
- }
- }
- // copy the temporary vector to the respective m_matrix.col()
- // while scaling the result by 1/rx (i.e. 1/sqrt(real(x)))
- RealScalar rx = ei_sqrt(ei_real(x));
- m_matrix.fill(j,j) = rx;
- Scalar y = Scalar(1)/rx;
- for (typename AmbiVector<Scalar>::Iterator it(tempVector, m_precision*rx); it; ++it)
- {
- m_matrix.fill(it.index(), j) = it.value() * y;
- }
- }
- m_matrix.endFill();
-}
-
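
For reference, the dense form of the left-looking recurrence that `compute()` implements column by column (real scalars assumed; a sketch, not Eigen code):

```cpp
// For each column j:
//   L(j,j) = sqrt( A(j,j) - sum_{k<j} L(j,k)^2 )
//   L(i,j) = ( A(i,j) - sum_{k<j} L(i,k)*L(j,k) ) / L(j,j),   i > j
#include <cmath>
#include <vector>

// Overwrites the lower part of A with L; returns false on a non-positive
// pivot (A not positive definite).
bool denseLLT(std::vector<std::vector<double> >& A)
{
  const int n = (int)A.size();
  for (int j = 0; j < n; ++j)
  {
    double x = A[j][j];
    for (int k = 0; k < j; ++k) x -= A[j][k] * A[j][k];
    if (x <= 0.0) return false;
    const double rx = std::sqrt(x);
    A[j][j] = rx;
    for (int i = j + 1; i < n; ++i)
    {
      double s = A[i][j];
      for (int k = 0; k < j; ++k) s -= A[i][k] * A[j][k];
      A[i][j] = s / rx;
    }
  }
  return true;
}
```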
-/** Computes b = L^-T L^-1 b */
-template<typename MatrixType, int Backend>
-template<typename Derived>
-bool SparseLLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
-{
- const int size = m_matrix.rows();
- ei_assert(size==b.rows());
-
- m_matrix.solveTriangularInPlace(b);
- // FIXME should be simply .adjoint() but it fails to compile...
- if (NumTraits<Scalar>::IsComplex)
- {
- CholMatrixType aux = m_matrix.conjugate();
- aux.transpose().solveTriangularInPlace(b);
- }
- else
- m_matrix.transpose().solveTriangularInPlace(b);
-
- return true;
-}
-
-#endif // EIGEN_SPARSELLT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLU.h b/extern/Eigen2/Eigen/src/Sparse/SparseLU.h
deleted file mode 100644
index 1425920509f..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseLU.h
+++ /dev/null
@@ -1,148 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSELU_H
-#define EIGEN_SPARSELU_H
-
-/** \ingroup Sparse_Module
- *
- * \class SparseLU
- *
- * \brief LU decomposition of a sparse matrix and associated features
- *
- * \param MatrixType the type of the matrix of which we are computing the LU factorization
- *
- * \sa class LU, class SparseLLT
- */
-template<typename MatrixType, int Backend = DefaultBackend>
-class SparseLU
-{
- protected:
- typedef typename MatrixType::Scalar Scalar;
- typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- typedef SparseMatrix<Scalar,LowerTriangular> LUMatrixType;
-
- enum {
- MatrixLUIsDirty = 0x10000
- };
-
- public:
-
- /** Creates a dummy LU factorization object with flags \a flags. */
- SparseLU(int flags = 0)
- : m_flags(flags), m_status(0)
- {
- m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
- }
-
- /** Creates an LU object and computes the respective factorization of \a matrix using
- * flags \a flags. */
- SparseLU(const MatrixType& matrix, int flags = 0)
- : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0)
- {
- m_precision = RealScalar(0.1) * Eigen::precision<RealScalar>();
- compute(matrix);
- }
-
- /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
- *
- * Setting a value greater than zero speeds up computation, and yields an incomplete
- * factorization with fewer nonzero coefficients. Such approximate factors are especially
- * useful to initialize an iterative solver.
- *
- * Note that the exact meaning of this parameter might depend on the actual
- * backend. Moreover, not all backends support this feature.
- *
- * \sa precision() */
- void setPrecision(RealScalar v) { m_precision = v; }
-
- /** \returns the current precision.
- *
- * \sa setPrecision() */
- RealScalar precision() const { return m_precision; }
-
- /** Sets the flags. Possible values are:
- * - CompleteFactorization
- * - IncompleteFactorization
- * - MemoryEfficient
- * - one of the ordering methods
- * - etc...
- *
- * \sa flags() */
- void setFlags(int f) { m_flags = f; }
- /** \returns the current flags */
- int flags() const { return m_flags; }
-
- void setOrderingMethod(int m)
- {
- ei_assert((m & ~OrderingMask) == 0 && m != 0 && "invalid ordering method");
- m_flags = (m_flags & ~OrderingMask) | (m & OrderingMask);
- }
-
- int orderingMethod() const
- {
- return m_flags&OrderingMask;
- }
-
- /** Computes/re-computes the LU factorization */
- void compute(const MatrixType& matrix);
-
- /** \returns the lower triangular matrix L */
- //inline const MatrixType& matrixL() const { return m_matrixL; }
-
- /** \returns the upper triangular matrix U */
- //inline const MatrixType& matrixU() const { return m_matrixU; }
-
- template<typename BDerived, typename XDerived>
- bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const;
-
- /** \returns true if the factorization succeeded */
- inline bool succeeded(void) const { return m_succeeded; }
-
- protected:
- RealScalar m_precision;
- int m_flags;
- mutable int m_status;
- bool m_succeeded;
-};
-
-/** Computes / recomputes the LU decomposition of matrix \a a
- * using the default algorithm.
- */
-template<typename MatrixType, int Backend>
-void SparseLU<MatrixType,Backend>::compute(const MatrixType& a)
-{
- ei_assert(false && "not implemented yet");
-}
-
-/** Computes *x = U^-1 L^-1 b */
-template<typename MatrixType, int Backend>
-template<typename BDerived, typename XDerived>
-bool SparseLU<MatrixType,Backend>::solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const
-{
- ei_assert(false && "not implemented yet");
- return false;
-}
-
-#endif // EIGEN_SPARSELU_H
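
With the default backend this class is only a front end: both `compute()` and `solve()` assert. Eigen2 offered usable LU factorizations through backend specializations selected by the second template argument (e.g. its SuperLU and UmfPack support headers); that routing is stated here as background, not something this hunk shows. A sketch of what the generic path does:

```cpp
#include <Eigen/Sparse>
using namespace Eigen;

void wontWork(const SparseMatrix<double>& A)
{
  SparseLU<SparseMatrix<double> > lu;  // Backend == DefaultBackend
  lu.compute(A);                       // hits ei_assert(false && "not implemented yet")
}
```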
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h b/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h
deleted file mode 100644
index 65c609686d2..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h
+++ /dev/null
@@ -1,452 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSEMATRIX_H
-#define EIGEN_SPARSEMATRIX_H
-
-/** \class SparseMatrix
- *
- * \brief Sparse matrix
- *
- * \param _Scalar the scalar type, i.e. the type of the coefficients
- *
- * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
- *
- */
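
It helps to picture the compressed column scheme referenced above for a concrete 3x3 matrix. The lookup helper below (the name `ccsCoeff` is hypothetical) mirrors what `coeff()`/`m_data.atInRange()` do further down, except that `atInRange` searches the sorted range rather than scanning linearly:

```cpp
// Concrete picture of the storage (column major):
//       [ 1 0 4 ]        values     = { 1, 9,  7,  4, 2 }
//   A = [ 0 7 0 ]   =>   innerIndex = { 0, 2,  1,  0, 2 }  (row of each value)
//       [ 9 0 2 ]        outerIndex = { 0, 2, 3, 5 }       (column starts; last = nnz)
//
// Column j's entries live in the range [outerIndex[j], outerIndex[j+1]).
double ccsCoeff(const double* values, const int* innerIndex, const int* outerIndex,
                int row, int col)
{
  for (int p = outerIndex[col]; p < outerIndex[col + 1]; ++p)
    if (innerIndex[p] == row)
      return values[p];
  return 0.0;  // structural zero
}
```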
-template<typename _Scalar, int _Flags>
-struct ei_traits<SparseMatrix<_Scalar, _Flags> >
-{
- typedef _Scalar Scalar;
- enum {
- RowsAtCompileTime = Dynamic,
- ColsAtCompileTime = Dynamic,
- MaxRowsAtCompileTime = Dynamic,
- MaxColsAtCompileTime = Dynamic,
- Flags = SparseBit | _Flags,
- CoeffReadCost = NumTraits<Scalar>::ReadCost,
- SupportedAccessPatterns = InnerRandomAccessPattern
- };
-};
-
-
-
-template<typename _Scalar, int _Flags>
-class SparseMatrix
- : public SparseMatrixBase<SparseMatrix<_Scalar, _Flags> >
-{
- public:
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseMatrix)
- EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
- EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
- // FIXME: why are these operators already available ???
- // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=)
- // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=)
-
- typedef MappedSparseMatrix<Scalar,Flags> Map;
-
- protected:
-
- enum { IsRowMajor = Base::IsRowMajor };
- typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
-
- int m_outerSize;
- int m_innerSize;
- int* m_outerIndex;
- CompressedStorage<Scalar> m_data;
-
- public:
-
- inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
- inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
-
- inline int innerSize() const { return m_innerSize; }
- inline int outerSize() const { return m_outerSize; }
- inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
-
- inline const Scalar* _valuePtr() const { return &m_data.value(0); }
- inline Scalar* _valuePtr() { return &m_data.value(0); }
-
- inline const int* _innerIndexPtr() const { return &m_data.index(0); }
- inline int* _innerIndexPtr() { return &m_data.index(0); }
-
- inline const int* _outerIndexPtr() const { return m_outerIndex; }
- inline int* _outerIndexPtr() { return m_outerIndex; }
-
- inline Scalar coeff(int row, int col) const
- {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
- return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
- }
-
- inline Scalar& coeffRef(int row, int col)
- {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
-
- int start = m_outerIndex[outer];
- int end = m_outerIndex[outer+1];
- ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
- ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- const int id = m_data.searchLowerIndex(start,end-1,inner);
- ei_assert((id<end) && (m_data.index(id)==inner) && "coeffRef cannot be called on a zero coefficient");
- return m_data.value(id);
- }
-
- public:
-
- class InnerIterator;
-
- inline void setZero()
- {
- m_data.clear();
- //if (m_outerSize)
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
-// for (int i=0; i<m_outerSize; ++i)
-// m_outerIndex[i] = 0;
-// if (m_outerSize)
-// m_outerIndex[i] = 0;
- }
-
- /** \returns the number of non zero coefficients */
- inline int nonZeros() const { return m_data.size(); }
-
- /** Initializes the filling process of \c *this.
- * \param reserveSize approximate number of nonzeros
- * Note that the matrix \c *this is zeroed.
- */
- inline void startFill(int reserveSize = 1000)
- {
- setZero();
- m_data.reserve(reserveSize);
- }
-
- /**
- */
- inline Scalar& fill(int row, int col)
- {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
-
- if (m_outerIndex[outer+1]==0)
- {
- // we start a new inner vector
- int i = outer;
- while (i>=0 && m_outerIndex[i]==0)
- {
- m_outerIndex[i] = m_data.size();
- --i;
- }
- m_outerIndex[outer+1] = m_outerIndex[outer];
- }
- else
- {
- ei_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
- }
- assert(size_t(m_outerIndex[outer+1]) == m_data.size());
- int id = m_outerIndex[outer+1];
- ++m_outerIndex[outer+1];
-
- m_data.append(0, inner);
- return m_data.value(id);
- }
-
- /** Like fill() but with random inner coordinates.
- */
- inline Scalar& fillrand(int row, int col)
- {
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
- if (m_outerIndex[outer+1]==0)
- {
- // we start a new inner vector
- // nothing special to do here
- int i = outer;
- while (i>=0 && m_outerIndex[i]==0)
- {
- m_outerIndex[i] = m_data.size();
- --i;
- }
- m_outerIndex[outer+1] = m_outerIndex[outer];
- }
- assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "invalid outer index");
- size_t startId = m_outerIndex[outer];
- // FIXME let's make sure sizeof(long int) == sizeof(size_t)
- size_t id = m_outerIndex[outer+1];
- ++m_outerIndex[outer+1];
-
- float reallocRatio = 1;
- if (m_data.allocatedSize()<id+1)
- {
- // we need to reallocate the data; to reduce the number of reallocations
- // we use a smart resize algorithm based on the current filling ratio
- // we use float to avoid overflows
- float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer);
- reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
- // let's bound the realloc ratio to:
- // 1) reduce multiple minor reallocs when the matrix is almost filled
- // 2) avoid allocating too much memory when the matrix is almost empty
- reallocRatio = std::min(std::max(reallocRatio,1.5f),8.f);
- }
- m_data.resize(id+1,reallocRatio);
-
- while ( (id > startId) && (m_data.index(id-1) > inner) )
- {
- m_data.index(id) = m_data.index(id-1);
- m_data.value(id) = m_data.value(id-1);
- --id;
- }
-
- m_data.index(id) = inner;
- return (m_data.value(id) = 0);
- }
-
- inline void endFill()
- {
- int size = m_data.size();
- int i = m_outerSize;
- // find the last filled column
- while (i>=0 && m_outerIndex[i]==0)
- --i;
- ++i;
- while (i<=m_outerSize)
- {
- m_outerIndex[i] = size;
- ++i;
- }
- }
-
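
The filling protocol implied by `startFill()`/`fill()`/`endFill()` above, as a usage sketch: outer vectors (columns, for the default layout) must be visited in order, and with `fill()` the inner indices must arrive sorted; `fillrand()` relaxes the latter:

```cpp
#include <Eigen/Sparse>
using namespace Eigen;

SparseMatrix<double> buildExample()
{
  SparseMatrix<double> A(3, 3);
  A.startFill(4);        // reserve ~4 nonzeros; also zeroes the matrix
  A.fill(0, 0) = 1.0;    // column 0, rows in increasing order
  A.fill(2, 0) = 9.0;
  A.fill(1, 1) = 7.0;    // column 1
  A.fill(0, 2) = 4.0;    // column 2 (fillrand() would allow unsorted rows)
  A.endFill();           // finalize the trailing outer indices
  return A;
}
```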
- void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
- {
- int k = 0;
- for (int j=0; j<m_outerSize; ++j)
- {
- int previousStart = m_outerIndex[j];
- m_outerIndex[j] = k;
- int end = m_outerIndex[j+1];
- for (int i=previousStart; i<end; ++i)
- {
- if (!ei_isMuchSmallerThan(m_data.value(i), reference, epsilon))
- {
- m_data.value(k) = m_data.value(i);
- m_data.index(k) = m_data.index(i);
- ++k;
- }
- }
- }
- m_outerIndex[m_outerSize] = k;
- m_data.resize(k,0);
- }
-
- /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero
- * \sa resizeNonZeros(int), reserve(), setZero()
- */
- void resize(int rows, int cols)
- {
- const int outerSize = IsRowMajor ? rows : cols;
- m_innerSize = IsRowMajor ? cols : rows;
- m_data.clear();
- if (m_outerSize != outerSize || m_outerSize==0)
- {
- delete[] m_outerIndex;
- m_outerIndex = new int [outerSize+1];
- m_outerSize = outerSize;
- }
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(int));
- }
- void resizeNonZeros(int size)
- {
- m_data.resize(size);
- }
-
- inline SparseMatrix()
- : m_outerSize(-1), m_innerSize(0), m_outerIndex(0)
- {
- resize(0, 0);
- }
-
- inline SparseMatrix(int rows, int cols)
- : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
- {
- resize(rows, cols);
- }
-
- template<typename OtherDerived>
- inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
- : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
- {
- *this = other.derived();
- }
-
- inline SparseMatrix(const SparseMatrix& other)
- : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0)
- {
- *this = other.derived();
- }
-
- inline void swap(SparseMatrix& other)
- {
- //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
- std::swap(m_outerIndex, other.m_outerIndex);
- std::swap(m_innerSize, other.m_innerSize);
- std::swap(m_outerSize, other.m_outerSize);
- m_data.swap(other.m_data);
- }
-
- inline SparseMatrix& operator=(const SparseMatrix& other)
- {
-// std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n";
- if (other.isRValue())
- {
- swap(other.const_cast_derived());
- }
- else
- {
- resize(other.rows(), other.cols());
- memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(int));
- m_data = other.m_data;
- }
- return *this;
- }
-
- template<typename OtherDerived>
- inline SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
- {
- const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- if (needToTranspose)
- {
- // two-pass algorithm:
- // 1 - compute the number of coeffs per dest inner vector
- // 2 - do the actual copy/eval
- // Since each coeff of the rhs has to be evaluated twice, let's evaluate it into a temporary if needed
- //typedef typename ei_nested<OtherDerived,2>::type OtherCopy;
- typedef typename ei_eval<OtherDerived>::type OtherCopy;
- typedef typename ei_cleantype<OtherCopy>::type _OtherCopy;
- OtherCopy otherCopy(other.derived());
-
- resize(other.rows(), other.cols());
- Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
- // pass 1
- // FIXME the above copy could be merged with that pass
- for (int j=0; j<otherCopy.outerSize(); ++j)
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
- ++m_outerIndex[it.index()];
-
- // prefix sum
- int count = 0;
- VectorXi positions(outerSize());
- for (int j=0; j<outerSize(); ++j)
- {
- int tmp = m_outerIndex[j];
- m_outerIndex[j] = count;
- positions[j] = count;
- count += tmp;
- }
- m_outerIndex[outerSize()] = count;
- // alloc
- m_data.resize(count);
- // pass 2
- for (int j=0; j<otherCopy.outerSize(); ++j)
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
- {
- int pos = positions[it.index()]++;
- m_data.index(pos) = j;
- m_data.value(pos) = it.value();
- }
-
- return *this;
- }
- else
- {
- // there is no special optimization
- return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
- }
- }
-
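
The transposing assignment above is a counting sort in disguise. The same two passes on raw arrays, as a self-contained sketch (types and names are illustrative):

```cpp
// Build compressed column storage from row-major entries:
// pass 1 counts entries per column and prefix-sums the counts,
// pass 2 scatters each entry to its column's cursor.
#include <vector>

struct Entry { int row, col; double v; };

void rowsToCCS(const std::vector<Entry>& rowMajor, int cols,
               std::vector<int>& outerIndex, std::vector<int>& innerIndex,
               std::vector<double>& values)
{
  outerIndex.assign(cols + 1, 0);
  for (size_t i = 0; i < rowMajor.size(); ++i)   // pass 1: count per column
    ++outerIndex[rowMajor[i].col + 1];
  for (int j = 0; j < cols; ++j)                 // prefix sum -> column starts
    outerIndex[j + 1] += outerIndex[j];
  std::vector<int> cursor(outerIndex.begin(), outerIndex.end() - 1);
  innerIndex.resize(rowMajor.size());
  values.resize(rowMajor.size());
  for (size_t i = 0; i < rowMajor.size(); ++i)   // pass 2: scatter
  {
    int pos = cursor[rowMajor[i].col]++;
    innerIndex[pos] = rowMajor[i].row;           // rows stay sorted per column
    values[pos] = rowMajor[i].v;
  }
}
```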
- friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
- {
- EIGEN_DBG_SPARSE(
- s << "Nonzero entries:\n";
- for (int i=0; i<m.nonZeros(); ++i)
- {
- s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
- }
- s << std::endl;
- s << std::endl;
- s << "Column pointers:\n";
- for (int i=0; i<m.outerSize(); ++i)
- {
- s << m.m_outerIndex[i] << " ";
- }
- s << " $" << std::endl;
- s << std::endl;
- );
- s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
- return s;
- }
-
- /** Destructor */
- inline ~SparseMatrix()
- {
- delete[] m_outerIndex;
- }
-};
-
-template<typename Scalar, int _Flags>
-class SparseMatrix<Scalar,_Flags>::InnerIterator
-{
- public:
- InnerIterator(const SparseMatrix& mat, int outer)
- : m_matrix(mat), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_start(m_id), m_end(mat.m_outerIndex[outer+1])
- {}
-
- template<unsigned int Added, unsigned int Removed>
- InnerIterator(const Flagged<SparseMatrix,Added,Removed>& mat, int outer)
- : m_matrix(mat._expression()), m_outer(outer), m_id(m_matrix.m_outerIndex[outer]),
- m_start(m_id), m_end(m_matrix.m_outerIndex[outer+1])
- {}
-
- inline InnerIterator& operator++() { m_id++; return *this; }
-
- inline Scalar value() const { return m_matrix.m_data.value(m_id); }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.m_data.value(m_id)); }
-
- inline int index() const { return m_matrix.m_data.index(m_id); }
- inline int row() const { return IsRowMajor ? m_outer : index(); }
- inline int col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
-
- protected:
- const SparseMatrix& m_matrix;
- const int m_outer;
- int m_id;
- const int m_start;
- const int m_end;
-
- private:
- InnerIterator& operator=(const InnerIterator&);
-};
-
-#endif // EIGEN_SPARSEMATRIX_H
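
Canonical traversal with the iterator defined above (the helper name is hypothetical):

```cpp
#include <Eigen/Sparse>
using namespace Eigen;

double sumNonZeros(const SparseMatrix<double>& A)
{
  double sum = 0;
  for (int j = 0; j < A.outerSize(); ++j)        // one outer vector at a time
    for (SparseMatrix<double>::InnerIterator it(A, j); it; ++it)
      sum += it.value();                         // it.row(), it.col(), it.index() also available
  return sum;
}
```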
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseProduct.h b/extern/Eigen2/Eigen/src/Sparse/SparseProduct.h
deleted file mode 100644
index c98a71e993b..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseProduct.h
+++ /dev/null
@@ -1,415 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSEPRODUCT_H
-#define EIGEN_SPARSEPRODUCT_H
-
-template<typename Lhs, typename Rhs> struct ei_sparse_product_mode
-{
- enum {
-
- value = ((Lhs::Flags&Diagonal)==Diagonal || (Rhs::Flags&Diagonal)==Diagonal)
- ? DiagonalProduct
- : (Rhs::Flags&Lhs::Flags&SparseBit)==SparseBit
- ? SparseTimeSparseProduct
- : (Lhs::Flags&SparseBit)==SparseBit
- ? SparseTimeDenseProduct
- : DenseTimeSparseProduct };
-};
-
-template<typename Lhs, typename Rhs, int ProductMode>
-struct SparseProductReturnType
-{
- typedef const typename ei_nested<Lhs,Rhs::RowsAtCompileTime>::type LhsNested;
- typedef const typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
-
- typedef SparseProduct<LhsNested, RhsNested, ProductMode> Type;
-};
-
-template<typename Lhs, typename Rhs>
-struct SparseProductReturnType<Lhs,Rhs,DiagonalProduct>
-{
- typedef const typename ei_nested<Lhs,Rhs::RowsAtCompileTime>::type LhsNested;
- typedef const typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
-
- typedef SparseDiagonalProduct<LhsNested, RhsNested> Type;
-};
-
-// sparse product return type specialization
-template<typename Lhs, typename Rhs>
-struct SparseProductReturnType<Lhs,Rhs,SparseTimeSparseProduct>
-{
- typedef typename ei_traits<Lhs>::Scalar Scalar;
- enum {
- LhsRowMajor = ei_traits<Lhs>::Flags & RowMajorBit,
- RhsRowMajor = ei_traits<Rhs>::Flags & RowMajorBit,
- TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
- TransposeLhs = LhsRowMajor && (!RhsRowMajor)
- };
-
- // FIXME if we transpose let's evaluate to a LinkedVectorMatrix since it is the
- // type of the temporary to perform the transpose op
- typedef typename ei_meta_if<TransposeLhs,
- SparseMatrix<Scalar,0>,
- const typename ei_nested<Lhs,Rhs::RowsAtCompileTime>::type>::ret LhsNested;
-
- typedef typename ei_meta_if<TransposeRhs,
- SparseMatrix<Scalar,0>,
- const typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type>::ret RhsNested;
-
- typedef SparseProduct<LhsNested, RhsNested, SparseTimeSparseProduct> Type;
-};
-
-template<typename LhsNested, typename RhsNested, int ProductMode>
-struct ei_traits<SparseProduct<LhsNested, RhsNested, ProductMode> >
-{
- // clean the nested types:
- typedef typename ei_cleantype<LhsNested>::type _LhsNested;
- typedef typename ei_cleantype<RhsNested>::type _RhsNested;
- typedef typename _LhsNested::Scalar Scalar;
-
- enum {
- LhsCoeffReadCost = _LhsNested::CoeffReadCost,
- RhsCoeffReadCost = _RhsNested::CoeffReadCost,
- LhsFlags = _LhsNested::Flags,
- RhsFlags = _RhsNested::Flags,
-
- RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
- ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
- InnerSize = EIGEN_ENUM_MIN(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
-
- MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
-
-// LhsIsRowMajor = (LhsFlags & RowMajorBit)==RowMajorBit,
-// RhsIsRowMajor = (RhsFlags & RowMajorBit)==RowMajorBit,
-
- EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
- ResultIsSparse = ProductMode==SparseTimeSparseProduct || ProductMode==DiagonalProduct,
-
- RemovedBits = ~( (EvalToRowMajor ? 0 : RowMajorBit) | (ResultIsSparse ? 0 : SparseBit) ),
-
- Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
- | EvalBeforeAssigningBit
- | EvalBeforeNestingBit,
-
- CoeffReadCost = Dynamic
- };
-
- typedef typename ei_meta_if<ResultIsSparse,
- SparseMatrixBase<SparseProduct<LhsNested, RhsNested, ProductMode> >,
- MatrixBase<SparseProduct<LhsNested, RhsNested, ProductMode> > >::ret Base;
-};
-
-template<typename LhsNested, typename RhsNested, int ProductMode>
-class SparseProduct : ei_no_assignment_operator,
- public ei_traits<SparseProduct<LhsNested, RhsNested, ProductMode> >::Base
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(SparseProduct)
-
- private:
-
- typedef typename ei_traits<SparseProduct>::_LhsNested _LhsNested;
- typedef typename ei_traits<SparseProduct>::_RhsNested _RhsNested;
-
- public:
-
- template<typename Lhs, typename Rhs>
- EIGEN_STRONG_INLINE SparseProduct(const Lhs& lhs, const Rhs& rhs)
- : m_lhs(lhs), m_rhs(rhs)
- {
- ei_assert(lhs.cols() == rhs.rows());
-
- enum {
- ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
- || _RhsNested::RowsAtCompileTime==Dynamic
- || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
- AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
- SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
- };
- // note to the lost user:
- // * for a dot product use: v1.dot(v2)
- // * for a coeff-wise product use: v1.cwise()*v2
- EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
- INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
- EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
- INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
- EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
- }
-
- EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); }
-
- EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
- EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
-
- protected:
- LhsNested m_lhs;
- RhsNested m_rhs;
-};
-
-// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
-template<typename Lhs, typename Rhs, typename ResultType>
-static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
-{
- typedef typename ei_traits<typename ei_cleantype<Lhs>::type>::Scalar Scalar;
-
- // make sure to call innerSize/outerSize since we fake the storage order.
- int rows = lhs.innerSize();
- int cols = rhs.outerSize();
- //int size = lhs.outerSize();
- ei_assert(lhs.outerSize() == rhs.innerSize());
-
- // allocate a temporary buffer
- AmbiVector<Scalar> tempVector(rows);
-
- // estimate the number of non zero entries
- float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
- float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
- float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
-
- res.resize(rows, cols);
- res.startFill(int(ratioRes*rows*cols));
- for (int j=0; j<cols; ++j)
- {
- // let's do a more accurate determination of the nnz ratio for the current column j of res
- //float ratioColRes = std::min(ratioLhs * rhs.innerNonZeros(j), 1.f);
- // FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
- float ratioColRes = ratioRes;
- tempVector.init(ratioColRes);
- tempVector.setZero();
- for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
- {
- // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
- tempVector.restart();
- Scalar x = rhsIt.value();
- for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
- {
- tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
- }
- }
- for (typename AmbiVector<Scalar>::Iterator it(tempVector); it; ++it)
- if (ResultType::Flags&RowMajorBit)
- res.fill(j,it.index()) = it.value();
- else
- res.fill(it.index(), j) = it.value();
- }
- res.endFill();
-}
-
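
The structure of `ei_sparse_product_impl` above, with `std::map` standing in for `AmbiVector`, as a self-contained sketch: for each column j of the result, scatter `x * lhs.col(k)` for every nonzero `x = rhs(k,j)`, then read the accumulator back out in sorted order:

```cpp
#include <map>
#include <vector>

typedef std::vector<std::map<int, double> > ColMajorSparse;  // col -> (row -> value)

ColMajorSparse multiply(const ColMajorSparse& lhs, const ColMajorSparse& rhs)
{
  ColMajorSparse res(rhs.size());
  for (size_t j = 0; j < rhs.size(); ++j)
  {
    std::map<int, double> acc;  // plays the role of tempVector
    for (std::map<int, double>::const_iterator rIt = rhs[j].begin();
         rIt != rhs[j].end(); ++rIt)
    {
      const double x = rIt->second;
      const std::map<int, double>& lhsCol = lhs[rIt->first];
      for (std::map<int, double>::const_iterator lIt = lhsCol.begin();
           lIt != lhsCol.end(); ++lIt)
        acc[lIt->first] += lIt->second * x;  // res(:,j) += x * lhs(:,k)
    }
    res[j] = acc;  // the sorted read-back, like the fill loop above
  }
  return res;
}
```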
-template<typename Lhs, typename Rhs, typename ResultType,
- int LhsStorageOrder = ei_traits<Lhs>::Flags&RowMajorBit,
- int RhsStorageOrder = ei_traits<Rhs>::Flags&RowMajorBit,
- int ResStorageOrder = ei_traits<ResultType>::Flags&RowMajorBit>
-struct ei_sparse_product_selector;
-
-template<typename Lhs, typename Rhs, typename ResultType>
-struct ei_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
-{
- typedef typename ei_traits<typename ei_cleantype<Lhs>::type>::Scalar Scalar;
-
- static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
- {
- typename ei_cleantype<ResultType>::type _res(res.rows(), res.cols());
- ei_sparse_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res);
- res.swap(_res);
- }
-};
-
-template<typename Lhs, typename Rhs, typename ResultType>
-struct ei_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
-{
- static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
- {
- // we need a col-major matrix to hold the result
- typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
- SparseTemporaryType _res(res.rows(), res.cols());
- ei_sparse_product_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res);
- res = _res;
- }
-};
-
-template<typename Lhs, typename Rhs, typename ResultType>
-struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
-{
- static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
- {
- // let's transpose the product to get a column x column product
- typename ei_cleantype<ResultType>::type _res(res.rows(), res.cols());
- ei_sparse_product_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res);
- res.swap(_res);
- }
-};
-
-template<typename Lhs, typename Rhs, typename ResultType>
-struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
-{
- static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
- {
- // let's transpose the product to get a column x column product
- typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
- SparseTemporaryType _res(res.cols(), res.rows());
- ei_sparse_product_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
- res = _res.transpose();
- }
-};
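-
-// Note on the two row-major specializations above: they rely on the identity
-// (A*B)^T == B^T * A^T. A row-major matrix traversed with swapped outer and inner
-// dimensions is exactly the col-major storage of its transpose, so calling
-// ei_sparse_product_impl(rhs, lhs, ...) reuses the col-major kernel unchanged;
-// only the col-major-result case pays an explicit res = _res.transpose() at the end.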
-
-// NOTE: eventually we should transpose one argument even in this case, since the
-// dot-product approach might be expensive if the result is not dense.
-// template<typename Lhs, typename Rhs, typename ResultType, int ResStorageOrder>
-// struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ResStorageOrder>
-// {
-// static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
-// {
-// // trivial product as lhs.row/rhs.col dot products
-// // loop over the preferred order of the result
-// }
-// };
-
-// NOTE the two other cases (col row *) must never occur since they are caught
-// by ProductReturnType, which transforms them into (col col *) by evaluating the rhs.
-
-
-// template<typename Derived>
-// template<typename Lhs, typename Rhs>
-// inline Derived& SparseMatrixBase<Derived>::lazyAssign(const SparseProduct<Lhs,Rhs>& product)
-// {
-// // std::cout << "sparse product to dense\n";
-// ei_sparse_product_selector<
-// typename ei_cleantype<Lhs>::type,
-// typename ei_cleantype<Rhs>::type,
-// typename ei_cleantype<Derived>::type>::run(product.lhs(),product.rhs(),derived());
-// return derived();
-// }
-
-// sparse = sparse * sparse
-template<typename Derived>
-template<typename Lhs, typename Rhs>
-inline Derived& SparseMatrixBase<Derived>::operator=(const SparseProduct<Lhs,Rhs,SparseTimeSparseProduct>& product)
-{
- ei_sparse_product_selector<
- typename ei_cleantype<Lhs>::type,
- typename ei_cleantype<Rhs>::type,
- Derived>::run(product.lhs(),product.rhs(),derived());
- return derived();
-}
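-
-// A minimal sketch of what this operator= handles:
-// \code
-// SparseMatrix<double> C = A * B; // A*B builds a SparseProduct; operator= runs the selector
-// \endcode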
-
-// dense = sparse * dense
-// template<typename Derived>
-// template<typename Lhs, typename Rhs>
-// Derived& MatrixBase<Derived>::lazyAssign(const SparseProduct<Lhs,Rhs,SparseTimeDenseProduct>& product)
-// {
-// typedef typename ei_cleantype<Lhs>::type _Lhs;
-// typedef typename _Lhs::InnerIterator LhsInnerIterator;
-// enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit };
-// derived().setZero();
-// for (int j=0; j<product.lhs().outerSize(); ++j)
-// for (LhsInnerIterator i(product.lhs(),j); i; ++i)
-// derived().row(LhsIsRowMajor ? j : i.index()) += i.value() * product.rhs().row(LhsIsRowMajor ? i.index() : j);
-// return derived();
-// }
-
-template<typename Derived>
-template<typename Lhs, typename Rhs>
-Derived& MatrixBase<Derived>::lazyAssign(const SparseProduct<Lhs,Rhs,SparseTimeDenseProduct>& product)
-{
- typedef typename ei_cleantype<Lhs>::type _Lhs;
- typedef typename ei_cleantype<Rhs>::type _Rhs;
- typedef typename _Lhs::InnerIterator LhsInnerIterator;
- enum {
- LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
- LhsIsSelfAdjoint = (_Lhs::Flags&SelfAdjointBit)==SelfAdjointBit,
- ProcessFirstHalf = LhsIsSelfAdjoint
- && ( ((_Lhs::Flags&(UpperTriangularBit|LowerTriangularBit))==0)
- || ( (_Lhs::Flags&UpperTriangularBit) && !LhsIsRowMajor)
- || ( (_Lhs::Flags&LowerTriangularBit) && LhsIsRowMajor) ),
- ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf)
- };
- derived().setZero();
- for (int j=0; j<product.lhs().outerSize(); ++j)
- {
- LhsInnerIterator i(product.lhs(),j);
- if (ProcessSecondHalf && i && (i.index()==j))
- {
- derived().row(j) += i.value() * product.rhs().row(j);
- ++i;
- }
- Block<Derived,1,Derived::ColsAtCompileTime> res(derived().row(LhsIsRowMajor ? j : 0));
- for (; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
- {
- if (LhsIsSelfAdjoint)
- {
- int a = LhsIsRowMajor ? j : i.index();
- int b = LhsIsRowMajor ? i.index() : j;
- Scalar v = i.value();
- derived().row(a) += (v) * product.rhs().row(b);
- derived().row(b) += ei_conj(v) * product.rhs().row(a);
- }
- else if (LhsIsRowMajor)
- res += i.value() * product.rhs().row(i.index());
- else
- derived().row(i.index()) += i.value() * product.rhs().row(j);
- }
- if (ProcessFirstHalf && i && (i.index()==j))
- derived().row(j) += i.value() * product.rhs().row(j);
- }
- return derived();
-}
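-
-// A minimal usage sketch (n and k are illustrative sizes): when the sparse lhs is
-// self-adjoint only one triangular half is stored, and the loop above mirrors each
-// off-diagonal entry onto both rows:
-// \code
-// int n = 100, k = 4;
-// SparseMatrix<double> S(n,n); // sparse lhs
-// MatrixXd M(n,k), D(n,k);     // dense rhs and result
-// D = S * M;                   // accumulated row by row as above
-// \endcode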
-
-// dense = dense * sparse
-template<typename Derived>
-template<typename Lhs, typename Rhs>
-Derived& MatrixBase<Derived>::lazyAssign(const SparseProduct<Lhs,Rhs,DenseTimeSparseProduct>& product)
-{
- typedef typename ei_cleantype<Rhs>::type _Rhs;
- typedef typename _Rhs::InnerIterator RhsInnerIterator;
- enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit };
- derived().setZero();
- for (int j=0; j<product.rhs().outerSize(); ++j)
- for (RhsInnerIterator i(product.rhs(),j); i; ++i)
- derived().col(RhsIsRowMajor ? i.index() : j) += i.value() * product.lhs().col(RhsIsRowMajor ? j : i.index());
- return derived();
-}
-
-// sparse * sparse
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const typename SparseProductReturnType<Derived,OtherDerived>::Type
-SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
-{
- return typename SparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
-
-// sparse * dense
-template<typename Derived>
-template<typename OtherDerived>
-EIGEN_STRONG_INLINE const typename SparseProductReturnType<Derived,OtherDerived>::Type
-SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
-{
- return typename SparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
-
-#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h b/extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h
deleted file mode 100644
index 7386294e4d4..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSETRANSPOSE_H
-#define EIGEN_SPARSETRANSPOSE_H
-
-template<typename MatrixType>
-struct ei_traits<SparseTranspose<MatrixType> > : ei_traits<Transpose<MatrixType> >
-{};
-
-template<typename MatrixType> class SparseTranspose
- : public SparseMatrixBase<SparseTranspose<MatrixType> >
-{
- public:
-
- EIGEN_GENERIC_PUBLIC_INTERFACE(SparseTranspose)
-
- class InnerIterator;
- class ReverseInnerIterator;
-
- inline SparseTranspose(const MatrixType& matrix) : m_matrix(matrix) {}
-
- //EIGEN_INHERIT_ASSIGNMENT_OPERATORS(SparseTranspose)
-
- inline int rows() const { return m_matrix.cols(); }
- inline int cols() const { return m_matrix.rows(); }
- inline int nonZeros() const { return m_matrix.nonZeros(); }
-
- // FIXME should we keep them?
- inline Scalar& coeffRef(int row, int col)
- { return m_matrix.const_cast_derived().coeffRef(col, row); }
-
- inline const Scalar coeff(int row, int col) const
- { return m_matrix.coeff(col, row); }
-
- inline const Scalar coeff(int index) const
- { return m_matrix.coeff(index); }
-
- inline Scalar& coeffRef(int index)
- { return m_matrix.const_cast_derived().coeffRef(index); }
-
- protected:
- const typename MatrixType::Nested m_matrix;
-
- private:
- SparseTranspose& operator=(const SparseTranspose&);
-};
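-
-// A minimal usage sketch: the wrapper exposes a transposed view without copying;
-// trans.coeff(r,c) simply reads m_matrix.coeff(c,r):
-// \code
-// SparseMatrix<float> m(3,4);
-// SparseTranspose<SparseMatrix<float> > t(m); // t.rows()==4, t.cols()==3
-// \endcode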
-
-template<typename MatrixType> class SparseTranspose<MatrixType>::InnerIterator : public MatrixType::InnerIterator
-{
- public:
- EIGEN_STRONG_INLINE InnerIterator(const SparseTranspose& trans, int outer)
- : MatrixType::InnerIterator(trans.m_matrix, outer)
- {}
-
- private:
- InnerIterator& operator=(const InnerIterator&);
-};
-
-template<typename MatrixType> class SparseTranspose<MatrixType>::ReverseInnerIterator : public MatrixType::ReverseInnerIterator
-{
- public:
-
- EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTranspose& xpr, int outer)
- : MatrixType::ReverseInnerIterator(xpr.m_matrix, outer)
- {}
-};
-
-#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h b/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h
deleted file mode 100644
index 393cdda6ea2..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h
+++ /dev/null
@@ -1,148 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSEUTIL_H
-#define EIGEN_SPARSEUTIL_H
-
-#ifdef NDEBUG
-#define EIGEN_DBG_SPARSE(X)
-#else
-#define EIGEN_DBG_SPARSE(X) X
-#endif
-
-#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
-template<typename OtherDerived> \
-EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
-{ \
- return Base::operator Op(other.derived()); \
-} \
-EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
-{ \
- return Base::operator Op(other); \
-}
-
-#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
-template<typename Other> \
-EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
-{ \
- return Base::operator Op(scalar); \
-}
-
-#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
-EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
-EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
-EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
-EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
-EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
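-
-// A minimal sketch, assuming the class provides the usual Base typedef
-// (e.g. via EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE below); MySparseXpr is an
-// illustrative name. One macro invocation pulls in the whole operator family:
-// \code
-// class MySparseXpr : public SparseMatrixBase<MySparseXpr>
-// {
-//   public:
-//     EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(MySparseXpr)
-//     EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(MySparseXpr)
-//     // ...
-// };
-// \endcode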
-
-#define _EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
-typedef BaseClass Base; \
-typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
-typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
-typedef typename Eigen::ei_nested<Derived>::type Nested; \
-enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
- ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
- Flags = Eigen::ei_traits<Derived>::Flags, \
- CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
- SizeAtCompileTime = Base::SizeAtCompileTime, \
- IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
-
-#define EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived) \
-_EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
-
-enum SparseBackend {
- DefaultBackend,
- Taucs,
- Cholmod,
- SuperLU,
- UmfPack
-};
-
-// solver flags
-enum {
- CompleteFactorization = 0x0000, // the default
- IncompleteFactorization = 0x0001,
- MemoryEfficient = 0x0002,
-
- // For LLT Cholesky:
- SupernodalMultifrontal = 0x0010,
- SupernodalLeftLooking = 0x0020,
-
- // Ordering methods:
- NaturalOrdering = 0x0100, // the default
- MinimumDegree_AT_PLUS_A = 0x0200,
- MinimumDegree_ATA = 0x0300,
- ColApproxMinimumDegree = 0x0400,
- Metis = 0x0500,
- Scotch = 0x0600,
- Chaco = 0x0700,
- OrderingMask = 0x0f00
-};
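-
-// A minimal sketch of combining the flags: they are designed to be OR-ed together,
-// and the ordering method can be recovered with OrderingMask:
-// \code
-// int flags = IncompleteFactorization | MinimumDegree_AT_PLUS_A;
-// int ordering = flags & OrderingMask; // == MinimumDegree_AT_PLUS_A (0x0200)
-// \endcode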
-
-template<typename Derived> class SparseMatrixBase;
-template<typename _Scalar, int _Flags = 0> class SparseMatrix;
-template<typename _Scalar, int _Flags = 0> class DynamicSparseMatrix;
-template<typename _Scalar, int _Flags = 0> class SparseVector;
-template<typename _Scalar, int _Flags = 0> class MappedSparseMatrix;
-
-template<typename MatrixType> class SparseTranspose;
-template<typename MatrixType, int Size> class SparseInnerVectorSet;
-template<typename Derived> class SparseCwise;
-template<typename UnaryOp, typename MatrixType> class SparseCwiseUnaryOp;
-template<typename BinaryOp, typename Lhs, typename Rhs> class SparseCwiseBinaryOp;
-template<typename ExpressionType,
- unsigned int Added, unsigned int Removed> class SparseFlagged;
-template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
-
-template<typename Lhs, typename Rhs> struct ei_sparse_product_mode;
-template<typename Lhs, typename Rhs, int ProductMode = ei_sparse_product_mode<Lhs,Rhs>::value> struct SparseProductReturnType;
-
-const int CoherentAccessPattern = 0x1;
-const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
-const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
-const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
-
-// const int AccessPatternNotSupported = 0x0;
-// const int AccessPatternSupported = 0x1;
-//
-// template<typename MatrixType, int AccessPattern> struct ei_support_access_pattern
-// {
-// enum { ret = (int(ei_traits<MatrixType>::SupportedAccessPatterns) & AccessPattern) == AccessPattern
-// ? AccessPatternSupported
-// : AccessPatternNotSupported
-// };
-// };
-
-template<typename T> class ei_eval<T,IsSparse>
-{
- typedef typename ei_traits<T>::Scalar _Scalar;
- enum {
- _Flags = ei_traits<T>::Flags
- };
-
- public:
- typedef SparseMatrix<_Scalar, _Flags> type;
-};
-
-#endif // EIGEN_SPARSEUTIL_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h b/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h
deleted file mode 100644
index 3c9a4fcced6..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h
+++ /dev/null
@@ -1,565 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SUPERLUSUPPORT_H
-#define EIGEN_SUPERLUSUPPORT_H
-
-// declaration of gssvx taken from GMM++
-#define DECL_GSSVX(NAMESPACE,FNAME,FLOATTYPE,KEYTYPE) \
- inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \
- int *perm_c, int *perm_r, int *etree, char *equed, \
- FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \
- SuperMatrix *U, void *work, int lwork, \
- SuperMatrix *B, SuperMatrix *X, \
- FLOATTYPE *recip_pivot_growth, \
- FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \
- SuperLUStat_t *stats, int *info, KEYTYPE) { \
- using namespace NAMESPACE; \
- mem_usage_t mem_usage; \
- NAMESPACE::FNAME(options, A, perm_c, perm_r, etree, equed, R, C, L, \
- U, work, lwork, B, X, recip_pivot_growth, rcond, \
- ferr, berr, &mem_usage, stats, info); \
- return mem_usage.for_lu; /* bytes used by the factor storage */ \
- }
-
-DECL_GSSVX(SuperLU_S,sgssvx,float,float)
-DECL_GSSVX(SuperLU_C,cgssvx,float,std::complex<float>)
-DECL_GSSVX(SuperLU_D,dgssvx,double,double)
-DECL_GSSVX(SuperLU_Z,zgssvx,double,std::complex<double>)
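-
-// Note: the trailing KEYTYPE parameter carries no data; it exists only to drive
-// overload resolution. For instance SuperLU_gssvx(..., float()) selects the sgssvx
-// wrapper while SuperLU_gssvx(..., std::complex<double>()) selects zgssvx.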
-
-template<typename MatrixType>
-struct SluMatrixMapHelper;
-
-/** \internal
- *
- * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
- * and dense matrices. Supernodal and other fancy formats are not supported by this wrapper.
- *
- * This wrapper class mainly aims to avoid the need for dynamic allocation of the storage structure.
- */
-struct SluMatrix : SuperMatrix
-{
- SluMatrix()
- {
- Store = &storage;
- }
-
- SluMatrix(const SluMatrix& other)
- : SuperMatrix(other)
- {
- Store = &storage;
- storage = other.storage;
- }
-
- SluMatrix& operator=(const SluMatrix& other)
- {
- SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
- Store = &storage;
- storage = other.storage;
- return *this;
- }
-
- struct
- {
- union {int nnz;int lda;};
- void *values;
- int *innerInd;
- int *outerInd;
- } storage;
-
- void setStorageType(Stype_t t)
- {
- Stype = t;
- if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
- Store = &storage;
- else
- {
- ei_assert(false && "storage type not supported");
- Store = 0;
- }
- }
-
- template<typename Scalar>
- void setScalarType()
- {
- if (ei_is_same_type<Scalar,float>::ret)
- Dtype = SLU_S;
- else if (ei_is_same_type<Scalar,double>::ret)
- Dtype = SLU_D;
- else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
- Dtype = SLU_C;
- else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
- Dtype = SLU_Z;
- else
- {
- ei_assert(false && "Scalar type not supported by SuperLU");
- }
- }
-
- template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
- static SluMatrix Map(Matrix<Scalar,Rows,Cols,Options,MRows,MCols>& mat)
- {
- typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
- ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
- SluMatrix res;
- res.setStorageType(SLU_DN);
- res.setScalarType<Scalar>();
- res.Mtype = SLU_GE;
-
- res.nrow = mat.rows();
- res.ncol = mat.cols();
-
- res.storage.lda = mat.stride();
- res.storage.values = mat.data();
- return res;
- }
-
- template<typename MatrixType>
- static SluMatrix Map(SparseMatrixBase<MatrixType>& mat)
- {
- SluMatrix res;
- if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
- {
- res.setStorageType(SLU_NR);
- res.nrow = mat.cols();
- res.ncol = mat.rows();
- }
- else
- {
- res.setStorageType(SLU_NC);
- res.nrow = mat.rows();
- res.ncol = mat.cols();
- }
-
- res.Mtype = SLU_GE;
-
- res.storage.nnz = mat.nonZeros();
- res.storage.values = mat.derived()._valuePtr();
- res.storage.innerInd = mat.derived()._innerIndexPtr();
- res.storage.outerInd = mat.derived()._outerIndexPtr();
-
- res.setScalarType<typename MatrixType::Scalar>();
-
- // FIXME the following is not very accurate
- if (MatrixType::Flags & UpperTriangular)
- res.Mtype = SLU_TRU;
- if (MatrixType::Flags & LowerTriangular)
- res.Mtype = SLU_TRL;
- if (MatrixType::Flags & SelfAdjoint)
- ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
- return res;
- }
-};
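-
-// A minimal usage sketch: both Map() overloads fill the on-stack SluMatrix without
-// allocating; the result aliases the Eigen matrix and can be handed straight to the
-// SuperLU driver (n is an illustrative size):
-// \code
-// int n = 100;
-// SparseMatrix<double> A(n,n);
-// SluMatrix sluA = SluMatrix::Map(A); // SLU_NC storage, pointers alias A's arrays
-// \endcode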
-
-template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
-struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
-{
- typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
- static void run(MatrixType& mat, SluMatrix& res)
- {
- ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
- res.setStorageType(SLU_DN);
- res.setScalarType<Scalar>();
- res.Mtype = SLU_GE;
-
- res.nrow = mat.rows();
- res.ncol = mat.cols();
-
- res.storage.lda = mat.stride();
- res.storage.values = mat.data();
- }
-};
-
-template<typename Derived>
-struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
-{
- typedef Derived MatrixType;
- static void run(MatrixType& mat, SluMatrix& res)
- {
- if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
- {
- res.setStorageType(SLU_NR);
- res.nrow = mat.cols();
- res.ncol = mat.rows();
- }
- else
- {
- res.setStorageType(SLU_NC);
- res.nrow = mat.rows();
- res.ncol = mat.cols();
- }
-
- res.Mtype = SLU_GE;
-
- res.storage.nnz = mat.nonZeros();
- res.storage.values = mat._valuePtr();
- res.storage.innerInd = mat._innerIndexPtr();
- res.storage.outerInd = mat._outerIndexPtr();
-
- res.setScalarType<typename MatrixType::Scalar>();
-
- // FIXME the following is not very accurate
- if (MatrixType::Flags & UpperTriangular)
- res.Mtype = SLU_TRU;
- if (MatrixType::Flags & LowerTriangular)
- res.Mtype = SLU_TRL;
- if (MatrixType::Flags & SelfAdjoint)
- ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU");
- }
-};
-
-template<typename Derived>
-SluMatrix SparseMatrixBase<Derived>::asSluMatrix()
-{
- return SluMatrix::Map(derived());
-}
-
-/** View a SuperLU matrix as an Eigen expression */
-template<typename Scalar, int Flags>
-MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(SluMatrix& sluMat)
-{
- if ((Flags&RowMajorBit)==RowMajorBit)
- {
- assert(sluMat.Stype == SLU_NR);
- m_innerSize = sluMat.ncol;
- m_outerSize = sluMat.nrow;
- }
- else
- {
- assert(sluMat.Stype == SLU_NC);
- m_innerSize = sluMat.nrow;
- m_outerSize = sluMat.ncol;
- }
- m_outerIndex = sluMat.storage.outerInd;
- m_innerIndices = sluMat.storage.innerInd;
- m_values = reinterpret_cast<Scalar*>(sluMat.storage.values);
- m_nnz = sluMat.storage.outerInd[m_outerSize];
-}
-
-template<typename MatrixType>
-class SparseLU<MatrixType,SuperLU> : public SparseLU<MatrixType>
-{
- protected:
- typedef SparseLU<MatrixType> Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::RealScalar RealScalar;
- typedef Matrix<Scalar,Dynamic,1> Vector;
- typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
- typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
- typedef SparseMatrix<Scalar,LowerTriangular|UnitDiagBit> LMatrixType;
- typedef SparseMatrix<Scalar,UpperTriangular> UMatrixType;
- using Base::m_flags;
- using Base::m_status;
-
- public:
-
- SparseLU(int flags = NaturalOrdering)
- : Base(flags)
- {
- }
-
- SparseLU(const MatrixType& matrix, int flags = NaturalOrdering)
- : Base(flags)
- {
- compute(matrix);
- }
-
- ~SparseLU()
- {
- }
-
- inline const LMatrixType& matrixL() const
- {
- if (m_extractedDataAreDirty) extractData();
- return m_l;
- }
-
- inline const UMatrixType& matrixU() const
- {
- if (m_extractedDataAreDirty) extractData();
- return m_u;
- }
-
- inline const IntColVectorType& permutationP() const
- {
- if (m_extractedDataAreDirty) extractData();
- return m_p;
- }
-
- inline const IntRowVectorType& permutationQ() const
- {
- if (m_extractedDataAreDirty) extractData();
- return m_q;
- }
-
- Scalar determinant() const;
-
- template<typename BDerived, typename XDerived>
- bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const;
-
- void compute(const MatrixType& matrix);
-
- protected:
-
- void extractData() const;
-
- protected:
- // cached data to reduce reallocation, etc.
- mutable LMatrixType m_l;
- mutable UMatrixType m_u;
- mutable IntColVectorType m_p;
- mutable IntRowVectorType m_q;
-
- mutable SparseMatrix<Scalar> m_matrix;
- mutable SluMatrix m_sluA;
- mutable SuperMatrix m_sluL, m_sluU;
- mutable SluMatrix m_sluB, m_sluX;
- mutable SuperLUStat_t m_sluStat;
- mutable superlu_options_t m_sluOptions;
- mutable std::vector<int> m_sluEtree;
- mutable std::vector<RealScalar> m_sluRscale, m_sluCscale;
- mutable std::vector<RealScalar> m_sluFerr, m_sluBerr;
- mutable char m_sluEqued;
- mutable bool m_extractedDataAreDirty;
-};
-
-template<typename MatrixType>
-void SparseLU<MatrixType,SuperLU>::compute(const MatrixType& a)
-{
- const int size = a.rows();
- m_matrix = a;
-
- set_default_options(&m_sluOptions);
- m_sluOptions.ColPerm = NATURAL;
- m_sluOptions.PrintStat = NO;
- m_sluOptions.ConditionNumber = NO;
- m_sluOptions.Trans = NOTRANS;
- // m_sluOptions.Equil = NO;
-
- switch (Base::orderingMethod())
- {
- case NaturalOrdering : m_sluOptions.ColPerm = NATURAL; break;
- case MinimumDegree_AT_PLUS_A : m_sluOptions.ColPerm = MMD_AT_PLUS_A; break;
- case MinimumDegree_ATA : m_sluOptions.ColPerm = MMD_ATA; break;
- case ColApproxMinimumDegree : m_sluOptions.ColPerm = COLAMD; break;
- default:
- std::cerr << "Eigen: ordering method \"" << Base::orderingMethod() << "\" not supported by the SuperLU backend\n";
- m_sluOptions.ColPerm = NATURAL;
- };
-
- m_sluA = m_matrix.asSluMatrix();
- memset(&m_sluL,0,sizeof m_sluL);
- memset(&m_sluU,0,sizeof m_sluU);
- m_sluEqued = 'B';
- int info = 0;
-
- m_p.resize(size);
- m_q.resize(size);
- m_sluRscale.resize(size);
- m_sluCscale.resize(size);
- m_sluEtree.resize(size);
-
- RealScalar recip_pivot_gross, rcond;
- RealScalar ferr, berr;
-
- // set empty B and X
- m_sluB.setStorageType(SLU_DN);
- m_sluB.setScalarType<Scalar>();
- m_sluB.Mtype = SLU_GE;
- m_sluB.storage.values = 0;
- m_sluB.nrow = m_sluB.ncol = 0;
- m_sluB.storage.lda = size;
- m_sluX = m_sluB;
-
- StatInit(&m_sluStat);
- SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
- &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
- &m_sluL, &m_sluU,
- NULL, 0,
- &m_sluB, &m_sluX,
- &recip_pivot_gross, &rcond,
- &ferr, &berr,
- &m_sluStat, &info, Scalar());
- StatFree(&m_sluStat);
-
- m_extractedDataAreDirty = true;
-
- // FIXME how can we better check for errors?
- Base::m_succeeded = (info == 0);
-}
-
-template<typename MatrixType>
-template<typename BDerived,typename XDerived>
-bool SparseLU<MatrixType,SuperLU>::solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> *x) const
-{
- const int size = m_matrix.rows();
- const int rhsCols = b.cols();
- ei_assert(size==b.rows());
-
- m_sluOptions.Fact = FACTORED;
- m_sluOptions.IterRefine = NOREFINE;
-
- m_sluFerr.resize(rhsCols);
- m_sluBerr.resize(rhsCols);
- m_sluB = SluMatrix::Map(b.const_cast_derived());
- m_sluX = SluMatrix::Map(x->derived());
-
- StatInit(&m_sluStat);
- int info = 0;
- RealScalar recip_pivot_gross, rcond;
- SuperLU_gssvx(
- &m_sluOptions, &m_sluA,
- m_q.data(), m_p.data(),
- &m_sluEtree[0], &m_sluEqued,
- &m_sluRscale[0], &m_sluCscale[0],
- &m_sluL, &m_sluU,
- NULL, 0,
- &m_sluB, &m_sluX,
- &recip_pivot_gross, &rcond,
- &m_sluFerr[0], &m_sluBerr[0],
- &m_sluStat, &info, Scalar());
- StatFree(&m_sluStat);
-
- return info==0;
-}
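-
-// A minimal usage sketch (sizes are illustrative): factorize once, then solve for
-// as many right-hand sides as needed:
-// \code
-// int n = 100;
-// SparseMatrix<double> A(n,n);
-// SparseLU<SparseMatrix<double>,SuperLU> lu(A);
-// MatrixXd b(n,1), x(n,1);
-// if (lu.solve(b, &x)) { /* x now holds the solution */ }
-// \endcode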
-
-//
-// the code of this extractData() function has been adapted from SuperLU's Matlab support code.
-//
-// Copyright (c) 1994 by Xerox Corporation. All rights reserved.
-//
-// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
-// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
-//
-template<typename MatrixType>
-void SparseLU<MatrixType,SuperLU>::extractData() const
-{
- if (m_extractedDataAreDirty)
- {
- int upper;
- int fsupc, istart, nsupr;
- int lastl = 0, lastu = 0;
- SCformat *Lstore = static_cast<SCformat*>(m_sluL.Store);
- NCformat *Ustore = static_cast<NCformat*>(m_sluU.Store);
- Scalar *SNptr;
-
- const int size = m_matrix.rows();
- m_l.resize(size,size);
- m_l.resizeNonZeros(Lstore->nnz);
- m_u.resize(size,size);
- m_u.resizeNonZeros(Ustore->nnz);
-
- int* Lcol = m_l._outerIndexPtr();
- int* Lrow = m_l._innerIndexPtr();
- Scalar* Lval = m_l._valuePtr();
-
- int* Ucol = m_u._outerIndexPtr();
- int* Urow = m_u._innerIndexPtr();
- Scalar* Uval = m_u._valuePtr();
-
- Lcol[0] = 0;
- Ucol[0] = 0;
-
- /* for each supernode */
- for (int k = 0; k <= Lstore->nsuper; ++k)
- {
- fsupc = L_FST_SUPC(k);
- istart = L_SUB_START(fsupc);
- nsupr = L_SUB_START(fsupc+1) - istart;
- upper = 1;
-
- /* for each column in the supernode */
- for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
- {
- SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
-
- /* Extract U */
- for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
- {
- Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
- /* Matlab doesn't like explicit zero. */
- if (Uval[lastu] != 0.0)
- Urow[lastu++] = U_SUB(i);
- }
- for (int i = 0; i < upper; ++i)
- {
- /* upper triangle in the supernode */
- Uval[lastu] = SNptr[i];
- /* Matlab doesn't like explicit zero. */
- if (Uval[lastu] != 0.0)
- Urow[lastu++] = L_SUB(istart+i);
- }
- Ucol[j+1] = lastu;
-
- /* Extract L */
- Lval[lastl] = 1.0; /* unit diagonal */
- Lrow[lastl++] = L_SUB(istart + upper - 1);
- for (int i = upper; i < nsupr; ++i)
- {
- Lval[lastl] = SNptr[i];
- /* Matlab doesn't like explicit zero. */
- if (Lval[lastl] != 0.0)
- Lrow[lastl++] = L_SUB(istart+i);
- }
- Lcol[j+1] = lastl;
-
- ++upper;
- } /* for j ... */
-
- } /* for k ... */
-
- // squeeze the matrices:
- m_l.resizeNonZeros(lastl);
- m_u.resizeNonZeros(lastu);
-
- m_extractedDataAreDirty = false;
- }
-}
-
-template<typename MatrixType>
-typename SparseLU<MatrixType,SuperLU>::Scalar SparseLU<MatrixType,SuperLU>::determinant() const
-{
- if (m_extractedDataAreDirty)
- extractData();
-
- // TODO this code could be moved to the default/base backend
- // FIXME perhaps we have to take into account the scale factors m_sluRscale and m_sluCscale?
- Scalar det = Scalar(1);
- for (int j=0; j<m_u.cols(); ++j)
- {
- if (m_u._outerIndexPtr()[j+1]-m_u._outerIndexPtr()[j] > 0)
- {
- int lastId = m_u._outerIndexPtr()[j+1]-1;
- ei_assert(m_u._innerIndexPtr()[lastId]<=j);
- if (m_u._innerIndexPtr()[lastId]==j)
- {
- det *= m_u._valuePtr()[lastId];
- }
- }
- // std::cout << m_sluRscale[j] << " " << m_sluCscale[j] << " ";
- }
- return det;
-}
-
-#endif // EIGEN_SUPERLUSUPPORT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h b/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h
deleted file mode 100644
index 4dddca7b622..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h
+++ /dev/null
@@ -1,210 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_TAUCSSUPPORT_H
-#define EIGEN_TAUCSSUPPORT_H
-
-template<typename Derived>
-taucs_ccs_matrix SparseMatrixBase<Derived>::asTaucsMatrix()
-{
- taucs_ccs_matrix res;
- res.n = cols();
- res.m = rows();
- res.flags = 0;
- res.colptr = derived()._outerIndexPtr();
- res.rowind = derived()._innerIndexPtr();
- res.values.v = derived()._valuePtr();
- if (ei_is_same_type<Scalar,int>::ret)
- res.flags |= TAUCS_INT;
- else if (ei_is_same_type<Scalar,float>::ret)
- res.flags |= TAUCS_SINGLE;
- else if (ei_is_same_type<Scalar,double>::ret)
- res.flags |= TAUCS_DOUBLE;
- else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
- res.flags |= TAUCS_SCOMPLEX;
- else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
- res.flags |= TAUCS_DCOMPLEX;
- else
- {
- ei_assert(false && "Scalar type not supported by TAUCS");
- }
-
- if (Flags & UpperTriangular)
- res.flags |= TAUCS_UPPER;
- if (Flags & LowerTriangular)
- res.flags |= TAUCS_LOWER;
- if (Flags & SelfAdjoint)
- res.flags |= (NumTraits<Scalar>::IsComplex ? TAUCS_HERMITIAN : TAUCS_SYMMETRIC);
- else if ((Flags & UpperTriangular) || (Flags & LowerTriangular))
- res.flags |= TAUCS_TRIANGULAR;
-
- return res;
-}
-
-template<typename Scalar, int Flags>
-MappedSparseMatrix<Scalar,Flags>::MappedSparseMatrix(taucs_ccs_matrix& taucsMat)
-{
- m_innerSize = taucsMat.m;
- m_outerSize = taucsMat.n;
- m_outerIndex = taucsMat.colptr;
- m_innerIndices = taucsMat.rowind;
- m_values = reinterpret_cast<Scalar*>(taucsMat.values.v);
- m_nnz = taucsMat.colptr[taucsMat.n];
-}
-
-template<typename MatrixType>
-class SparseLLT<MatrixType,Taucs> : public SparseLLT<MatrixType>
-{
- protected:
- typedef SparseLLT<MatrixType> Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::RealScalar RealScalar;
- using Base::MatrixLIsDirty;
- using Base::SupernodalFactorIsDirty;
- using Base::m_flags;
- using Base::m_matrix;
- using Base::m_status;
-
- public:
-
- SparseLLT(int flags = 0)
- : Base(flags), m_taucsSupernodalFactor(0)
- {
- }
-
- SparseLLT(const MatrixType& matrix, int flags = 0)
- : Base(flags), m_taucsSupernodalFactor(0)
- {
- compute(matrix);
- }
-
- ~SparseLLT()
- {
- if (m_taucsSupernodalFactor)
- taucs_supernodal_factor_free(m_taucsSupernodalFactor);
- }
-
- inline const typename Base::CholMatrixType& matrixL(void) const;
-
- template<typename Derived>
- void solveInPlace(MatrixBase<Derived> &b) const;
-
- void compute(const MatrixType& matrix);
-
- protected:
- void* m_taucsSupernodalFactor;
-};
-
-template<typename MatrixType>
-void SparseLLT<MatrixType,Taucs>::compute(const MatrixType& a)
-{
- if (m_taucsSupernodalFactor)
- {
- taucs_supernodal_factor_free(m_taucsSupernodalFactor);
- m_taucsSupernodalFactor = 0;
- }
-
- if (m_flags & IncompleteFactorization)
- {
- taucs_ccs_matrix taucsMatA = const_cast<MatrixType&>(a).asTaucsMatrix();
- taucs_ccs_matrix* taucsRes = taucs_ccs_factor_llt(&taucsMatA, Base::m_precision, 0);
- // the matrix returned by Taucs is not necessarily sorted,
- // so let's copy it in two steps
- DynamicSparseMatrix<Scalar,RowMajor> tmp = MappedSparseMatrix<Scalar>(*taucsRes);
- m_matrix = tmp;
- free(taucsRes);
- m_status = (m_status & ~(CompleteFactorization|MatrixLIsDirty))
- | IncompleteFactorization
- | SupernodalFactorIsDirty;
- }
- else
- {
- taucs_ccs_matrix taucsMatA = const_cast<MatrixType&>(a).asTaucsMatrix();
- if ( (m_flags & SupernodalLeftLooking)
- || ((!(m_flags & SupernodalMultifrontal)) && (m_flags & MemoryEfficient)) )
- {
- m_taucsSupernodalFactor = taucs_ccs_factor_llt_ll(&taucsMatA);
- }
- else
- {
- // use the faster Multifrontal routine
- m_taucsSupernodalFactor = taucs_ccs_factor_llt_mf(&taucsMatA);
- }
- m_status = (m_status & ~IncompleteFactorization) | CompleteFactorization | MatrixLIsDirty;
- }
-}
-
-template<typename MatrixType>
-inline const typename SparseLLT<MatrixType>::CholMatrixType&
-SparseLLT<MatrixType,Taucs>::matrixL() const
-{
- if (m_status & MatrixLIsDirty)
- {
- ei_assert(!(m_status & SupernodalFactorIsDirty));
-
- taucs_ccs_matrix* taucsL = taucs_supernodal_factor_to_ccs(m_taucsSupernodalFactor);
-
- // the matrix returned by Taucs is not necessarily sorted,
- // so let's copy it in two steps
- DynamicSparseMatrix<Scalar,RowMajor> tmp = MappedSparseMatrix<Scalar>(*taucsL);
- const_cast<typename Base::CholMatrixType&>(m_matrix) = tmp;
- free(taucsL);
- m_status = (m_status & ~MatrixLIsDirty);
- }
- return m_matrix;
-}
-
-template<typename MatrixType>
-template<typename Derived>
-void SparseLLT<MatrixType,Taucs>::solveInPlace(MatrixBase<Derived> &b) const
-{
- bool inputIsCompatibleWithTaucs = (Derived::Flags&RowMajorBit)==0;
-
- if (!inputIsCompatibleWithTaucs)
- {
- matrixL();
- Base::solveInPlace(b);
- }
- else if (m_flags & IncompleteFactorization)
- {
- taucs_ccs_matrix taucsLLT = const_cast<typename Base::CholMatrixType&>(m_matrix).asTaucsMatrix();
- typename ei_plain_matrix_type<Derived>::type x(b.rows());
- for (int j=0; j<b.cols(); ++j)
- {
- taucs_ccs_solve_llt(&taucsLLT,x.data(),&b.col(j).coeffRef(0));
- b.col(j) = x;
- }
- }
- else
- {
- typename ei_plain_matrix_type<Derived>::type x(b.rows());
- for (int j=0; j<b.cols(); ++j)
- {
- taucs_supernodal_solve_llt(m_taucsSupernodalFactor,x.data(),&b.col(j).coeffRef(0));
- b.col(j) = x;
- }
- }
-}
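-
-// A minimal usage sketch, assuming A is symmetric positive definite (n illustrative):
-// \code
-// int n = 100;
-// SparseMatrix<double> A(n,n); // self-adjoint, e.g. lower part stored
-// SparseLLT<SparseMatrix<double>,Taucs> llt(A);
-// VectorXd b(n);
-// llt.solveInPlace(b); // b is overwritten with the solution
-// \endcode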
-
-#endif // EIGEN_TAUCSSUPPORT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h b/extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h
deleted file mode 100644
index 8948ae45e1d..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h
+++ /dev/null
@@ -1,178 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
-#define EIGEN_SPARSETRIANGULARSOLVER_H
-
-// forward substitution, row-major
-template<typename Lhs, typename Rhs>
-struct ei_solve_triangular_selector<Lhs,Rhs,LowerTriangular,RowMajor|IsSparse>
-{
- typedef typename Rhs::Scalar Scalar;
- static void run(const Lhs& lhs, Rhs& other)
- {
- for(int col=0 ; col<other.cols() ; ++col)
- {
- for(int i=0; i<lhs.rows(); ++i)
- {
- Scalar tmp = other.coeff(i,col);
- Scalar lastVal = 0;
- int lastIndex = 0;
- for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
- {
- lastVal = it.value();
- lastIndex = it.index();
- tmp -= lastVal * other.coeff(lastIndex,col);
- }
- if (Lhs::Flags & UnitDiagBit)
- other.coeffRef(i,col) = tmp;
- else
- {
- ei_assert(lastIndex==i);
- other.coeffRef(i,col) = tmp/lastVal;
- }
- }
- }
- }
-};
-
-// backward substitution, row-major
-template<typename Lhs, typename Rhs>
-struct ei_solve_triangular_selector<Lhs,Rhs,UpperTriangular,RowMajor|IsSparse>
-{
- typedef typename Rhs::Scalar Scalar;
- static void run(const Lhs& lhs, Rhs& other)
- {
- for(int col=0 ; col<other.cols() ; ++col)
- {
- for(int i=lhs.rows()-1 ; i>=0 ; --i)
- {
- Scalar tmp = other.coeff(i,col);
- typename Lhs::InnerIterator it(lhs, i);
- if (it.index() == i)
- ++it;
- for(; it; ++it)
- {
- tmp -= it.value() * other.coeff(it.index(),col);
- }
-
- if (Lhs::Flags & UnitDiagBit)
- other.coeffRef(i,col) = tmp;
- else
- {
- typename Lhs::InnerIterator it(lhs, i);
- ei_assert(it.index() == i);
- other.coeffRef(i,col) = tmp/it.value();
- }
- }
- }
- }
-};
-
-// forward substitution, col-major
-template<typename Lhs, typename Rhs>
-struct ei_solve_triangular_selector<Lhs,Rhs,LowerTriangular,ColMajor|IsSparse>
-{
- typedef typename Rhs::Scalar Scalar;
- static void run(const Lhs& lhs, Rhs& other)
- {
- for(int col=0 ; col<other.cols() ; ++col)
- {
- for(int i=0; i<lhs.cols(); ++i)
- {
- typename Lhs::InnerIterator it(lhs, i);
- if(!(Lhs::Flags & UnitDiagBit))
- {
- // std::cerr << it.value() << " ; " << it.index() << " == " << i << "\n";
- ei_assert(it.index()==i);
- other.coeffRef(i,col) /= it.value();
- }
- Scalar tmp = other.coeffRef(i,col);
- if (it.index()==i)
- ++it;
- for(; it; ++it)
- other.coeffRef(it.index(), col) -= tmp * it.value();
- }
- }
- }
-};
-
-// backward substitution, col-major
-template<typename Lhs, typename Rhs>
-struct ei_solve_triangular_selector<Lhs,Rhs,UpperTriangular,ColMajor|IsSparse>
-{
- typedef typename Rhs::Scalar Scalar;
- static void run(const Lhs& lhs, Rhs& other)
- {
- for(int col=0 ; col<other.cols() ; ++col)
- {
- for(int i=lhs.cols()-1; i>=0; --i)
- {
- if(!(Lhs::Flags & UnitDiagBit))
- {
- // FIXME lhs.coeff(i,i) might not always be efficient, whereas it should simply be
- // the last element of the column!
- other.coeffRef(i,col) /= lhs.coeff(i,i);
- }
- Scalar tmp = other.coeffRef(i,col);
- typename Lhs::InnerIterator it(lhs, i);
- for(; it && it.index()<i; ++it)
- other.coeffRef(it.index(), col) -= tmp * it.value();
- }
- }
- }
-};
-
-template<typename Derived>
-template<typename OtherDerived>
-void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
-{
- ei_assert(derived().cols() == derived().rows());
- ei_assert(derived().cols() == other.rows());
- ei_assert(!(Flags & ZeroDiagBit));
- ei_assert(Flags & (UpperTriangularBit|LowerTriangularBit));
-
- enum { copy = ei_traits<OtherDerived>::Flags & RowMajorBit };
-
- typedef typename ei_meta_if<copy,
- typename ei_plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::ret OtherCopy;
- OtherCopy otherCopy(other.derived());
-
- ei_solve_triangular_selector<Derived, typename ei_unref<OtherCopy>::type>::run(derived(), otherCopy);
-
- if (copy)
- other = otherCopy;
-}
-
-template<typename Derived>
-template<typename OtherDerived>
-typename ei_plain_matrix_type_column_major<OtherDerived>::type
-SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
-{
- typename ei_plain_matrix_type_column_major<OtherDerived>::type res(other);
- solveTriangularInPlace(res);
- return res;
-}
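-
-// A minimal usage sketch, assuming L is stored with the LowerTriangular flag
-// (n is an illustrative size):
-// \code
-// int n = 100;
-// SparseMatrix<double,LowerTriangular> L(n,n);
-// VectorXd b(n);
-// L.solveTriangularInPlace(b);       // forward substitution, b overwritten
-// VectorXd x = L.solveTriangular(b); // or keep b and return a copy
-// \endcode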
-
-#endif // EIGEN_SPARSETRIANGULARSOLVER_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h b/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h
deleted file mode 100644
index b76ffb25248..00000000000
--- a/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h
+++ /dev/null
@@ -1,289 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
-//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
-//
-// Eigen is free software; you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public
-// License as published by the Free Software Foundation; either
-// version 3 of the License, or (at your option) any later version.
-//
-// Alternatively, you can redistribute it and/or
-// modify it under the terms of the GNU General Public License as
-// published by the Free Software Foundation; either version 2 of
-// the License, or (at your option) any later version.
-//
-// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public
-// License and a copy of the GNU General Public License along with
-// Eigen. If not, see <http://www.gnu.org/licenses/>.
-
-#ifndef EIGEN_UMFPACKSUPPORT_H
-#define EIGEN_UMFPACKSUPPORT_H
-
-/* TODO extract L, extract U, compute det, etc... */
-
-// generic double/complex<double> wrapper functions:
-
-inline void umfpack_free_numeric(void **Numeric, double)
-{ umfpack_di_free_numeric(Numeric); }
-
-inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
-{ umfpack_zi_free_numeric(Numeric); }
-
-inline void umfpack_free_symbolic(void **Symbolic, double)
-{ umfpack_di_free_symbolic(Symbolic); }
-
-inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
-{ umfpack_zi_free_symbolic(Symbolic); }
-
-inline int umfpack_symbolic(int n_row,int n_col,
- const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
- const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
-{
- return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
-}
-
-inline int umfpack_symbolic(int n_row,int n_col,
- const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
- const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
-{
- return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&Ax[0].real(),0,Symbolic,Control,Info);
-}
-
-inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
- void *Symbolic, void **Numeric,
- const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
-{
- return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
-}
-
-inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<double> Ax[],
- void *Symbolic, void **Numeric,
- const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
-{
- return umfpack_zi_numeric(Ap,Ai,&Ax[0].real(),0,Symbolic,Numeric,Control,Info);
-}
-
-inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
- double X[], const double B[], void *Numeric,
- const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
-{
- return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
-}
-
-inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex<double> Ax[],
- std::complex<double> X[], const std::complex<double> B[], void *Numeric,
- const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
-{
- return umfpack_zi_solve(sys,Ap,Ai,&Ax[0].real(),0,&X[0].real(),0,&B[0].real(),0,Numeric,Control,Info);
-}
-
-inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
-{
- return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
-}
-
-inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex<double>)
-{
- return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
-}
-
-inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
- int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
-{
- return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
-}
-
-inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
- int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
-{
- return umfpack_zi_get_numeric(Lp,Lj,Lx?&Lx[0].real():0,0,Up,Ui,Ux?&Ux[0].real():0,0,P,Q,
- Dx?&Dx[0].real():0,0,do_recip,Rs,Numeric);
-}
-
-inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
-{
- return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
-}
-
-inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
-{
- return umfpack_zi_get_determinant(&Mx->real(),0,Ex,NumericHandle,User_Info);
-}
-
-
-template<typename MatrixType>
-class SparseLU<MatrixType,UmfPack> : public SparseLU<MatrixType>
-{
- protected:
- typedef SparseLU<MatrixType> Base;
- typedef typename Base::Scalar Scalar;
- typedef typename Base::RealScalar RealScalar;
- typedef Matrix<Scalar,Dynamic,1> Vector;
- typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
- typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
- typedef SparseMatrix<Scalar,LowerTriangular|UnitDiagBit> LMatrixType;
- typedef SparseMatrix<Scalar,UpperTriangular> UMatrixType;
- using Base::m_flags;
- using Base::m_status;
-
- public:
-
- SparseLU(int flags = NaturalOrdering)
- : Base(flags), m_numeric(0)
- {
- }
-
- SparseLU(const MatrixType& matrix, int flags = NaturalOrdering)
- : Base(flags), m_numeric(0)
- {
- compute(matrix);
- }
-
- ~SparseLU()
- {
- if (m_numeric)
- umfpack_free_numeric(&m_numeric,Scalar());
- }
-
- inline const LMatrixType& matrixL() const
- {
- if (m_extractedDataAreDirty) extractData();
- return m_l;
- }
-
- inline const UMatrixType& matrixU() const
- {
- if (m_extractedDataAreDirty) extractData();
- return m_u;
- }
-
- inline const IntColVectorType& permutationP() const
- {
- if (m_extractedDataAreDirty) extractData();
- return m_p;
- }
-
- inline const IntRowVectorType& permutationQ() const
- {
- if (m_extractedDataAreDirty) extractData();
- return m_q;
- }
-
- Scalar determinant() const;
-
- template<typename BDerived, typename XDerived>
- bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x) const;
-
- void compute(const MatrixType& matrix);
-
- protected:
-
- void extractData() const;
-
- protected:
- // cached data:
- void* m_numeric;
- const MatrixType* m_matrixRef;
- mutable LMatrixType m_l;
- mutable UMatrixType m_u;
- mutable IntColVectorType m_p;
- mutable IntRowVectorType m_q;
- mutable bool m_extractedDataAreDirty;
-};
-
-template<typename MatrixType>
-void SparseLU<MatrixType,UmfPack>::compute(const MatrixType& a)
-{
- const int rows = a.rows();
- const int cols = a.cols();
- ei_assert((MatrixType::Flags&RowMajorBit)==0 && "Row major matrices are not supported yet");
-
- m_matrixRef = &a;
-
- if (m_numeric)
- umfpack_free_numeric(&m_numeric,Scalar());
-
- void* symbolic;
- int errorCode = 0;
- errorCode = umfpack_symbolic(rows, cols, a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(),
- &symbolic, 0, 0);
- if (errorCode==0)
- errorCode = umfpack_numeric(a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(),
- symbolic, &m_numeric, 0, 0);
-
- umfpack_free_symbolic(&symbolic,Scalar());
-
- m_extractedDataAreDirty = true;
-
- Base::m_succeeded = (errorCode==0);
-}
-
-template<typename MatrixType>
-void SparseLU<MatrixType,UmfPack>::extractData() const
-{
- if (m_extractedDataAreDirty)
- {
- // get size of the data
- int lnz, unz, rows, cols, nz_udiag;
- umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
-
- // allocate data
- m_l.resize(rows,std::min(rows,cols));
- m_l.resizeNonZeros(lnz);
-
- m_u.resize(std::min(rows,cols),cols);
- m_u.resizeNonZeros(unz);
-
- m_p.resize(rows);
- m_q.resize(cols);
-
- // extract
- umfpack_get_numeric(m_l._outerIndexPtr(), m_l._innerIndexPtr(), m_l._valuePtr(),
- m_u._outerIndexPtr(), m_u._innerIndexPtr(), m_u._valuePtr(),
- m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
-
- m_extractedDataAreDirty = false;
- }
-}
-
-template<typename MatrixType>
-typename SparseLU<MatrixType,UmfPack>::Scalar SparseLU<MatrixType,UmfPack>::determinant() const
-{
- Scalar det;
- umfpack_get_determinant(&det, 0, m_numeric, 0);
- return det;
-}
-
-template<typename MatrixType>
-template<typename BDerived,typename XDerived>
-bool SparseLU<MatrixType,UmfPack>::solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> *x) const
-{
- //const int size = m_matrix.rows();
- const int rhsCols = b.cols();
-// ei_assert(size==b.rows());
- ei_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet");
- ei_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet");
-
- int errorCode;
- for (int j=0; j<rhsCols; ++j)
- {
- errorCode = umfpack_solve(UMFPACK_A,
- m_matrixRef->_outerIndexPtr(), m_matrixRef->_innerIndexPtr(), m_matrixRef->_valuePtr(),
- &x->col(j).coeffRef(0), &b.const_cast_derived().col(j).coeffRef(0), m_numeric, 0, 0);
- if (errorCode!=0)
- return false;
- }
-// errorCode = umfpack_di_solve(UMFPACK_A,
-// m_matrixRef._outerIndexPtr(), m_matrixRef._innerIndexPtr(), m_matrixRef._valuePtr(),
-// x->derived().data(), b.derived().data(), m_numeric, 0, 0);
-
- return true;
-}
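-
-// A minimal usage sketch, mirroring the SuperLU backend above (sizes illustrative):
-// \code
-// int n = 100;
-// SparseMatrix<double> A(n,n); // col-major, as asserted in compute()
-// SparseLU<SparseMatrix<double>,UmfPack> lu(A);
-// MatrixXd b(n,1), x(n,1);
-// bool ok = lu.solve(b, &x);
-// double det = lu.determinant(); // via umfpack_get_determinant
-// \endcode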
-
-#endif // EIGEN_UMFPACKSUPPORT_H
diff --git a/extern/Eigen3/Eigen/Array b/extern/Eigen3/Eigen/Array
new file mode 100644
index 00000000000..3d004fb69e8
--- /dev/null
+++ b/extern/Eigen3/Eigen/Array
@@ -0,0 +1,11 @@
+#ifndef EIGEN_ARRAY_MODULE_H
+#define EIGEN_ARRAY_MODULE_H
+
+// include Core first to handle Eigen2 support macros
+#include "Core"
+
+#ifndef EIGEN2_SUPPORT
+ #error The Eigen/Array header no longer exists in Eigen3. All that functionality has moved to Eigen/Core.
+#endif
+
+#endif // EIGEN_ARRAY_MODULE_H
diff --git a/extern/Eigen3/Eigen/Cholesky b/extern/Eigen3/Eigen/Cholesky
new file mode 100644
index 00000000000..53f7bf911a4
--- /dev/null
+++ b/extern/Eigen3/Eigen/Cholesky
@@ -0,0 +1,33 @@
+#ifndef EIGEN_CHOLESKY_MODULE_H
+#define EIGEN_CHOLESKY_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+namespace Eigen {
+
+/** \defgroup Cholesky_Module Cholesky module
+ *
+ *
+ *
+ * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
+ * Those decompositions are accessible via the following MatrixBase methods:
+ * - MatrixBase::llt(),
+ * - MatrixBase::ldlt()
+ *
+ * \code
+ * #include <Eigen/Cholesky>
+ * \endcode
+ */
+
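+// A minimal usage sketch with the standard Eigen3 API: solving A*x = b via Cholesky:
+// \code
+// Eigen::MatrixXd A; Eigen::VectorXd b;  // A self-adjoint positive definite
+// Eigen::VectorXd x = A.llt().solve(b);  // or A.ldlt().solve(b) for the more robust variant
+// \endcode
+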
+#include "src/misc/Solve.h"
+#include "src/Cholesky/LLT.h"
+#include "src/Cholesky/LDLT.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CHOLESKY_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/extern/Eigen3/Eigen/Core b/extern/Eigen3/Eigen/Core
new file mode 100644
index 00000000000..6e855427c33
--- /dev/null
+++ b/extern/Eigen3/Eigen/Core
@@ -0,0 +1,360 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CORE_H
+#define EIGEN_CORE_H
+
+// first thing Eigen does: stop the compiler from committing suicide
+#include "src/Core/util/DisableStupidWarnings.h"
+
+// then include this file where all our macros are defined. It's really important to include it first because
+// it's where we do all the alignment settings (platform detection and honoring the user's choice if they
+// defined e.g. EIGEN_DONT_ALIGN), so it needs to be done before we do anything with vectorization.
+#include "src/Core/util/Macros.h"
+
+// if alignment is disabled, then disable vectorization. Note: EIGEN_ALIGN is the proper check, it takes into
+// account both the user's will (EIGEN_DONT_ALIGN) and our own platform checks
+#if !EIGEN_ALIGN
+ #ifndef EIGEN_DONT_VECTORIZE
+ #define EIGEN_DONT_VECTORIZE
+ #endif
+#endif
+
+#ifdef _MSC_VER
+ #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
+ #if (_MSC_VER >= 1500) // 2008 or later
+ // Remember that usage of defined() in a #define is undefined by the standard.
+ // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
+ #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || defined(_M_X64)
+ #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
+ #endif
+ #endif
+#else
+ // Remember that usage of defined() in a #define is undefined by the standard
+ #if (defined __SSE2__) && ( (!defined __GNUC__) || EIGEN_GNUC_AT_LEAST(4,2) )
+ #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
+ #endif
+#endif
+
+#ifndef EIGEN_DONT_VECTORIZE
+
+ #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
+
+ // Defines symbols for compile-time detection of which instructions are
+ // used.
+ // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_SSE
+ #define EIGEN_VECTORIZE_SSE2
+
+ // Detect sse3/ssse3/sse4:
+ // gcc and icc defines __SSE3__, ...
+ // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you
+ // want to force the use of those instructions with msvc.
+ #ifdef __SSE3__
+ #define EIGEN_VECTORIZE_SSE3
+ #endif
+ #ifdef __SSSE3__
+ #define EIGEN_VECTORIZE_SSSE3
+ #endif
+ #ifdef __SSE4_1__
+ #define EIGEN_VECTORIZE_SSE4_1
+ #endif
+ #ifdef __SSE4_2__
+ #define EIGEN_VECTORIZE_SSE4_2
+ #endif
+
+ // include files
+
+ // This extern "C" works around a MINGW-w64 compilation issue
+ // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354
+ // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).
+ // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations
+ // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;
+ // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
+// notice that since these are C headers, the extern "C" is theoretically needed anyway.
+ extern "C" {
+ #include <emmintrin.h>
+ #include <xmmintrin.h>
+ #ifdef EIGEN_VECTORIZE_SSE3
+ #include <pmmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSSE3
+ #include <tmmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSE4_1
+ #include <smmintrin.h>
+ #endif
+ #ifdef EIGEN_VECTORIZE_SSE4_2
+ #include <nmmintrin.h>
+ #endif
+ } // end extern "C"
+ #elif defined __ALTIVEC__
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_ALTIVEC
+ #include <altivec.h>
+ // We need to #undef all these ugly tokens defined in <altivec.h>
+ // => use __vector instead of vector
+ #undef bool
+ #undef vector
+ #undef pixel
+ #elif defined __ARM_NEON__
+ #define EIGEN_VECTORIZE
+ #define EIGEN_VECTORIZE_NEON
+ #include <arm_neon.h>
+ #endif
+#endif
+
+#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
+ #define EIGEN_HAS_OPENMP
+#endif
+
+#ifdef EIGEN_HAS_OPENMP
+#include <omp.h>
+#endif
+
+// MSVC for windows mobile does not have the errno.h file
+#if !(defined(_MSC_VER) && defined(_WIN32_WCE))
+#define EIGEN_HAS_ERRNO
+#endif
+
+#ifdef EIGEN_HAS_ERRNO
+#include <cerrno>
+#endif
+#include <cstddef>
+#include <cstdlib>
+#include <cmath>
+#include <complex>
+#include <cassert>
+#include <functional>
+#include <iosfwd>
+#include <cstring>
+#include <string>
+#include <limits>
+#include <climits> // for CHAR_BIT
+// for min/max:
+#include <algorithm>
+
+// for outputting debug info
+#ifdef EIGEN_DEBUG_ASSIGN
+#include <iostream>
+#endif
+
+// required for __cpuid, needs to be included after cmath
+#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64))
+ #include <intrin.h>
+#endif
+
+#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(EIGEN_NO_EXCEPTIONS)
+ #define EIGEN_EXCEPTIONS
+#endif
+
+#ifdef EIGEN_EXCEPTIONS
+ #include <new>
+#endif
+
+// defined in bits/termios.h
+#undef B0
+
+/** \brief Namespace containing all symbols from the %Eigen library. */
+namespace Eigen {
+
+inline static const char *SimdInstructionSetsInUse(void) {
+#if defined(EIGEN_VECTORIZE_SSE4_2)
+ return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
+#elif defined(EIGEN_VECTORIZE_SSE4_1)
+ return "SSE, SSE2, SSE3, SSSE3, SSE4.1";
+#elif defined(EIGEN_VECTORIZE_SSSE3)
+ return "SSE, SSE2, SSE3, SSSE3";
+#elif defined(EIGEN_VECTORIZE_SSE3)
+ return "SSE, SSE2, SSE3";
+#elif defined(EIGEN_VECTORIZE_SSE2)
+ return "SSE, SSE2";
+#elif defined(EIGEN_VECTORIZE_ALTIVEC)
+ return "AltiVec";
+#elif defined(EIGEN_VECTORIZE_NEON)
+ return "ARM NEON";
+#else
+ return "None";
+#endif
+}
+
+#define STAGE10_FULL_EIGEN2_API 10
+#define STAGE20_RESOLVE_API_CONFLICTS 20
+#define STAGE30_FULL_EIGEN3_API 30
+#define STAGE40_FULL_EIGEN3_STRICTNESS 40
+#define STAGE99_NO_EIGEN2_SUPPORT 99
+
+#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS
+ #define EIGEN2_SUPPORT
+ #define EIGEN2_SUPPORT_STAGE STAGE40_FULL_EIGEN3_STRICTNESS
+#elif defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API
+ #define EIGEN2_SUPPORT
+ #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API
+#elif defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS
+ #define EIGEN2_SUPPORT
+ #define EIGEN2_SUPPORT_STAGE STAGE20_RESOLVE_API_CONFLICTS
+#elif defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API
+ #define EIGEN2_SUPPORT
+ #define EIGEN2_SUPPORT_STAGE STAGE10_FULL_EIGEN2_API
+#elif defined EIGEN2_SUPPORT
+ // default to stage 3; that's what it has always meant
+ #define EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API
+ #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API
+#else
+ #define EIGEN2_SUPPORT_STAGE STAGE99_NO_EIGEN2_SUPPORT
+#endif
+
+#ifdef EIGEN2_SUPPORT
+#undef minor
+#endif
+
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
+// ensure QNX/QCC support
+using std::size_t;
+// gcc 4.6.0 wants std:: for ptrdiff_t
+using std::ptrdiff_t;
+
+/** \defgroup Core_Module Core module
+ * This is the main module of Eigen providing dense matrix and vector support
+ * (both fixed and dynamic size) with all the features corresponding to a BLAS library
+ * and much more...
+ *
+ * \code
+ * #include <Eigen/Core>
+ * \endcode
+ */
+
+#include "src/Core/util/Constants.h"
+#include "src/Core/util/ForwardDeclarations.h"
+#include "src/Core/util/Meta.h"
+#include "src/Core/util/XprHelper.h"
+#include "src/Core/util/StaticAssert.h"
+#include "src/Core/util/Memory.h"
+
+#include "src/Core/NumTraits.h"
+#include "src/Core/MathFunctions.h"
+#include "src/Core/GenericPacketMath.h"
+
+#if defined EIGEN_VECTORIZE_SSE
+ #include "src/Core/arch/SSE/PacketMath.h"
+ #include "src/Core/arch/SSE/MathFunctions.h"
+ #include "src/Core/arch/SSE/Complex.h"
+#elif defined EIGEN_VECTORIZE_ALTIVEC
+ #include "src/Core/arch/AltiVec/PacketMath.h"
+ #include "src/Core/arch/AltiVec/Complex.h"
+#elif defined EIGEN_VECTORIZE_NEON
+ #include "src/Core/arch/NEON/PacketMath.h"
+ #include "src/Core/arch/NEON/Complex.h"
+#endif
+
+#include "src/Core/arch/Default/Settings.h"
+
+#include "src/Core/Functors.h"
+#include "src/Core/DenseCoeffsBase.h"
+#include "src/Core/DenseBase.h"
+#include "src/Core/MatrixBase.h"
+#include "src/Core/EigenBase.h"
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874
+ // at least confirmed with Doxygen 1.5.5 and 1.5.6
+ #include "src/Core/Assign.h"
+#endif
+
+#include "src/Core/util/BlasUtil.h"
+#include "src/Core/DenseStorage.h"
+#include "src/Core/NestByValue.h"
+#include "src/Core/ForceAlignedAccess.h"
+#include "src/Core/ReturnByValue.h"
+#include "src/Core/NoAlias.h"
+#include "src/Core/PlainObjectBase.h"
+#include "src/Core/Matrix.h"
+#include "src/Core/Array.h"
+#include "src/Core/CwiseBinaryOp.h"
+#include "src/Core/CwiseUnaryOp.h"
+#include "src/Core/CwiseNullaryOp.h"
+#include "src/Core/CwiseUnaryView.h"
+#include "src/Core/SelfCwiseBinaryOp.h"
+#include "src/Core/Dot.h"
+#include "src/Core/StableNorm.h"
+#include "src/Core/MapBase.h"
+#include "src/Core/Stride.h"
+#include "src/Core/Map.h"
+#include "src/Core/Block.h"
+#include "src/Core/VectorBlock.h"
+#include "src/Core/Transpose.h"
+#include "src/Core/DiagonalMatrix.h"
+#include "src/Core/Diagonal.h"
+#include "src/Core/DiagonalProduct.h"
+#include "src/Core/PermutationMatrix.h"
+#include "src/Core/Transpositions.h"
+#include "src/Core/Redux.h"
+#include "src/Core/Visitor.h"
+#include "src/Core/Fuzzy.h"
+#include "src/Core/IO.h"
+#include "src/Core/Swap.h"
+#include "src/Core/CommaInitializer.h"
+#include "src/Core/Flagged.h"
+#include "src/Core/ProductBase.h"
+#include "src/Core/Product.h"
+#include "src/Core/TriangularMatrix.h"
+#include "src/Core/SelfAdjointView.h"
+#include "src/Core/SolveTriangular.h"
+#include "src/Core/products/Parallelizer.h"
+#include "src/Core/products/CoeffBasedProduct.h"
+#include "src/Core/products/GeneralBlockPanelKernel.h"
+#include "src/Core/products/GeneralMatrixVector.h"
+#include "src/Core/products/GeneralMatrixMatrix.h"
+#include "src/Core/products/GeneralMatrixMatrixTriangular.h"
+#include "src/Core/products/SelfadjointMatrixVector.h"
+#include "src/Core/products/SelfadjointMatrixMatrix.h"
+#include "src/Core/products/SelfadjointProduct.h"
+#include "src/Core/products/SelfadjointRank2Update.h"
+#include "src/Core/products/TriangularMatrixVector.h"
+#include "src/Core/products/TriangularMatrixMatrix.h"
+#include "src/Core/products/TriangularSolverMatrix.h"
+#include "src/Core/products/TriangularSolverVector.h"
+#include "src/Core/BandMatrix.h"
+
+#include "src/Core/BooleanRedux.h"
+#include "src/Core/Select.h"
+#include "src/Core/VectorwiseOp.h"
+#include "src/Core/Random.h"
+#include "src/Core/Replicate.h"
+#include "src/Core/Reverse.h"
+#include "src/Core/ArrayBase.h"
+#include "src/Core/ArrayWrapper.h"
+
+} // namespace Eigen
+
+#include "src/Core/GlobalFunctions.h"
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#ifdef EIGEN2_SUPPORT
+#include "Eigen2Support"
+#endif
+
+#endif // EIGEN_CORE_H
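
A small sketch showing how the vectorization detection above surfaces at runtime; SimdInstructionSetsInUse() simply reflects the EIGEN_VECTORIZE_* tokens defined during compilation:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      // Reports which of the instruction sets detected above were compiled in.
      std::cout << "SIMD instruction sets in use: "
                << Eigen::SimdInstructionSetsInUse() << "\n";
      Eigen::Matrix3f m = Eigen::Matrix3f::Identity();
      std::cout << "trace of I3: " << m.trace() << "\n";  // prints 3
      return 0;
    }
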
diff --git a/extern/Eigen2/Eigen/Dense b/extern/Eigen3/Eigen/Dense
index 9655edcd7aa..5768910bd88 100644
--- a/extern/Eigen2/Eigen/Dense
+++ b/extern/Eigen3/Eigen/Dense
@@ -1,8 +1,7 @@
#include "Core"
-#include "Array"
#include "LU"
#include "Cholesky"
#include "QR"
#include "SVD"
#include "Geometry"
-#include "LeastSquares"
+#include "Eigenvalues"
diff --git a/extern/Eigen3/Eigen/Eigen b/extern/Eigen3/Eigen/Eigen
new file mode 100644
index 00000000000..19b40ea4e7e
--- /dev/null
+++ b/extern/Eigen3/Eigen/Eigen
@@ -0,0 +1,2 @@
+#include "Dense"
+//#include "Sparse"
diff --git a/extern/Eigen3/Eigen/Eigen2Support b/extern/Eigen3/Eigen/Eigen2Support
new file mode 100644
index 00000000000..d96592a8de9
--- /dev/null
+++ b/extern/Eigen3/Eigen/Eigen2Support
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN2SUPPORT_H
+#define EIGEN2SUPPORT_H
+
+#if (!defined(EIGEN2_SUPPORT)) || (!defined(EIGEN_CORE_H))
+#error Eigen2 support must be enabled by defining EIGEN2_SUPPORT before including any Eigen header
+#endif
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+namespace Eigen {
+
+/** \defgroup Eigen2Support_Module Eigen2 support module
+ * This module provides a couple of deprecated functions improving compatibility with Eigen2.
+ *
+ * To use it, define EIGEN2_SUPPORT before including any Eigen header
+ * \code
+ * #define EIGEN2_SUPPORT
+ * \endcode
+ *
+ */
+
+#include "src/Eigen2Support/Macros.h"
+#include "src/Eigen2Support/Memory.h"
+#include "src/Eigen2Support/Meta.h"
+#include "src/Eigen2Support/Lazy.h"
+#include "src/Eigen2Support/Cwise.h"
+#include "src/Eigen2Support/CwiseOperators.h"
+#include "src/Eigen2Support/TriangularSolver.h"
+#include "src/Eigen2Support/Block.h"
+#include "src/Eigen2Support/VectorBlock.h"
+#include "src/Eigen2Support/Minor.h"
+#include "src/Eigen2Support/MathFunctions.h"
+
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+// Eigen2 used to include iostream
+#include <iostream>
+
+#define USING_PART_OF_NAMESPACE_EIGEN \
+EIGEN_USING_MATRIX_TYPEDEFS \
+using Eigen::Matrix; \
+using Eigen::MatrixBase; \
+using Eigen::ei_random; \
+using Eigen::ei_real; \
+using Eigen::ei_imag; \
+using Eigen::ei_conj; \
+using Eigen::ei_abs; \
+using Eigen::ei_abs2; \
+using Eigen::ei_sqrt; \
+using Eigen::ei_exp; \
+using Eigen::ei_log; \
+using Eigen::ei_sin; \
+using Eigen::ei_cos;
+
+#endif // EIGEN2SUPPORT_H
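
A sketch of the first migration stage defined in Core, built on the compatibility layer above; the unqualified typedefs brought in by USING_PART_OF_NAMESPACE_EIGEN and the deprecated ei_* functions all come from this module:

    // Stage 1 of a port: full Eigen2 API on top of Eigen3.
    #define EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API
    #include <Eigen/Core>

    USING_PART_OF_NAMESPACE_EIGEN  // Eigen2-style unqualified names

    int main()
    {
      Matrix3f m = Matrix3f::Identity();  // typedefs pulled in by the macro
      float a = ei_abs(m(0, 0));          // deprecated ei_* math functions
      return a > 0 ? 0 : 1;
    }
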
diff --git a/extern/Eigen3/Eigen/Eigenvalues b/extern/Eigen3/Eigen/Eigenvalues
new file mode 100644
index 00000000000..250c0f46652
--- /dev/null
+++ b/extern/Eigen3/Eigen/Eigenvalues
@@ -0,0 +1,44 @@
+#ifndef EIGEN_EIGENVALUES_MODULE_H
+#define EIGEN_EIGENVALUES_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include "Cholesky"
+#include "Jacobi"
+#include "Householder"
+#include "LU"
+
+namespace Eigen {
+
+/** \defgroup Eigenvalues_Module Eigenvalues module
+ *
+ *
+ *
+ * This module mainly provides various eigenvalue solvers.
+ * This module also provides some MatrixBase methods, including:
+ * - MatrixBase::eigenvalues(),
+ * - MatrixBase::operatorNorm()
+ *
+ * \code
+ * #include <Eigen/Eigenvalues>
+ * \endcode
+ */
+
+#include "src/Eigenvalues/Tridiagonalization.h"
+#include "src/Eigenvalues/RealSchur.h"
+#include "src/Eigenvalues/EigenSolver.h"
+#include "src/Eigenvalues/SelfAdjointEigenSolver.h"
+#include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h"
+#include "src/Eigenvalues/HessenbergDecomposition.h"
+#include "src/Eigenvalues/ComplexSchur.h"
+#include "src/Eigenvalues/ComplexEigenSolver.h"
+#include "src/Eigenvalues/MatrixBaseEigenvalues.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_EIGENVALUES_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
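
A short sketch of the selfadjoint solver and the MatrixBase::operatorNorm() helper advertised above:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d A;
      A << 2, 1,
           1, 2;                              // selfadjoint, eigenvalues 1 and 3
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> es(A);
      std::cout << "eigenvalues: " << es.eigenvalues().transpose() << "\n";
      std::cout << "operator norm: " << A.operatorNorm() << "\n";  // 3
      return 0;
    }
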
diff --git a/extern/Eigen3/Eigen/Geometry b/extern/Eigen3/Eigen/Geometry
new file mode 100644
index 00000000000..78277c0c560
--- /dev/null
+++ b/extern/Eigen3/Eigen/Geometry
@@ -0,0 +1,67 @@
+#ifndef EIGEN_GEOMETRY_MODULE_H
+#define EIGEN_GEOMETRY_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include "SVD"
+#include "LU"
+#include <limits>
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+namespace Eigen {
+
+/** \defgroup Geometry_Module Geometry module
+ *
+ *
+ *
+ * This module provides support for:
+ * - fixed-size homogeneous transformations
+ * - translation, scaling, 2D and 3D rotations
+ * - quaternions
+ * - \ref MatrixBase::cross() "cross product"
+ * - \ref MatrixBase::unitOrthogonal() "orthogonal vector generation"
+ * - some linear components: parametrized-lines and hyperplanes
+ *
+ * \code
+ * #include <Eigen/Geometry>
+ * \endcode
+ */
+
+#include "src/Geometry/OrthoMethods.h"
+#include "src/Geometry/EulerAngles.h"
+
+#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+ #include "src/Geometry/Homogeneous.h"
+ #include "src/Geometry/RotationBase.h"
+ #include "src/Geometry/Rotation2D.h"
+ #include "src/Geometry/Quaternion.h"
+ #include "src/Geometry/AngleAxis.h"
+ #include "src/Geometry/Transform.h"
+ #include "src/Geometry/Translation.h"
+ #include "src/Geometry/Scaling.h"
+ #include "src/Geometry/Hyperplane.h"
+ #include "src/Geometry/ParametrizedLine.h"
+ #include "src/Geometry/AlignedBox.h"
+ #include "src/Geometry/Umeyama.h"
+
+ #if defined EIGEN_VECTORIZE_SSE
+ #include "src/Geometry/arch/Geometry_SSE.h"
+ #endif
+#endif
+
+#ifdef EIGEN2_SUPPORT
+#include "src/Eigen2Support/Geometry/All.h"
+#endif
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_GEOMETRY_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
+
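
A short sketch exercising two of the listed features, the cross product and a quaternion rotation:

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      Eigen::Vector3d x = Eigen::Vector3d::UnitX();
      Eigen::Vector3d y = Eigen::Vector3d::UnitY();
      std::cout << x.cross(y).transpose() << "\n";  // (0, 0, 1)

      // A 90-degree rotation about Z, built from an angle-axis representation.
      Eigen::Quaterniond q(Eigen::AngleAxisd(M_PI / 2, Eigen::Vector3d::UnitZ()));
      std::cout << (q * x).transpose() << "\n";     // approximately (0, 1, 0)
      return 0;
    }
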
diff --git a/extern/Eigen3/Eigen/Householder b/extern/Eigen3/Eigen/Householder
new file mode 100644
index 00000000000..6b86cf65c55
--- /dev/null
+++ b/extern/Eigen3/Eigen/Householder
@@ -0,0 +1,27 @@
+#ifndef EIGEN_HOUSEHOLDER_MODULE_H
+#define EIGEN_HOUSEHOLDER_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+namespace Eigen {
+
+/** \defgroup Householder_Module Householder module
+ * This module provides Householder transformations.
+ *
+ * \code
+ * #include <Eigen/Householder>
+ * \endcode
+ */
+
+#include "src/Householder/Householder.h"
+#include "src/Householder/HouseholderSequence.h"
+#include "src/Householder/BlockHouseholder.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_HOUSEHOLDER_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/extern/Eigen3/Eigen/Jacobi b/extern/Eigen3/Eigen/Jacobi
new file mode 100644
index 00000000000..afa67681379
--- /dev/null
+++ b/extern/Eigen3/Eigen/Jacobi
@@ -0,0 +1,30 @@
+#ifndef EIGEN_JACOBI_MODULE_H
+#define EIGEN_JACOBI_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+namespace Eigen {
+
+/** \defgroup Jacobi_Module Jacobi module
+ * This module provides Jacobi and Givens rotations.
+ *
+ * \code
+ * #include <Eigen/Jacobi>
+ * \endcode
+ *
+ * In addition to the listed classes, it defines the following two MatrixBase methods to apply a Jacobi or Givens rotation:
+ * - MatrixBase::applyOnTheLeft()
+ * - MatrixBase::applyOnTheRight().
+ */
+
+#include "src/Jacobi/Jacobi.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_JACOBI_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
+
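
A sketch of a Givens rotation built with this module; makeGivens() picks the rotation that zeroes the second coordinate, and applyOnTheLeft() applies it in place:

    #include <Eigen/Jacobi>
    #include <iostream>

    int main()
    {
      Eigen::Vector2f v(3, 4);
      Eigen::JacobiRotation<float> G;
      G.makeGivens(v.x(), v.y());           // pick c, s so the second entry vanishes
      v.applyOnTheLeft(0, 1, G.adjoint());  // rotate entries 0 and 1 in place
      std::cout << v.transpose() << "\n";   // approximately (5, 0)
      return 0;
    }
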
diff --git a/extern/Eigen2/Eigen/LU b/extern/Eigen3/Eigen/LU
index 0ce69456598..226f88ca38a 100644
--- a/extern/Eigen2/Eigen/LU
+++ b/extern/Eigen3/Eigen/LU
@@ -3,7 +3,7 @@
#include "Core"
-#include "src/Core/util/DisableMSVCWarnings.h"
+#include "src/Core/util/DisableStupidWarnings.h"
namespace Eigen {
@@ -18,12 +18,25 @@ namespace Eigen {
* \endcode
*/
-#include "src/LU/LU.h"
+#include "src/misc/Solve.h"
+#include "src/misc/Kernel.h"
+#include "src/misc/Image.h"
+#include "src/LU/FullPivLU.h"
+#include "src/LU/PartialPivLU.h"
#include "src/LU/Determinant.h"
#include "src/LU/Inverse.h"
+#if defined EIGEN_VECTORIZE_SSE
+ #include "src/LU/arch/Inverse_SSE.h"
+#endif
+
+#ifdef EIGEN2_SUPPORT
+ #include "src/Eigen2Support/LU.h"
+#endif
+
} // namespace Eigen
-#include "src/Core/util/EnableMSVCWarnings.h"
+#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_LU_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
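
A sketch of the two pivoting strategies now split into PartialPivLU and FullPivLU, as included above:

    #include <Eigen/LU>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d A = Eigen::Matrix3d::Random();
      Eigen::Vector3d b = Eigen::Vector3d::Random();

      // Partial pivoting: fast, but requires an invertible matrix.
      Eigen::Vector3d x = A.partialPivLu().solve(b);

      // Full pivoting: slower but rank-revealing.
      Eigen::FullPivLU<Eigen::Matrix3d> lu(A);
      std::cout << "rank: " << lu.rank()
                << ", residual: " << (A * x - b).norm() << "\n";
      return 0;
    }
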
diff --git a/extern/Eigen3/Eigen/LeastSquares b/extern/Eigen3/Eigen/LeastSquares
new file mode 100644
index 00000000000..93a6302dcd9
--- /dev/null
+++ b/extern/Eigen3/Eigen/LeastSquares
@@ -0,0 +1,36 @@
+#ifndef EIGEN_REGRESSION_MODULE_H
+#define EIGEN_REGRESSION_MODULE_H
+
+#ifndef EIGEN2_SUPPORT
+#error LeastSquares is only available in Eigen2 support mode (define EIGEN2_SUPPORT)
+#endif
+
+// exclude from normal eigen3-only documentation
+#ifdef EIGEN2_SUPPORT
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include "Eigenvalues"
+#include "Geometry"
+
+namespace Eigen {
+
+/** \defgroup LeastSquares_Module LeastSquares module
+ * This module provides linear regression and related features.
+ *
+ * \code
+ * #include <Eigen/LeastSquares>
+ * \endcode
+ */
+
+#include "src/Eigen2Support/LeastSquares.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN2_SUPPORT
+
+#endif // EIGEN_REGRESSION_MODULE_H
diff --git a/extern/Eigen3/Eigen/QR b/extern/Eigen3/Eigen/QR
new file mode 100644
index 00000000000..97c1788ee30
--- /dev/null
+++ b/extern/Eigen3/Eigen/QR
@@ -0,0 +1,45 @@
+#ifndef EIGEN_QR_MODULE_H
+#define EIGEN_QR_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include "Cholesky"
+#include "Jacobi"
+#include "Householder"
+
+namespace Eigen {
+
+/** \defgroup QR_Module QR module
+ *
+ *
+ *
+ * This module provides various QR decompositions.
+ * This module also provides some MatrixBase methods, including:
+ * - MatrixBase::qr()
+ *
+ * \code
+ * #include <Eigen/QR>
+ * \endcode
+ */
+
+#include "src/misc/Solve.h"
+#include "src/QR/HouseholderQR.h"
+#include "src/QR/FullPivHouseholderQR.h"
+#include "src/QR/ColPivHouseholderQR.h"
+
+#ifdef EIGEN2_SUPPORT
+#include "src/Eigen2Support/QR.h"
+#endif
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#ifdef EIGEN2_SUPPORT
+#include "Eigenvalues"
+#endif
+
+#endif // EIGEN_QR_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
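
A sketch using the column-pivoting variant included above for a least-squares solve of an overdetermined system:

    #include <Eigen/QR>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);  // 5 equations, 3 unknowns
      Eigen::VectorXd b = Eigen::VectorXd::Random(5);
      Eigen::VectorXd x = A.colPivHouseholderQr().solve(b);
      std::cout << "least-squares residual: " << (A * x - b).norm() << "\n";
      return 0;
    }
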
diff --git a/extern/Eigen3/Eigen/QtAlignedMalloc b/extern/Eigen3/Eigen/QtAlignedMalloc
new file mode 100644
index 00000000000..46f7d83b70f
--- /dev/null
+++ b/extern/Eigen3/Eigen/QtAlignedMalloc
@@ -0,0 +1,34 @@
+
+#ifndef EIGEN_QTMALLOC_MODULE_H
+#define EIGEN_QTMALLOC_MODULE_H
+
+#include "Core"
+
+#if (!EIGEN_MALLOC_ALREADY_ALIGNED)
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+void *qMalloc(size_t size)
+{
+ return Eigen::internal::aligned_malloc(size);
+}
+
+void qFree(void *ptr)
+{
+ Eigen::internal::aligned_free(ptr);
+}
+
+void *qRealloc(void *ptr, size_t size)
+{
+ void* newPtr = Eigen::internal::aligned_malloc(size);
+ memcpy(newPtr, ptr, size);
+ Eigen::internal::aligned_free(ptr);
+ return newPtr;
+}
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif
+
+#endif // EIGEN_QTMALLOC_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/extern/Eigen2/Eigen/SVD b/extern/Eigen3/Eigen/SVD
index eef05564bde..d24471fd724 100644
--- a/extern/Eigen2/Eigen/SVD
+++ b/extern/Eigen3/Eigen/SVD
@@ -1,15 +1,17 @@
#ifndef EIGEN_SVD_MODULE_H
#define EIGEN_SVD_MODULE_H
-#include "Core"
+#include "QR"
+#include "Householder"
+#include "Jacobi"
-#include "src/Core/util/DisableMSVCWarnings.h"
+#include "src/Core/util/DisableStupidWarnings.h"
namespace Eigen {
/** \defgroup SVD_Module SVD module
*
- * \nonstableyet
+ *
*
* This module provides SVD decomposition for (currently) real matrices.
* This decomposition is accessible via the following MatrixBase method:
@@ -20,10 +22,17 @@ namespace Eigen {
* \endcode
*/
-#include "src/SVD/SVD.h"
+#include "src/misc/Solve.h"
+#include "src/SVD/JacobiSVD.h"
+#include "src/SVD/UpperBidiagonalization.h"
+
+#ifdef EIGEN2_SUPPORT
+#include "src/Eigen2Support/SVD.h"
+#endif
} // namespace Eigen
-#include "src/Core/util/EnableMSVCWarnings.h"
+#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_SVD_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
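
A sketch of the JacobiSVD class that replaces the old SVD implementation above; thin unitaries suffice when the goal is solving:

    #include <Eigen/SVD>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 3);
      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "singular values: " << svd.singularValues().transpose() << "\n";
      Eigen::VectorXd x = svd.solve(Eigen::VectorXd::Random(4));  // least squares
      std::cout << "x: " << x.transpose() << "\n";
      return 0;
    }
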
diff --git a/extern/Eigen3/Eigen/Sparse b/extern/Eigen3/Eigen/Sparse
new file mode 100644
index 00000000000..7425b3a412a
--- /dev/null
+++ b/extern/Eigen3/Eigen/Sparse
@@ -0,0 +1,69 @@
+#ifndef EIGEN_SPARSE_MODULE_H
+#define EIGEN_SPARSE_MODULE_H
+
+#include "Core"
+
+#include "src/Core/util/DisableStupidWarnings.h"
+
+#include <vector>
+#include <map>
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
+
+#ifdef EIGEN2_SUPPORT
+#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
+#endif
+
+#ifndef EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
+#error The sparse module API is not stable yet. To use it anyway, please define the EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET preprocessor token.
+#endif
+
+namespace Eigen {
+
+/** \defgroup Sparse_Module Sparse module
+ *
+ *
+ *
+ * See the \ref TutorialSparse "Sparse tutorial"
+ *
+ * \code
+ * #include <Eigen/Sparse>
+ * \endcode
+ */
+
+/** The type used to identify a general sparse storage. */
+struct Sparse {};
+
+#include "src/Sparse/SparseUtil.h"
+#include "src/Sparse/SparseMatrixBase.h"
+#include "src/Sparse/CompressedStorage.h"
+#include "src/Sparse/AmbiVector.h"
+#include "src/Sparse/SparseMatrix.h"
+#include "src/Sparse/DynamicSparseMatrix.h"
+#include "src/Sparse/MappedSparseMatrix.h"
+#include "src/Sparse/SparseVector.h"
+#include "src/Sparse/CoreIterators.h"
+#include "src/Sparse/SparseBlock.h"
+#include "src/Sparse/SparseTranspose.h"
+#include "src/Sparse/SparseCwiseUnaryOp.h"
+#include "src/Sparse/SparseCwiseBinaryOp.h"
+#include "src/Sparse/SparseDot.h"
+#include "src/Sparse/SparseAssign.h"
+#include "src/Sparse/SparseRedux.h"
+#include "src/Sparse/SparseFuzzy.h"
+#include "src/Sparse/SparseProduct.h"
+#include "src/Sparse/SparseSparseProduct.h"
+#include "src/Sparse/SparseDenseProduct.h"
+#include "src/Sparse/SparseDiagonalProduct.h"
+#include "src/Sparse/SparseTriangularView.h"
+#include "src/Sparse/SparseSelfAdjointView.h"
+#include "src/Sparse/TriangularSolver.h"
+#include "src/Sparse/SparseView.h"
+
+} // namespace Eigen
+
+#include "src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SPARSE_MODULE_H
+
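
A sketch of basic usage under the stability guard above; insert() and finalize() are the fill API of this Eigen generation, so treat the exact calls as version-dependent:

    #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET  // acknowledge the guard
    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> m(4, 4);  // column-major by default
      m.insert(0, 0) = 2.0;                 // each position filled at most once
      m.insert(1, 1) = 3.0;
      m.insert(3, 1) = -1.0;
      m.finalize();                         // compress the storage
      std::cout << "non-zeros: " << m.nonZeros() << "\n";
      return 0;
    }
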
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseRedux.h b/extern/Eigen3/Eigen/StdDeque
index f0d3705488e..a4f96232d8c 100644
--- a/extern/Eigen2/Eigen/src/Sparse/SparseRedux.h
+++ b/extern/Eigen3/Eigen/StdDeque
@@ -1,7 +1,8 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -22,19 +23,20 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_SPARSEREDUX_H
-#define EIGEN_SPARSEREDUX_H
-
-template<typename Derived>
-typename ei_traits<Derived>::Scalar
-SparseMatrixBase<Derived>::sum() const
-{
- ei_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
- Scalar res = 0;
- for (int j=0; j<outerSize(); ++j)
- for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
- res += iter.value();
- return res;
-}
-
-#endif // EIGEN_SPARSEREDUX_H
+#ifndef EIGEN_STDDEQUE_MODULE_H
+#define EIGEN_STDDEQUE_MODULE_H
+
+#include "Core"
+#include <deque>
+
+#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */
+
+#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...)
+
+#else
+
+#include "src/StlSupport/StdDeque.h"
+
+#endif
+
+#endif // EIGEN_STDDEQUE_MODULE_H
diff --git a/extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp b/extern/Eigen3/Eigen/StdList
index dacb05d3d1f..d914ded4f93 100644
--- a/extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp
+++ b/extern/Eigen3/Eigen/StdList
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -22,22 +22,20 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_EXTERN_INSTANTIATIONS
-#define EIGEN_EXTERN_INSTANTIATIONS
-#endif
-#include "../../Core"
-#undef EIGEN_EXTERN_INSTANTIATIONS
+#ifndef EIGEN_STDLIST_MODULE_H
+#define EIGEN_STDLIST_MODULE_H
+
+#include "Core"
+#include <list>
-#include "../../QR"
+#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */
-namespace Eigen
-{
+#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...)
-template static void ei_tridiagonal_qr_step(float* , float* , int, int, float* , int);
-template static void ei_tridiagonal_qr_step(double* , double* , int, int, double* , int);
-template static void ei_tridiagonal_qr_step(float* , float* , int, int, std::complex<float>* , int);
-template static void ei_tridiagonal_qr_step(double* , double* , int, int, std::complex<double>* , int);
+#else
-EIGEN_QR_MODULE_INSTANTIATE();
+#include "src/StlSupport/StdList.h"
+
+#endif
-}
+#endif // EIGEN_STDLIST_MODULE_H
diff --git a/extern/Eigen2/Eigen/src/Core/CoreInstantiations.cpp b/extern/Eigen3/Eigen/StdVector
index 56a9448917a..3d8995e5aae 100644
--- a/extern/Eigen2/Eigen/src/Core/CoreInstantiations.cpp
+++ b/extern/Eigen3/Eigen/StdVector
@@ -1,7 +1,8 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -22,26 +23,20 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifdef EIGEN_EXTERN_INSTANTIATIONS
-#undef EIGEN_EXTERN_INSTANTIATIONS
-#endif
+#ifndef EIGEN_STDVECTOR_MODULE_H
+#define EIGEN_STDVECTOR_MODULE_H
+
+#include "Core"
+#include <vector>
-#include "../../Core"
+#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */
-namespace Eigen
-{
+#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...)
-#define EIGEN_INSTANTIATE_PRODUCT(TYPE) \
-template static void ei_cache_friendly_product<TYPE>( \
- int _rows, int _cols, int depth, \
- bool _lhsRowMajor, const TYPE* _lhs, int _lhsStride, \
- bool _rhsRowMajor, const TYPE* _rhs, int _rhsStride, \
- bool resRowMajor, TYPE* res, int resStride)
+#else
-EIGEN_INSTANTIATE_PRODUCT(float);
-EIGEN_INSTANTIATE_PRODUCT(double);
-EIGEN_INSTANTIATE_PRODUCT(int);
-EIGEN_INSTANTIATE_PRODUCT(std::complex<float>);
-EIGEN_INSTANTIATE_PRODUCT(std::complex<double>);
+#include "src/StlSupport/StdVector.h"
+
+#endif
-}
+#endif // EIGEN_STDVECTOR_MODULE_H
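
A sketch of the specialization macro this header provides (a no-op on 64-bit MSVC, per the guard above); it lets a fixed-size vectorizable type live in a plain std::vector:

    #include <Eigen/StdVector>
    #include <vector>

    // Must appear at global scope, before the container type is used.
    EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Matrix2d)

    int main()
    {
      std::vector<Eigen::Matrix2d> mats(8, Eigen::Matrix2d::Identity());
      return mats.size() == 8 ? 0 : 1;
    }
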
diff --git a/extern/Eigen3/Eigen/src/Cholesky/LDLT.h b/extern/Eigen3/Eigen/src/Cholesky/LDLT.h
new file mode 100644
index 00000000000..f47b2ea5669
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Cholesky/LDLT.h
@@ -0,0 +1,484 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Keir Mierle <mierle@gmail.com>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_LDLT_H
+#define EIGEN_LDLT_H
+
+namespace internal {
+template<typename MatrixType, int UpLo> struct LDLT_Traits;
+}
+
+/** \ingroup cholesky_Module
+ *
+ * \class LDLT
+ *
+ * \brief Robust Cholesky decomposition of a matrix with pivoting
+ *
+ * \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition
+ *
+ * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite
+ * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L
+ * is lower triangular with a unit diagonal and D is a diagonal matrix.
+ *
+ * The decomposition uses pivoting to ensure stability, so that L will have
+ * zeros in the bottom right (n - rank(A)) submatrix. Avoiding the square root
+ * on D also stabilizes the computation.
+ *
+ * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
+ * decomposition to determine whether a system of equations has a solution.
+ *
+ * \sa MatrixBase::ldlt(), class LLT
+ */
+ /* THIS PART OF THE DOX IS CURRENTLY DISABLED BECAUSE IT IS INACCURATE DUE TO A BUG IN THE DECOMPOSITION CODE
+ * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
+ * the strict lower part does not have to store correct values.
+ */
+template<typename _MatrixType, int _UpLo> class LDLT
+{
+ public:
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options & ~RowMajorBit, // these are the options for the TmpMatrixType, we need a ColMajor matrix here!
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ UpLo = _UpLo
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
+
+ typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
+ typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
+
+ typedef internal::LDLT_Traits<MatrixType,UpLo> Traits;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via LDLT::compute(const MatrixType&).
+ */
+ LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa LDLT()
+ */
+ LDLT(Index size)
+ : m_matrix(size, size),
+ m_transpositions(size),
+ m_temporary(size),
+ m_isInitialized(false)
+ {}
+
+ LDLT(const MatrixType& matrix)
+ : m_matrix(matrix.rows(), matrix.cols()),
+ m_transpositions(matrix.rows()),
+ m_temporary(matrix.rows()),
+ m_isInitialized(false)
+ {
+ compute(matrix);
+ }
+
+ /** \returns a view of the upper triangular matrix U */
+ inline typename Traits::MatrixU matrixU() const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ return Traits::getU(m_matrix);
+ }
+
+ /** \returns a view of the lower triangular matrix L */
+ inline typename Traits::MatrixL matrixL() const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ return Traits::getL(m_matrix);
+ }
+
+ /** \returns the permutation matrix P as a transposition sequence.
+ */
+ inline const TranspositionType& transpositionsP() const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ return m_transpositions;
+ }
+
+ /** \returns the coefficients of the diagonal matrix D */
+ inline Diagonal<const MatrixType> vectorD(void) const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ return m_matrix.diagonal();
+ }
+
+ /** \returns true if the matrix is positive (semidefinite) */
+ inline bool isPositive(void) const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ return m_sign == 1;
+ }
+
+ #ifdef EIGEN2_SUPPORT
+ inline bool isPositiveDefinite() const
+ {
+ return isPositive();
+ }
+ #endif
+
+ /** \returns true if the matrix is negative (semidefinite) */
+ inline bool isNegative(void) const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ return m_sign == -1;
+ }
+
+ /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
+ *
+ * \note_about_checking_solutions
+ *
+ * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
+ * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$,
+ * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
+ * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
+ * least-squares solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
+ * computes the least-squares solution of \f$ A x = b \f$ if \f$ A \f$ is singular.
+ *
+ * \sa MatrixBase::ldlt()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<LDLT, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ eigen_assert(m_matrix.rows()==b.rows()
+ && "LDLT::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<LDLT, Rhs>(*this, b.derived());
+ }
+
+ #ifdef EIGEN2_SUPPORT
+ template<typename OtherDerived, typename ResultType>
+ bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+ {
+ *result = this->solve(b);
+ return true;
+ }
+ #endif
+
+ template<typename Derived>
+ bool solveInPlace(MatrixBase<Derived> &bAndX) const;
+
+ LDLT& compute(const MatrixType& matrix);
+
+ /** \returns the internal LDLT decomposition matrix
+ *
+ * TODO: document the storage layout
+ */
+ inline const MatrixType& matrixLDLT() const
+ {
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ return m_matrix;
+ }
+
+ MatrixType reconstructedMatrix() const;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ protected:
+
+ /** \internal
+ * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
+ * The strict upper part is used during the decomposition, the strict lower
+ * part corresponds to the coefficients of L (its diagonal is equal to 1 and
+ * is not stored), and the diagonal entries correspond to D.
+ */
+ MatrixType m_matrix;
+ TranspositionType m_transpositions;
+ TmpMatrixType m_temporary;
+ int m_sign;
+ bool m_isInitialized;
+};
+
+namespace internal {
+
+template<int UpLo> struct ldlt_inplace;
+
+template<> struct ldlt_inplace<Lower>
+{
+ template<typename MatrixType, typename TranspositionType, typename Workspace>
+ static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ eigen_assert(mat.rows()==mat.cols());
+ const Index size = mat.rows();
+
+ if (size <= 1)
+ {
+ transpositions.setIdentity();
+ if(sign)
+ *sign = real(mat.coeff(0,0))>0 ? 1:-1;
+ return true;
+ }
+
+ RealScalar cutoff = 0, biggest_in_corner;
+
+ for (Index k = 0; k < size; ++k)
+ {
+ // Find largest diagonal element
+ Index index_of_biggest_in_corner;
+ biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
+ index_of_biggest_in_corner += k;
+
+ if(k == 0)
+ {
+ // The biggest overall is the point of reference to which further diagonals
+ // are compared; if any diagonal is negligible compared
+ // to the largest overall, the algorithm bails.
+ cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
+
+ if(sign)
+ *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1;
+ }
+
+ // Finish early if the matrix is not full rank.
+ if(biggest_in_corner < cutoff)
+ {
+ for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
+ break;
+ }
+
+ transpositions.coeffRef(k) = index_of_biggest_in_corner;
+ if(k != index_of_biggest_in_corner)
+ {
+ // apply the transposition while taking care to consider only
+ // the lower triangular part
+ Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element
+ mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));
+ mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));
+ std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner));
+ for(int i=k+1;i<index_of_biggest_in_corner;++i)
+ {
+ Scalar tmp = mat.coeffRef(i,k);
+ mat.coeffRef(i,k) = conj(mat.coeffRef(index_of_biggest_in_corner,i));
+ mat.coeffRef(index_of_biggest_in_corner,i) = conj(tmp);
+ }
+ if(NumTraits<Scalar>::IsComplex)
+ mat.coeffRef(index_of_biggest_in_corner,k) = conj(mat.coeff(index_of_biggest_in_corner,k));
+ }
+
+ // partition the matrix:
+ // A00 | - | -
+ // lu = A10 | A11 | -
+ // A20 | A21 | A22
+ Index rs = size - k - 1;
+ Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
+ Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
+ Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
+
+ if(k>0)
+ {
+ temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint();
+ mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
+ if(rs>0)
+ A21.noalias() -= A20 * temp.head(k);
+ }
+ if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
+ A21 /= mat.coeffRef(k,k);
+ }
+
+ return true;
+ }
+};
+
+template<> struct ldlt_inplace<Upper>
+{
+ template<typename MatrixType, typename TranspositionType, typename Workspace>
+ static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+ {
+ Transpose<MatrixType> matt(mat);
+ return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
+ }
+};
+
+template<typename MatrixType> struct LDLT_Traits<MatrixType,Lower>
+{
+ typedef TriangularView<MatrixType, UnitLower> MatrixL;
+ typedef TriangularView<typename MatrixType::AdjointReturnType, UnitUpper> MatrixU;
+ inline static MatrixL getL(const MatrixType& m) { return m; }
+ inline static MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
+{
+ typedef TriangularView<typename MatrixType::AdjointReturnType, UnitLower> MatrixL;
+ typedef TriangularView<MatrixType, UnitUpper> MatrixU;
+ inline static MatrixL getL(const MatrixType& m) { return m.adjoint(); }
+ inline static MatrixU getU(const MatrixType& m) { return m; }
+};
+
+} // end namespace internal
+
+/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix
+ */
+template<typename MatrixType, int _UpLo>
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
+{
+ eigen_assert(a.rows()==a.cols());
+ const Index size = a.rows();
+
+ m_matrix = a;
+
+ m_transpositions.resize(size);
+ m_isInitialized = false;
+ m_temporary.resize(size);
+
+ internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
+
+ m_isInitialized = true;
+ return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int _UpLo, typename Rhs>
+struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
+ : solve_retval_base<LDLT<_MatrixType,_UpLo>, Rhs>
+{
+ typedef LDLT<_MatrixType,_UpLo> LDLTType;
+ EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().matrixLDLT().rows());
+ // dst = P b
+ dst = dec().transpositionsP() * rhs();
+
+ // dst = L^-1 (P b)
+ dec().matrixL().solveInPlace(dst);
+
+ // dst = D^-1 (L^-1 P b)
+ // more precisely, use pseudo-inverse of D (see bug 241)
+ using std::abs;
+ using std::max;
+ typedef typename LDLTType::MatrixType MatrixType;
+ typedef typename LDLTType::Scalar Scalar;
+ typedef typename LDLTType::RealScalar RealScalar;
+ const Diagonal<const MatrixType> vectorD = dec().vectorD();
+ RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
+ RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
+ for (Index i = 0; i < vectorD.size(); ++i) {
+ if(abs(vectorD(i)) > tolerance)
+ dst.row(i) /= vectorD(i);
+ else
+ dst.row(i).setZero();
+ }
+
+ // dst = L^-T (D^-1 L^-1 P b)
+ dec().matrixU().solveInPlace(dst);
+
+ // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
+ dst = dec().transpositionsP().transpose() * dst;
+ }
+};
+}
+
+/** \internal use x = ldlt_object.solve(x);
+ *
+ * This is the \em in-place version of solve().
+ *
+ * \param bAndX represents both the right-hand side matrix b and result x.
+ *
+ * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.
+ *
+ * This version avoids a copy when the right hand side matrix b is not
+ * needed anymore.
+ *
+ * \sa LDLT::solve(), MatrixBase::ldlt()
+ */
+template<typename MatrixType,int _UpLo>
+template<typename Derived>
+bool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
+{
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ const Index size = m_matrix.rows();
+ eigen_assert(size == bAndX.rows());
+
+ bAndX = this->solve(bAndX);
+
+ return true;
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: P^T L D L^* P.
+ * This function is provided for debug purpose. */
+template<typename MatrixType, int _UpLo>
+MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
+{
+ eigen_assert(m_isInitialized && "LDLT is not initialized.");
+ const Index size = m_matrix.rows();
+ MatrixType res(size,size);
+
+ // P
+ res.setIdentity();
+ res = transpositionsP() * res;
+ // L^* P
+ res = matrixU() * res;
+ // D(L^*P)
+ res = vectorD().asDiagonal() * res;
+ // L(DL^*P)
+ res = matrixL() * res;
+ // P^T (LDL^*P)
+ res = transpositionsP().transpose() * res;
+
+ return res;
+}
+
+/** \cholesky_module
+ * \returns the Cholesky decomposition with full pivoting without square root of \c *this
+ */
+template<typename MatrixType, unsigned int UpLo>
+inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
+SelfAdjointView<MatrixType, UpLo>::ldlt() const
+{
+ return LDLT<PlainObject,UpLo>(m_matrix);
+}
+
+/** \cholesky_module
+ * \returns the Cholesky decomposition with full pivoting without square root of \c *this
+ */
+template<typename Derived>
+inline const LDLT<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::ldlt() const
+{
+ return LDLT<PlainObject>(derived());
+}
+
+#endif // EIGEN_LDLT_H
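
A sketch tying together the accessors defined above (the sign query, the diagonal D, and a pivoted solve):

    #include <Eigen/Cholesky>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d A;
      A <<  2, -1,  0,
           -1,  2, -1,
            0, -1,  2;                       // positive definite tridiagonal
      Eigen::LDLT<Eigen::Matrix3d> ldlt(A);
      std::cout << "isPositive: " << ldlt.isPositive() << "\n";
      std::cout << "D: " << ldlt.vectorD().transpose() << "\n";
      Eigen::Vector3d x = ldlt.solve(Eigen::Vector3d::Ones());
      std::cout << "residual: " << (A * x - Eigen::Vector3d::Ones()).norm() << "\n";
      return 0;
    }
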
diff --git a/extern/Eigen3/Eigen/src/Cholesky/LLT.h b/extern/Eigen3/Eigen/src/Cholesky/LLT.h
new file mode 100644
index 00000000000..a4ee5b11cb9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Cholesky/LLT.h
@@ -0,0 +1,386 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_LLT_H
+#define EIGEN_LLT_H
+
+namespace internal{
+template<typename MatrixType, int UpLo> struct LLT_Traits;
+}
+
+/** \ingroup cholesky_Module
+ *
+ * \class LLT
+ *
+ * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition
+ *
+ * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite
+ * matrix A such that A = LL^* = U^*U, where L is lower triangular.
+ *
+ * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b,
+ * for that purpose we recommend the Cholesky decomposition without square root (LDLT), which is more
+ * stable and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many
+ * other situations, like generalised eigenproblems with hermitian matrices.
+ *
+ * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices,
+ * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations
+ * has a solution.
+ *
+ * \sa MatrixBase::llt(), class LDLT
+ */
+ /* HEY THIS DOX IS DISABLED BECAUSE THERE'S A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)
+ * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
+ * the strict lower part does not have to store correct values.
+ */
+template<typename _MatrixType, int _UpLo> class LLT
+{
+ public:
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ enum {
+ PacketSize = internal::packet_traits<Scalar>::size,
+ AlignmentMask = int(PacketSize)-1,
+ UpLo = _UpLo
+ };
+
+ typedef internal::LLT_Traits<MatrixType,UpLo> Traits;
+
+ /**
+ * \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via LLT::compute(const MatrixType&).
+ */
+ LLT() : m_matrix(), m_isInitialized(false) {}
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa LLT()
+ */
+ LLT(Index size) : m_matrix(size, size),
+ m_isInitialized(false) {}
+
+ LLT(const MatrixType& matrix)
+ : m_matrix(matrix.rows(), matrix.cols()),
+ m_isInitialized(false)
+ {
+ compute(matrix);
+ }
+
+ /** \returns a view of the upper triangular matrix U */
+ inline typename Traits::MatrixU matrixU() const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ return Traits::getU(m_matrix);
+ }
+
+ /** \returns a view of the lower triangular matrix L */
+ inline typename Traits::MatrixL matrixL() const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ return Traits::getL(m_matrix);
+ }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+ *
+ * Since this LLT class assumes that the matrix A is invertible, the solution
+ * theoretically exists and is unique regardless of b.
+ *
+ * Example: \include LLT_solve.cpp
+ * Output: \verbinclude LLT_solve.out
+ *
+ * \sa solveInPlace(), MatrixBase::llt()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<LLT, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ eigen_assert(m_matrix.rows()==b.rows()
+ && "LLT::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<LLT, Rhs>(*this, b.derived());
+ }
+
+ #ifdef EIGEN2_SUPPORT
+ template<typename OtherDerived, typename ResultType>
+ bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+ {
+ *result = this->solve(b);
+ return true;
+ }
+
+ bool isPositiveDefinite() const { return true; }
+ #endif
+
+ template<typename Derived>
+ void solveInPlace(MatrixBase<Derived> &bAndX) const;
+
+ LLT& compute(const MatrixType& matrix);
+
+ /** \returns the LLT decomposition matrix
+ *
+ * TODO: document the storage layout
+ */
+ inline const MatrixType& matrixLLT() const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ return m_matrix;
+ }
+
+ MatrixType reconstructedMatrix() const;
+
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if the computation was successful,
+ * \c NumericalIssue if the matrix appears not to be positive definite.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ return m_info;
+ }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ protected:
+ /** \internal
+ * Used to compute and store L
+ * The strict upper part is not used and even not initialized.
+ */
+ MatrixType m_matrix;
+ bool m_isInitialized;
+ ComputationInfo m_info;
+};
+
+namespace internal {
+
+template<int UpLo> struct llt_inplace;
+
+template<> struct llt_inplace<Lower>
+{
+ template<typename MatrixType>
+ static typename MatrixType::Index unblocked(MatrixType& mat)
+ {
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+
+ eigen_assert(mat.rows()==mat.cols());
+ const Index size = mat.rows();
+ for(Index k = 0; k < size; ++k)
+ {
+ Index rs = size-k-1; // remaining size
+
+ Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
+ Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
+ Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
+
+ RealScalar x = real(mat.coeff(k,k));
+ if (k>0) x -= A10.squaredNorm();
+ if (x<=RealScalar(0))
+ return k;
+ mat.coeffRef(k,k) = x = sqrt(x);
+ if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint();
+ if (rs>0) A21 *= RealScalar(1)/x;
+ }
+ return -1;
+ }
+
+ template<typename MatrixType>
+ static typename MatrixType::Index blocked(MatrixType& m)
+ {
+ typedef typename MatrixType::Index Index;
+ eigen_assert(m.rows()==m.cols());
+ Index size = m.rows();
+ if(size<32)
+ return unblocked(m);
+
+ Index blockSize = size/8;
+ blockSize = (blockSize/16)*16;
+ blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128));
+
+ for (Index k=0; k<size; k+=blockSize)
+ {
+ // partition the matrix:
+ // A00 | - | -
+ // lu = A10 | A11 | -
+ // A20 | A21 | A22
+ Index bs = (std::min)(blockSize, size-k);
+ Index rs = size - k - bs;
+ Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs);
+ Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs);
+ Block<MatrixType,Dynamic,Dynamic> A22(m,k+bs,k+bs,rs,rs);
+
+ Index ret;
+ if((ret=unblocked(A11))>=0) return k+ret;
+ if(rs>0) A11.adjoint().template triangularView<Upper>().template solveInPlace<OnTheRight>(A21);
+ if(rs>0) A22.template selfadjointView<Lower>().rankUpdate(A21,-1); // bottleneck
+ }
+ return -1;
+ }
+};
+
+template<> struct llt_inplace<Upper>
+{
+ template<typename MatrixType>
+ static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat)
+ {
+ Transpose<MatrixType> matt(mat);
+ return llt_inplace<Lower>::unblocked(matt);
+ }
+ template<typename MatrixType>
+ static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat)
+ {
+ Transpose<MatrixType> matt(mat);
+ return llt_inplace<Lower>::blocked(matt);
+ }
+};
+
+template<typename MatrixType> struct LLT_Traits<MatrixType,Lower>
+{
+ typedef TriangularView<MatrixType, Lower> MatrixL;
+ typedef TriangularView<typename MatrixType::AdjointReturnType, Upper> MatrixU;
+ inline static MatrixL getL(const MatrixType& m) { return m; }
+ inline static MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+ static bool inplace_decomposition(MatrixType& m)
+ { return llt_inplace<Lower>::blocked(m)==-1; }
+};
+
+template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
+{
+ typedef TriangularView<typename MatrixType::AdjointReturnType, Lower> MatrixL;
+ typedef TriangularView<MatrixType, Upper> MatrixU;
+ inline static MatrixL getL(const MatrixType& m) { return m.adjoint(); }
+ inline static MatrixU getU(const MatrixType& m) { return m; }
+ static bool inplace_decomposition(MatrixType& m)
+ { return llt_inplace<Upper>::blocked(m)==-1; }
+};
+
+} // end namespace internal
+
+/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of the matrix \a a
+  *
+  * \returns a reference to *this
+  */
+template<typename MatrixType, int _UpLo>
+LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
+{
+  eigen_assert(a.rows()==a.cols());
+ const Index size = a.rows();
+ m_matrix.resize(size, size);
+ m_matrix = a;
+
+ m_isInitialized = true;
+ bool ok = Traits::inplace_decomposition(m_matrix);
+ m_info = ok ? Success : NumericalIssue;
+
+ return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int UpLo, typename Rhs>
+struct solve_retval<LLT<_MatrixType, UpLo>, Rhs>
+ : solve_retval_base<LLT<_MatrixType, UpLo>, Rhs>
+{
+ typedef LLT<_MatrixType,UpLo> LLTType;
+ EIGEN_MAKE_SOLVE_HELPERS(LLTType,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dst = rhs();
+ dec().solveInPlace(dst);
+ }
+};
+}
+
+/** \internal use x = llt_object.solve(x);
+ *
+ * This is the \em in-place version of solve().
+ *
+ * \param bAndX represents both the right-hand side matrix b and result x.
+ *
+  * This function does not report failure. If you need to check for the existence of solutions, use another decomposition like LU, QR, or SVD.
+ *
+ * This version avoids a copy when the right hand side matrix b is not
+ * needed anymore.
+ *
+ * \sa LLT::solve(), MatrixBase::llt()
+ */
+template<typename MatrixType, int _UpLo>
+template<typename Derived>
+void LLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
+{
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ eigen_assert(m_matrix.rows()==bAndX.rows());
+ matrixL().solveInPlace(bAndX);
+ matrixU().solveInPlace(bAndX);
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: L L^*.
+  * This function is provided for debugging purposes. */
+template<typename MatrixType, int _UpLo>
+MatrixType LLT<MatrixType,_UpLo>::reconstructedMatrix() const
+{
+ eigen_assert(m_isInitialized && "LLT is not initialized.");
+ return matrixL() * matrixL().adjoint().toDenseMatrix();
+}
+
+/** \cholesky_module
+ * \returns the LLT decomposition of \c *this
+ */
+template<typename Derived>
+inline const LLT<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::llt() const
+{
+ return LLT<PlainObject>(derived());
+}
+
+/** \cholesky_module
+ * \returns the LLT decomposition of \c *this
+ */
+template<typename MatrixType, unsigned int UpLo>
+inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
+SelfAdjointView<MatrixType, UpLo>::llt() const
+{
+ return LLT<PlainObject,UpLo>(m_matrix);
+}
+
+#endif // EIGEN_LLT_H
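
A minimal usage sketch of the LLT API added above (illustration only; assumes <Eigen/Dense> from this Eigen 3 tree and a symmetric positive definite input):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2d A;
      A << 4, 1,
           1, 3;                            // symmetric positive definite
      Eigen::Vector2d b(1, 2);

      Eigen::LLT<Eigen::Matrix2d> llt(A);   // factorizes A = L * L^T
      if (llt.info() == Eigen::Success)
      {
        Eigen::Vector2d x = llt.solve(b);   // solves A x = b via L, then L^T
        std::cout << (A * x - b).norm() << "\n";                     // ~0
        std::cout << (llt.reconstructedMatrix() - A).norm() << "\n"; // ~0
      }
      return 0;
    }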
diff --git a/extern/Eigen3/Eigen/src/Core/Array.h b/extern/Eigen3/Eigen/src/Core/Array.h
new file mode 100644
index 00000000000..a3a2167ad3e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Array.h
@@ -0,0 +1,322 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ARRAY_H
+#define EIGEN_ARRAY_H
+
+/** \class Array
+ * \ingroup Core_Module
+ *
+ * \brief General-purpose arrays with easy API for coefficient-wise operations
+ *
+ * The %Array class is very similar to the Matrix class. It provides
+ * general-purpose one- and two-dimensional arrays. The difference between the
+ * %Array and the %Matrix class is primarily in the API: the API for the
+ * %Array class provides easy access to coefficient-wise operations, while the
+ * API for the %Matrix class provides easy access to linear-algebra
+ * operations.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN.
+ *
+ * \sa \ref TutorialArrayClass, \ref TopicClassHierarchy
+ */
+namespace internal {
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct traits<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+ typedef ArrayXpr XprKind;
+ typedef ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > XprBase;
+};
+}
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+class Array
+ : public PlainObjectBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+ public:
+
+ typedef PlainObjectBase<Array> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Array)
+
+ enum { Options = _Options };
+ typedef typename Base::PlainObject PlainObject;
+
+ protected:
+ template <typename Derived, typename OtherDerived, bool IsVector>
+ friend struct internal::conservative_resize_like_impl;
+
+ using Base::m_storage;
+ public:
+ enum { NeedsToAlign = (!(Options&DontAlign))
+ && SizeAtCompileTime!=Dynamic && ((static_cast<int>(sizeof(Scalar))*SizeAtCompileTime)%16)==0 };
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+
+ using Base::base;
+ using Base::coeff;
+ using Base::coeffRef;
+
+    /**
+      * Writing
+      *   using Base::operator=;
+      * fails on MSVC. Since the code below works with both GCC and MSVC, we skip
+      * the 'using' declaration. This workaround is needed only for operator=.
+      */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived> &other)
+ {
+ return Base::operator=(other);
+ }
+
+ /** Copies the value of the expression \a other into \c *this with automatic resizing.
+ *
+ * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+ * it will be initialized.
+ *
+ * Note that copying a row-vector into a vector (and conversely) is allowed.
+ * The resizing, if any, is then done in the appropriate way so that row-vectors
+ * remain row-vectors and vectors remain vectors.
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Array& operator=(const ArrayBase<OtherDerived>& other)
+ {
+ return Base::_set(other);
+ }
+
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ EIGEN_STRONG_INLINE Array& operator=(const Array& other)
+ {
+ return Base::_set(other);
+ }
+
+ /** Default constructor.
+ *
+      * For fixed-size arrays, does nothing.
+      *
+      * For dynamic-size arrays, creates an empty array of size 0. Does not allocate any memory. Such an array
+      * is called a null array. This constructor is the unique way to create null arrays: resizing
+      * an array to 0 is not supported.
+ *
+ * \sa resize(Index,Index)
+ */
+ EIGEN_STRONG_INLINE explicit Array() : Base()
+ {
+ Base::_check_template_params();
+ EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ // FIXME is it still needed ??
+ /** \internal */
+ Array(internal::constructor_without_unaligned_array_assert)
+ : Base(internal::constructor_without_unaligned_array_assert())
+ {
+ Base::_check_template_params();
+ EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+#endif
+
+ /** Constructs a vector or row-vector with given dimension. \only_for_vectors
+ *
+ * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
+ * it is redundant to pass the dimension here, so it makes more sense to use the default
+      * constructor Array() instead.
+ */
+ EIGEN_STRONG_INLINE explicit Array(Index dim)
+ : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
+ {
+ Base::_check_template_params();
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Array)
+ eigen_assert(dim >= 0);
+ eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
+ EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename T0, typename T1>
+ EIGEN_STRONG_INLINE Array(const T0& x, const T1& y)
+ {
+ Base::_check_template_params();
+ this->template _init2<T0,T1>(x, y);
+ }
+ #else
+    /** constructs an uninitialized array with \a rows rows and \a cols columns.
+      *
+      * This is useful for dynamic-size arrays. For fixed-size arrays,
+      * it is redundant to pass these parameters, so one should use the default constructor
+      * Array() instead. */
+ Array(Index rows, Index cols);
+ /** constructs an initialized 2D vector with given coefficients */
+ Array(const Scalar& x, const Scalar& y);
+ #endif
+
+ /** constructs an initialized 3D vector with given coefficients */
+ EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z)
+ {
+ Base::_check_template_params();
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3)
+ m_storage.data()[0] = x;
+ m_storage.data()[1] = y;
+ m_storage.data()[2] = z;
+ }
+ /** constructs an initialized 4D vector with given coefficients */
+ EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
+ {
+ Base::_check_template_params();
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4)
+ m_storage.data()[0] = x;
+ m_storage.data()[1] = y;
+ m_storage.data()[2] = z;
+ m_storage.data()[3] = w;
+ }
+
+ explicit Array(const Scalar *data);
+
+ /** Constructor copying the value of the expression \a other */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Array(const ArrayBase<OtherDerived>& other)
+ : Base(other.rows() * other.cols(), other.rows(), other.cols())
+ {
+ Base::_check_template_params();
+ Base::_set_noalias(other);
+ }
+ /** Copy constructor */
+ EIGEN_STRONG_INLINE Array(const Array& other)
+ : Base(other.rows() * other.cols(), other.rows(), other.cols())
+ {
+ Base::_check_template_params();
+ Base::_set_noalias(other);
+ }
+ /** Copy constructor with in-place evaluation */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Array(const ReturnByValue<OtherDerived>& other)
+ {
+ Base::_check_template_params();
+ Base::resize(other.rows(), other.cols());
+ other.evalTo(*this);
+ }
+
+ /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other)
+ : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
+ {
+ Base::_check_template_params();
+ Base::resize(other.rows(), other.cols());
+ *this = other;
+ }
+
+ /** Override MatrixBase::swap() since for dynamic-sized matrices of same type it is enough to swap the
+ * data pointers.
+ */
+ template<typename OtherDerived>
+ void swap(ArrayBase<OtherDerived> const & other)
+ { this->_swap(other.derived()); }
+
+ inline Index innerStride() const { return 1; }
+ inline Index outerStride() const { return this->innerSize(); }
+
+ #ifdef EIGEN_ARRAY_PLUGIN
+ #include EIGEN_ARRAY_PLUGIN
+ #endif
+
+ private:
+
+ template<typename MatrixType, typename OtherDerived, bool SwapPointers>
+ friend struct internal::matrix_swap_impl;
+};
+
+/** \defgroup arraytypedefs Global array typedefs
+ * \ingroup Core_Module
+ *
+ * Eigen defines several typedef shortcuts for most common 1D and 2D array types.
+ *
+ * The general patterns are the following:
+ *
+  * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2, \c 3, \c 4 for fixed-size square arrays or \c X for dynamic size,
+  * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
+  * for complex double.
+  *
+  * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size array of floats.
+  *
+  * There are also \c ArraySizeType typedefs, which are self-explanatory. For example, \c Array4cf is
+  * a fixed-size 1D array of 4 complex floats.
+ *
+ * \sa class Array
+ */
+
+#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \
+/** \ingroup arraytypedefs */ \
+typedef Array<Type, Size, Size> Array##SizeSuffix##SizeSuffix##TypeSuffix; \
+/** \ingroup arraytypedefs */ \
+typedef Array<Type, Size, 1> Array##SizeSuffix##TypeSuffix;
+
+#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \
+/** \ingroup arraytypedefs */ \
+typedef Array<Type, Size, Dynamic> Array##Size##X##TypeSuffix; \
+/** \ingroup arraytypedefs */ \
+typedef Array<Type, Dynamic, Size> Array##X##Size##TypeSuffix;
+
+#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
+
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<float>, cf)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
+
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS
+
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE
+
+#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
+using Eigen::Matrix##SizeSuffix##TypeSuffix; \
+using Eigen::Vector##SizeSuffix##TypeSuffix; \
+using Eigen::RowVector##SizeSuffix##TypeSuffix;
+
+#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \
+
+#define EIGEN_USING_ARRAY_TYPEDEFS \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd)
+
+
+#endif // EIGEN_ARRAY_H
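
A short sketch of the typedefs and coefficient-wise semantics documented above (illustration only; assumes <Eigen/Core>):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::Array33d a = Eigen::Array33d::Constant(2.0); // fixed-size 3x3 array of doubles
      Eigen::ArrayXf  v(4);                               // dynamic-size 1D array of floats
      v << 1, 2, 3, 4;

      Eigen::Array33d b = a * a + 1.0;    // '*' and '+' act coefficient-wise on arrays
      std::cout << b(0, 0) << "\n";       // prints 5
      std::cout << (v * v).sum() << "\n"; // 1 + 4 + 9 + 16 = 30
      return 0;
    }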
diff --git a/extern/Eigen3/Eigen/src/Core/ArrayBase.h b/extern/Eigen3/Eigen/src/Core/ArrayBase.h
new file mode 100644
index 00000000000..9399ac3d15c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/ArrayBase.h
@@ -0,0 +1,239 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ARRAYBASE_H
+#define EIGEN_ARRAYBASE_H
+
+template<typename ExpressionType> class MatrixWrapper;
+
+/** \class ArrayBase
+ * \ingroup Core_Module
+ *
+  * \brief Base class for all 1D and 2D arrays, and related expressions
+  *
+  * An array is similar to a dense vector or matrix. While matrices are mathematical
+  * objects with well-defined linear algebra operators, an array is just a collection
+  * of scalar values arranged in a one- or two-dimensional fashion. As the main consequence,
+  * all operations applied to an array are performed coefficient-wise. Furthermore,
+  * arrays support scalar math functions of the C++ standard library (e.g., std::sin(x)), and convenient
+  * constructors that make it easy to write generic code working for both scalar values
+  * and arrays.
+ *
+ * This class is the base that is inherited by all array expression types.
+ *
+ * \tparam Derived is the derived type, e.g., an array or an expression type.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN.
+ *
+ * \sa class MatrixBase, \ref TopicClassHierarchy
+ */
+template<typename Derived> class ArrayBase
+ : public DenseBase<Derived>
+{
+ public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** The base class for a given storage type. */
+ typedef ArrayBase StorageBaseType;
+
+ typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
+
+ using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+ typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
+
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ typedef DenseBase<Derived> Base;
+ using Base::RowsAtCompileTime;
+ using Base::ColsAtCompileTime;
+ using Base::SizeAtCompileTime;
+ using Base::MaxRowsAtCompileTime;
+ using Base::MaxColsAtCompileTime;
+ using Base::MaxSizeAtCompileTime;
+ using Base::IsVectorAtCompileTime;
+ using Base::Flags;
+ using Base::CoeffReadCost;
+
+ using Base::derived;
+ using Base::const_cast_derived;
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::coeff;
+ using Base::coeffRef;
+ using Base::lazyAssign;
+ using Base::operator=;
+ using Base::operator+=;
+ using Base::operator-=;
+ using Base::operator*=;
+ using Base::operator/=;
+
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal the plain matrix type corresponding to this expression. Note that this is not necessarily
+ * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const
+ * reference to a matrix, not a matrix! It is however guaranteed that the return type of eval() is either
+ * PlainObject or const PlainObject&.
+ */
+ typedef Array<typename internal::traits<Derived>::Scalar,
+ internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime,
+ AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
+ internal::traits<Derived>::MaxRowsAtCompileTime,
+ internal::traits<Derived>::MaxColsAtCompileTime
+ > PlainObject;
+
+
+ /** \internal Represents a matrix with all coefficients equal to one another*/
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase
+# include "../plugins/CommonCwiseUnaryOps.h"
+# include "../plugins/MatrixCwiseUnaryOps.h"
+# include "../plugins/ArrayCwiseUnaryOps.h"
+# include "../plugins/CommonCwiseBinaryOps.h"
+# include "../plugins/MatrixCwiseBinaryOps.h"
+# include "../plugins/ArrayCwiseBinaryOps.h"
+# ifdef EIGEN_ARRAYBASE_PLUGIN
+# include EIGEN_ARRAYBASE_PLUGIN
+# endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+ /** Special case of the template operator=, in order to prevent the compiler
+ * from generating a default operator= (issue hit with g++ 4.1)
+ */
+ Derived& operator=(const ArrayBase& other)
+ {
+ return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
+ }
+
+ Derived& operator+=(const Scalar& scalar)
+ { return *this = derived() + scalar; }
+ Derived& operator-=(const Scalar& scalar)
+ { return *this = derived() - scalar; }
+
+ template<typename OtherDerived>
+ Derived& operator+=(const ArrayBase<OtherDerived>& other);
+ template<typename OtherDerived>
+ Derived& operator-=(const ArrayBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ Derived& operator*=(const ArrayBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ Derived& operator/=(const ArrayBase<OtherDerived>& other);
+
+ public:
+ ArrayBase<Derived>& array() { return *this; }
+ const ArrayBase<Derived>& array() const { return *this; }
+
+ /** \returns an \link MatrixBase Matrix \endlink expression of this array
+ * \sa MatrixBase::array() */
+ MatrixWrapper<Derived> matrix() { return derived(); }
+ const MatrixWrapper<Derived> matrix() const { return derived(); }
+
+// template<typename Dest>
+// inline void evalTo(Dest& dst) const { dst = matrix(); }
+
+ protected:
+ ArrayBase() : Base() {}
+
+ private:
+ explicit ArrayBase(Index);
+ ArrayBase(Index,Index);
+ template<typename OtherDerived> explicit ArrayBase(const ArrayBase<OtherDerived>&);
+ protected:
+ // mixing arrays and matrices is not legal
+ template<typename OtherDerived> Derived& operator+=(const MatrixBase<OtherDerived>& )
+ {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);}
+ // mixing arrays and matrices is not legal
+ template<typename OtherDerived> Derived& operator-=(const MatrixBase<OtherDerived>& )
+ {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);}
+};
+
+/** replaces \c *this by \c *this - \a other.
+ *
+ * \returns a reference to \c *this
+ */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other)
+{
+ SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, Derived, OtherDerived> tmp(derived());
+ tmp = other.derived();
+ return derived();
+}
+
+/** replaces \c *this by \c *this + \a other.
+ *
+ * \returns a reference to \c *this
+ */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other)
+{
+ SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived> tmp(derived());
+ tmp = other.derived();
+ return derived();
+}
+
+/** replaces \c *this by \c *this * \a other coefficient wise.
+ *
+ * \returns a reference to \c *this
+ */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+ArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other)
+{
+ SelfCwiseBinaryOp<internal::scalar_product_op<Scalar>, Derived, OtherDerived> tmp(derived());
+ tmp = other.derived();
+ return derived();
+}
+
+/** replaces \c *this by \c *this / \a other coefficient wise.
+ *
+ * \returns a reference to \c *this
+ */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+ArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other)
+{
+ SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, Derived, OtherDerived> tmp(derived());
+ tmp = other.derived();
+ return derived();
+}
+
+#endif // EIGEN_ARRAYBASE_H
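
A sketch of the in-place coefficient-wise operators defined above (illustration only; assumes <Eigen/Core>):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXf x(3), y(3);
      x << 1, 2, 3;
      y << 4, 5, 6;

      x *= y;    // coefficient-wise product: x is now 4, 10, 18
      x += 1.0f; // scalar added to every coefficient: 5, 11, 19
      x /= y;    // coefficient-wise quotient
      std::cout << x.transpose() << "\n";
      return 0;
    }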
diff --git a/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h b/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h
new file mode 100644
index 00000000000..07f082e1edc
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h
@@ -0,0 +1,239 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ARRAYWRAPPER_H
+#define EIGEN_ARRAYWRAPPER_H
+
+/** \class ArrayWrapper
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a mathematical vector or matrix as an array object
+ *
+ * This class is the return type of MatrixBase::array(), and most of the time
+  * this is the only way it is used.
+ *
+ * \sa MatrixBase::array(), class MatrixWrapper
+ */
+
+namespace internal {
+template<typename ExpressionType>
+struct traits<ArrayWrapper<ExpressionType> >
+ : public traits<typename remove_all<typename ExpressionType::Nested>::type >
+{
+ typedef ArrayXpr XprKind;
+};
+}
+
+template<typename ExpressionType>
+class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
+{
+ public:
+ typedef ArrayBase<ArrayWrapper> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
+
+ typedef typename internal::conditional<
+ internal::is_lvalue<ExpressionType>::value,
+ Scalar,
+ const Scalar
+ >::type ScalarWithConstIfNotLvalue;
+
+ typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
+
+ inline ArrayWrapper(const ExpressionType& matrix) : m_expression(matrix) {}
+
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
+
+ inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+ inline const Scalar* data() const { return m_expression.data(); }
+
+ inline const CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_expression.coeff(row, col);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ return m_expression.const_cast_derived().coeffRef(row, col);
+ }
+
+ inline const Scalar& coeffRef(Index row, Index col) const
+ {
+ return m_expression.const_cast_derived().coeffRef(row, col);
+ }
+
+ inline const CoeffReturnType coeff(Index index) const
+ {
+ return m_expression.coeff(index);
+ }
+
+ inline Scalar& coeffRef(Index index)
+ {
+ return m_expression.const_cast_derived().coeffRef(index);
+ }
+
+ inline const Scalar& coeffRef(Index index) const
+ {
+ return m_expression.const_cast_derived().coeffRef(index);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index row, Index col) const
+ {
+ return m_expression.template packet<LoadMode>(row, col);
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
+ {
+ m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index index) const
+ {
+ return m_expression.template packet<LoadMode>(index);
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index index, const PacketScalar& x)
+ {
+ m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
+ }
+
+ template<typename Dest>
+ inline void evalTo(Dest& dst) const { dst = m_expression; }
+
+ protected:
+ const NestedExpressionType m_expression;
+};
+
+/** \class MatrixWrapper
+ * \ingroup Core_Module
+ *
+ * \brief Expression of an array as a mathematical vector or matrix
+ *
+ * This class is the return type of ArrayBase::matrix(), and most of the time
+  * this is the only way it is used.
+ *
+ * \sa MatrixBase::matrix(), class ArrayWrapper
+ */
+
+namespace internal {
+template<typename ExpressionType>
+struct traits<MatrixWrapper<ExpressionType> >
+ : public traits<typename remove_all<typename ExpressionType::Nested>::type >
+{
+ typedef MatrixXpr XprKind;
+};
+}
+
+template<typename ExpressionType>
+class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
+{
+ public:
+ typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)
+
+ typedef typename internal::conditional<
+ internal::is_lvalue<ExpressionType>::value,
+ Scalar,
+ const Scalar
+ >::type ScalarWithConstIfNotLvalue;
+
+ typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
+
+ inline MatrixWrapper(const ExpressionType& matrix) : m_expression(matrix) {}
+
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
+
+ inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+ inline const Scalar* data() const { return m_expression.data(); }
+
+ inline const CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_expression.coeff(row, col);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ return m_expression.const_cast_derived().coeffRef(row, col);
+ }
+
+ inline const Scalar& coeffRef(Index row, Index col) const
+ {
+ return m_expression.derived().coeffRef(row, col);
+ }
+
+ inline const CoeffReturnType coeff(Index index) const
+ {
+ return m_expression.coeff(index);
+ }
+
+ inline Scalar& coeffRef(Index index)
+ {
+ return m_expression.const_cast_derived().coeffRef(index);
+ }
+
+ inline const Scalar& coeffRef(Index index) const
+ {
+ return m_expression.const_cast_derived().coeffRef(index);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index row, Index col) const
+ {
+ return m_expression.template packet<LoadMode>(row, col);
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
+ {
+ m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index index) const
+ {
+ return m_expression.template packet<LoadMode>(index);
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index index, const PacketScalar& x)
+ {
+ m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
+ }
+
+ protected:
+ const NestedExpressionType m_expression;
+};
+
+#endif // EIGEN_ARRAYWRAPPER_H
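
The two wrappers above are what MatrixBase::array() and ArrayBase::matrix() return; a round-trip sketch (illustration only; assumes <Eigen/Dense>):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector3f v(1, 2, 3);

      // ArrayWrapper: square each coefficient in the array world, then
      // MatrixWrapper: return to the linear-algebra world for a dot product.
      Eigen::Vector3f w = (v.array() * v.array()).matrix();
      std::cout << w.dot(v) << "\n"; // 1*1 + 4*2 + 9*3 = 36
      return 0;
    }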
diff --git a/extern/Eigen3/Eigen/src/Core/Assign.h b/extern/Eigen3/Eigen/src/Core/Assign.h
new file mode 100644
index 00000000000..3a17152f043
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Assign.h
@@ -0,0 +1,593 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ASSIGN_H
+#define EIGEN_ASSIGN_H
+
+namespace internal {
+
+/***************************************************************************
+* Part 1 : the logic deciding a strategy for traversal and unrolling *
+***************************************************************************/
+
+template <typename Derived, typename OtherDerived>
+struct assign_traits
+{
+public:
+ enum {
+ DstIsAligned = Derived::Flags & AlignedBit,
+ DstHasDirectAccess = Derived::Flags & DirectAccessBit,
+ SrcIsAligned = OtherDerived::Flags & AlignedBit,
+ JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned
+ };
+
+private:
+ enum {
+ InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime)
+ : int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime)
+ : int(Derived::RowsAtCompileTime),
+ InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime)
+ : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime)
+ : int(Derived::MaxRowsAtCompileTime),
+ MaxSizeAtCompileTime = Derived::SizeAtCompileTime,
+ PacketSize = packet_traits<typename Derived::Scalar>::size
+ };
+
+ enum {
+ StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)),
+ MightVectorize = StorageOrdersAgree
+ && (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit),
+ MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
+ && int(DstIsAligned) && int(SrcIsAligned),
+ MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit),
+ MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess
+ && (DstIsAligned || MaxSizeAtCompileTime == Dynamic),
+ /* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
+ so it's only good for large enough sizes. */
+ MaySliceVectorize = MightVectorize && DstHasDirectAccess
+ && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize)
+ /* slice vectorization can be slow, so we only want it if the slices are big, which is
+ indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block
+ in a fixed-size matrix */
+ };
+
+public:
+ enum {
+ Traversal = int(MayInnerVectorize) ? int(InnerVectorizedTraversal)
+ : int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
+ : int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
+ : int(MayLinearize) ? int(LinearTraversal)
+ : int(DefaultTraversal),
+ Vectorized = int(Traversal) == InnerVectorizedTraversal
+ || int(Traversal) == LinearVectorizedTraversal
+ || int(Traversal) == SliceVectorizedTraversal
+ };
+
+private:
+ enum {
+ UnrollingLimit = EIGEN_UNROLLING_LIMIT * (Vectorized ? int(PacketSize) : 1),
+ MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic
+ && int(OtherDerived::CoeffReadCost) != Dynamic
+ && int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit),
+ MayUnrollInner = int(InnerSize) != Dynamic
+ && int(OtherDerived::CoeffReadCost) != Dynamic
+ && int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit)
+ };
+
+public:
+ enum {
+ Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal))
+ ? (
+ int(MayUnrollCompletely) ? int(CompleteUnrolling)
+ : int(MayUnrollInner) ? int(InnerUnrolling)
+ : int(NoUnrolling)
+ )
+ : int(Traversal) == int(LinearVectorizedTraversal)
+ ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) )
+ : int(Traversal) == int(LinearTraversal)
+ ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) )
+ : int(NoUnrolling)
+ };
+
+#ifdef EIGEN_DEBUG_ASSIGN
+ static void debug()
+ {
+ EIGEN_DEBUG_VAR(DstIsAligned)
+ EIGEN_DEBUG_VAR(SrcIsAligned)
+ EIGEN_DEBUG_VAR(JointAlignment)
+ EIGEN_DEBUG_VAR(InnerSize)
+ EIGEN_DEBUG_VAR(InnerMaxSize)
+ EIGEN_DEBUG_VAR(PacketSize)
+ EIGEN_DEBUG_VAR(StorageOrdersAgree)
+ EIGEN_DEBUG_VAR(MightVectorize)
+ EIGEN_DEBUG_VAR(MayLinearize)
+ EIGEN_DEBUG_VAR(MayInnerVectorize)
+ EIGEN_DEBUG_VAR(MayLinearVectorize)
+ EIGEN_DEBUG_VAR(MaySliceVectorize)
+ EIGEN_DEBUG_VAR(Traversal)
+ EIGEN_DEBUG_VAR(UnrollingLimit)
+ EIGEN_DEBUG_VAR(MayUnrollCompletely)
+ EIGEN_DEBUG_VAR(MayUnrollInner)
+ EIGEN_DEBUG_VAR(Unrolling)
+ }
+#endif
+};
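+
+// Illustrative note: for an assignment between two aligned, column-major
+// Matrix4f objects with SSE (PacketSize==4), InnerSize==4 is a multiple of
+// the packet size, so MayInnerVectorize holds and the logic above selects
+// Traversal==InnerVectorizedTraversal with CompleteUnrolling (16 coefficients
+// fit well under the unrolling limit).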
+
+/***************************************************************************
+* Part 2 : meta-unrollers
+***************************************************************************/
+
+/************************
+*** Default traversal ***
+************************/
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_DefaultTraversal_CompleteUnrolling
+{
+ enum {
+ outer = Index / Derived1::InnerSizeAtCompileTime,
+ inner = Index % Derived1::InnerSizeAtCompileTime
+ };
+
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ dst.copyCoeffByOuterInner(outer, inner, src);
+ assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
+ }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_DefaultTraversal_InnerUnrolling
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int outer)
+ {
+ dst.copyCoeffByOuterInner(outer, Index, src);
+ assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src, outer);
+ }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Stop, Stop>
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {}
+};
+
+/***********************
+*** Linear traversal ***
+***********************/
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_LinearTraversal_CompleteUnrolling
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ dst.copyCoeff(Index, src);
+ assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
+ }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {}
+};
+
+/**************************
+*** Inner vectorization ***
+**************************/
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_innervec_CompleteUnrolling
+{
+ enum {
+ outer = Index / Derived1::InnerSizeAtCompileTime,
+ inner = Index % Derived1::InnerSizeAtCompileTime,
+ JointAlignment = assign_traits<Derived1,Derived2>::JointAlignment
+ };
+
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ dst.template copyPacketByOuterInner<Derived2, Aligned, JointAlignment>(outer, inner, src);
+ assign_innervec_CompleteUnrolling<Derived1, Derived2,
+ Index+packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src);
+ }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_innervec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_innervec_InnerUnrolling
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int outer)
+ {
+ dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, Index, src);
+ assign_innervec_InnerUnrolling<Derived1, Derived2,
+ Index+packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src, outer);
+ }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_innervec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {}
+};
+
+/***************************************************************************
+* Part 3 : implementation of all cases
+***************************************************************************/
+
+template<typename Derived1, typename Derived2,
+ int Traversal = assign_traits<Derived1, Derived2>::Traversal,
+ int Unrolling = assign_traits<Derived1, Derived2>::Unrolling>
+struct assign_impl;
+
+/************************
+*** Default traversal ***
+************************/
+
+template<typename Derived1, typename Derived2, int Unrolling>
+struct assign_impl<Derived1, Derived2, InvalidTraversal, Unrolling>
+{
+ inline static void run(Derived1 &, const Derived2 &) { }
+};
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, DefaultTraversal, NoUnrolling>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ const Index innerSize = dst.innerSize();
+ const Index outerSize = dst.outerSize();
+ for(Index outer = 0; outer < outerSize; ++outer)
+ for(Index inner = 0; inner < innerSize; ++inner)
+ dst.copyCoeffByOuterInner(outer, inner, src);
+ }
+};
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, DefaultTraversal, CompleteUnrolling>
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
+ ::run(dst, src);
+ }
+};
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, DefaultTraversal, InnerUnrolling>
+{
+ typedef typename Derived1::Index Index;
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ const Index outerSize = dst.outerSize();
+ for(Index outer = 0; outer < outerSize; ++outer)
+ assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
+ ::run(dst, src, outer);
+ }
+};
+
+/***********************
+*** Linear traversal ***
+***********************/
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, LinearTraversal, NoUnrolling>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ const Index size = dst.size();
+ for(Index i = 0; i < size; ++i)
+ dst.copyCoeff(i, src);
+ }
+};
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, LinearTraversal, CompleteUnrolling>
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
+ ::run(dst, src);
+ }
+};
+
+/**************************
+*** Inner vectorization ***
+**************************/
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, NoUnrolling>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ const Index innerSize = dst.innerSize();
+ const Index outerSize = dst.outerSize();
+ const Index packetSize = packet_traits<typename Derived1::Scalar>::size;
+ for(Index outer = 0; outer < outerSize; ++outer)
+ for(Index inner = 0; inner < innerSize; inner+=packetSize)
+ dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, inner, src);
+ }
+};
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, CompleteUnrolling>
+{
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
+ ::run(dst, src);
+ }
+};
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, InnerUnrolling>
+{
+ typedef typename Derived1::Index Index;
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ const Index outerSize = dst.outerSize();
+ for(Index outer = 0; outer < outerSize; ++outer)
+ assign_innervec_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
+ ::run(dst, src, outer);
+ }
+};
+
+/***************************
+*** Linear vectorization ***
+***************************/
+
+template <bool IsAligned = false>
+struct unaligned_assign_impl
+{
+ template <typename Derived, typename OtherDerived>
+ static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {}
+};
+
+template <>
+struct unaligned_assign_impl<false>
+{
+  // MSVC must not inline this function. If it does, it fails to optimize the
+ // packet access path.
+#ifdef _MSC_VER
+ template <typename Derived, typename OtherDerived>
+ static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
+#else
+ template <typename Derived, typename OtherDerived>
+ static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
+#endif
+ {
+ for (typename Derived::Index index = start; index < end; ++index)
+ dst.copyCoeff(index, src);
+ }
+};
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling>
+{
+ typedef typename Derived1::Index Index;
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ const Index size = dst.size();
+ typedef packet_traits<typename Derived1::Scalar> PacketTraits;
+ enum {
+ packetSize = PacketTraits::size,
+ dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
+ srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
+ };
+ const Index alignedStart = assign_traits<Derived1,Derived2>::DstIsAligned ? 0
+ : first_aligned(&dst.coeffRef(0), size);
+ const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
+
+ unaligned_assign_impl<assign_traits<Derived1,Derived2>::DstIsAligned!=0>::run(src,dst,0,alignedStart);
+
+ for(Index index = alignedStart; index < alignedEnd; index += packetSize)
+ {
+ dst.template copyPacket<Derived2, dstAlignment, srcAlignment>(index, src);
+ }
+
+ unaligned_assign_impl<>::run(src,dst,alignedEnd,size);
+ }
+};
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, LinearVectorizedTraversal, CompleteUnrolling>
+{
+ typedef typename Derived1::Index Index;
+ EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src)
+ {
+ enum { size = Derived1::SizeAtCompileTime,
+ packetSize = packet_traits<typename Derived1::Scalar>::size,
+ alignedSize = (size/packetSize)*packetSize };
+
+ assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, alignedSize>::run(dst, src);
+ assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, alignedSize, size>::run(dst, src);
+ }
+};
+
+/**************************
+*** Slice vectorization ***
+***************************/
+
+template<typename Derived1, typename Derived2>
+struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ typedef packet_traits<typename Derived1::Scalar> PacketTraits;
+ enum {
+ packetSize = PacketTraits::size,
+ alignable = PacketTraits::AlignedOnScalar,
+ dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
+ srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
+ };
+ const Index packetAlignedMask = packetSize - 1;
+ const Index innerSize = dst.innerSize();
+ const Index outerSize = dst.outerSize();
+ const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
+ Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
+ : first_aligned(&dst.coeffRef(0,0), innerSize);
+
+ for(Index outer = 0; outer < outerSize; ++outer)
+ {
+ const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
+ // do the non-vectorizable part of the assignment
+ for(Index inner = 0; inner<alignedStart ; ++inner)
+ dst.copyCoeffByOuterInner(outer, inner, src);
+
+ // do the vectorizable part of the assignment
+ for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize)
+ dst.template copyPacketByOuterInner<Derived2, dstAlignment, Unaligned>(outer, inner, src);
+
+ // do the non-vectorizable part of the assignment
+ for(Index inner = alignedEnd; inner<innerSize ; ++inner)
+ dst.copyCoeffByOuterInner(outer, inner, src);
+
+ alignedStart = std::min<Index>((alignedStart+alignedStep)%packetSize, innerSize);
+ }
+ }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Part 4 : implementation of DenseBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
+ ::lazyAssign(const DenseBase<OtherDerived>& other)
+{
+ enum{
+ SameType = internal::is_same<typename Derived::Scalar,typename OtherDerived::Scalar>::value
+ };
+
+ EIGEN_STATIC_ASSERT_LVALUE(Derived)
+ EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+#ifdef EIGEN_DEBUG_ASSIGN
+ internal::assign_traits<Derived, OtherDerived>::debug();
+#endif
+ eigen_assert(rows() == other.rows() && cols() == other.cols());
+ internal::assign_impl<Derived, OtherDerived, int(SameType) ? int(internal::assign_traits<Derived, OtherDerived>::Traversal)
+ : int(InvalidTraversal)>::run(derived(),other.derived());
+#ifndef EIGEN_NO_DEBUG
+ checkTransposeAliasing(other.derived());
+#endif
+ return derived();
+}
+
+namespace internal {
+
+template<typename Derived, typename OtherDerived,
+ bool EvalBeforeAssigning = (int(OtherDerived::Flags) & EvalBeforeAssigningBit) != 0,
+ bool NeedToTranspose = Derived::IsVectorAtCompileTime
+ && OtherDerived::IsVectorAtCompileTime
+ && ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1)
+ | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
+ // revert to || as soon as not needed anymore.
+ (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1))
+ && int(Derived::SizeAtCompileTime) != 1>
+struct assign_selector;
+
+template<typename Derived, typename OtherDerived>
+struct assign_selector<Derived,OtherDerived,false,false> {
+ EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
+};
+template<typename Derived, typename OtherDerived>
+struct assign_selector<Derived,OtherDerived,true,false> {
+ EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); }
+};
+template<typename Derived, typename OtherDerived>
+struct assign_selector<Derived,OtherDerived,false,true> {
+ EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); }
+};
+template<typename Derived, typename OtherDerived>
+struct assign_selector<Derived,OtherDerived,true,true> {
+ EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); }
+};
+
+} // end namespace internal
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other)
+{
+ return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other)
+{
+ return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other)
+{
+ return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
+}
+
+template<typename Derived>
+template <typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other)
+{
+ return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
+}
+
+template<typename Derived>
+template <typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other)
+{
+ other.derived().evalTo(derived());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+{
+ other.evalTo(derived());
+ return derived();
+}
+
+#endif // EIGEN_ASSIGN_H
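
The traversal and unrolling decisions made in Part 1 can be inspected by defining EIGEN_DEBUG_ASSIGN, which makes lazyAssign() call assign_traits<...>::debug() as shown above (illustration only):

    #define EIGEN_DEBUG_ASSIGN // must precede any Eigen header
    #include <Eigen/Core>

    int main()
    {
      Eigen::Matrix4f a, b;
      b.setRandom();
      a = b; // prints DstIsAligned, Traversal, Unrolling, ... for this assignment
      return 0;
    }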
diff --git a/extern/Eigen3/Eigen/src/Core/BandMatrix.h b/extern/Eigen3/Eigen/src/Core/BandMatrix.h
new file mode 100644
index 00000000000..2570d7b559f
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/BandMatrix.h
@@ -0,0 +1,346 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BANDMATRIX_H
+#define EIGEN_BANDMATRIX_H
+
+namespace internal {
+
+
+template<typename Derived>
+class BandMatrixBase : public EigenBase<Derived>
+{
+ public:
+
+ enum {
+ Flags = internal::traits<Derived>::Flags,
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
+ Supers = internal::traits<Derived>::Supers,
+ Subs = internal::traits<Derived>::Subs,
+ Options = internal::traits<Derived>::Options
+ };
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
+ typedef typename DenseMatrixType::Index Index;
+ typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType;
+ typedef EigenBase<Derived> Base;
+
+ protected:
+ enum {
+ DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic))
+ ? 1 + Supers + Subs
+ : Dynamic,
+ SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime)
+ };
+
+ public:
+
+ using Base::derived;
+ using Base::rows;
+ using Base::cols;
+
+ /** \returns the number of super diagonals */
+ inline Index supers() const { return derived().supers(); }
+
+ /** \returns the number of sub diagonals */
+ inline Index subs() const { return derived().subs(); }
+
+ /** \returns an expression of the underlying coefficient matrix */
+ inline const CoefficientsType& coeffs() const { return derived().coeffs(); }
+
+ /** \returns an expression of the underlying coefficient matrix */
+ inline CoefficientsType& coeffs() { return derived().coeffs(); }
+
+ /** \returns a vector expression of the \a i -th column,
+ * only the meaningful part is returned.
+ * \warning the internal storage must be column major. */
+ inline Block<CoefficientsType,Dynamic,1> col(Index i)
+ {
+ EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ Index start = 0;
+ Index len = coeffs().rows();
+ if (i<=supers())
+ {
+ start = supers()-i;
+ len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));
+ }
+ else if (i>=rows()-subs())
+ len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs()));
+ return Block<CoefficientsType,Dynamic,1>(coeffs(), start, i, len, 1);
+ }
+
+ /** \returns a vector expression of the main diagonal */
+ inline Block<CoefficientsType,1,SizeAtCompileTime> diagonal()
+ { return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }
+
+ /** \returns a vector expression of the main diagonal (const version) */
+ inline const Block<const CoefficientsType,1,SizeAtCompileTime> diagonal() const
+ { return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }
+
+ template<int Index> struct DiagonalIntReturnType {
+ enum {
+ ReturnOpposite = (Options&SelfAdjoint) && (((Index)>0 && Supers==0) || ((Index)<0 && Subs==0)),
+ Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex,
+ ActualIndex = ReturnOpposite ? -Index : Index,
+ DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic)
+ ? Dynamic
+ : (ActualIndex<0
+ ? EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)
+ : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))
+ };
+ typedef Block<CoefficientsType,1, DiagonalSize> BuildType;
+ typedef typename internal::conditional<Conjugate,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>,BuildType >,
+ BuildType>::type Type;
+ };
+
+ /** \returns a vector expression of the \a N -th sub or super diagonal */
+ template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal()
+ {
+ return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
+ }
+
+ /** \returns a vector expression of the \a N -th sub or super diagonal */
+ template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const
+ {
+ return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
+ }
+
+ /** \returns a vector expression of the \a i -th sub or super diagonal */
+ inline Block<CoefficientsType,1,Dynamic> diagonal(Index i)
+ {
+ eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
+ return Block<CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
+ }
+
+ /** \returns a vector expression of the \a i -th sub or super diagonal */
+ inline const Block<const CoefficientsType,1,Dynamic> diagonal(Index i) const
+ {
+ eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
+ return Block<const CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
+ }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ dst.resize(rows(),cols());
+ dst.setZero();
+ dst.diagonal() = diagonal();
+ for (Index i=1; i<=supers();++i)
+ dst.diagonal(i) = diagonal(i);
+ for (Index i=1; i<=subs();++i)
+ dst.diagonal(-i) = diagonal(-i);
+ }
+
+ DenseMatrixType toDenseMatrix() const
+ {
+ DenseMatrixType res(rows(),cols());
+ evalTo(res);
+ return res;
+ }
+
+ protected:
+
+ inline Index diagonalLength(Index i) const
+ { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); }
+};
+
+/**
+ * \class BandMatrix
+ * \ingroup Core_Module
+ *
+ * \brief Represents a rectangular matrix with banded storage
+ *
+ * \param _Scalar Numeric type, e.g. float, double, int
+ * \param Rows Number of rows, or \b Dynamic
+ * \param Cols Number of columns, or \b Dynamic
+ * \param Supers Number of super diagonals
+ * \param Subs Number of sub diagonals
+ * \param _Options A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint
+ * The former controls \ref TopicStorageOrders "storage order", and defaults to
+ * column-major. The latter controls whether the matrix represents a selfadjoint
+ * matrix, in which case either Supers or Subs has to be zero.
+ *
+ * \sa class TridiagonalMatrix
+ */
+
+template<typename _Scalar, int _Rows, int _Cols, int _Supers, int _Subs, int _Options>
+struct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >
+{
+ typedef _Scalar Scalar;
+ typedef Dense StorageKind;
+ typedef DenseIndex Index;
+ enum {
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ RowsAtCompileTime = _Rows,
+ ColsAtCompileTime = _Cols,
+ MaxRowsAtCompileTime = _Rows,
+ MaxColsAtCompileTime = _Cols,
+ Flags = LvalueBit,
+ Supers = _Supers,
+ Subs = _Subs,
+ Options = _Options,
+ DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic
+ };
+ typedef Matrix<Scalar,DataRowsAtCompileTime,ColsAtCompileTime,Options&RowMajor?RowMajor:ColMajor> CoefficientsType;
+};
+
+template<typename _Scalar, int Rows, int Cols, int Supers, int Subs, int Options>
+class BandMatrix : public BandMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >
+{
+ public:
+
+ typedef typename internal::traits<BandMatrix>::Scalar Scalar;
+ typedef typename internal::traits<BandMatrix>::Index Index;
+ typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType;
+
+ inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs)
+ : m_coeffs(1+supers+subs,cols),
+ m_rows(rows), m_supers(supers), m_subs(subs)
+ {
+ }
+
+ /** \returns the number of rows */
+ inline Index rows() const { return m_rows.value(); }
+
+ /** \returns the number of columns */
+ inline Index cols() const { return m_coeffs.cols(); }
+
+ /** \returns the number of super diagonals */
+ inline Index supers() const { return m_supers.value(); }
+
+ /** \returns the number of sub diagonals */
+ inline Index subs() const { return m_subs.value(); }
+
+ inline const CoefficientsType& coeffs() const { return m_coeffs; }
+ inline CoefficientsType& coeffs() { return m_coeffs; }
+
+ protected:
+
+ CoefficientsType m_coeffs;
+ internal::variable_if_dynamic<Index, Rows> m_rows;
+ internal::variable_if_dynamic<Index, Supers> m_supers;
+ internal::variable_if_dynamic<Index, Subs> m_subs;
+};
+
+template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>
+class BandMatrixWrapper;
+
+template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>
+struct traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> >
+{
+ typedef typename _CoefficientsType::Scalar Scalar;
+ typedef typename _CoefficientsType::StorageKind StorageKind;
+ typedef typename _CoefficientsType::Index Index;
+ enum {
+ CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost,
+ RowsAtCompileTime = _Rows,
+ ColsAtCompileTime = _Cols,
+ MaxRowsAtCompileTime = _Rows,
+ MaxColsAtCompileTime = _Cols,
+ Flags = LvalueBit,
+ Supers = _Supers,
+ Subs = _Subs,
+ Options = _Options,
+ DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic
+ };
+ typedef _CoefficientsType CoefficientsType;
+};
+
+template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>
+class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> >
+{
+ public:
+
+ typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar;
+ typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType;
+ typedef typename internal::traits<BandMatrixWrapper>::Index Index;
+
+ inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs)
+ : m_coeffs(coeffs),
+ m_rows(rows), m_supers(supers), m_subs(subs)
+ {
+ EIGEN_UNUSED_VARIABLE(cols);
+ //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows());
+ }
+
+ /** \returns the number of rows */
+ inline Index rows() const { return m_rows.value(); }
+
+ /** \returns the number of columns */
+ inline Index cols() const { return m_coeffs.cols(); }
+
+ /** \returns the number of super diagonals */
+ inline Index supers() const { return m_supers.value(); }
+
+ /** \returns the number of sub diagonals */
+ inline Index subs() const { return m_subs.value(); }
+
+ inline const CoefficientsType& coeffs() const { return m_coeffs; }
+
+ protected:
+
+ const CoefficientsType& m_coeffs;
+ internal::variable_if_dynamic<Index, _Rows> m_rows;
+ internal::variable_if_dynamic<Index, _Supers> m_supers;
+ internal::variable_if_dynamic<Index, _Subs> m_subs;
+};
+
+/**
+ * \class TridiagonalMatrix
+ * \ingroup Core_Module
+ *
+ * \brief Represents a tridiagonal matrix with compact banded storage
+ *
+ * \param _Scalar Numeric type, e.g. float, double, int
+ * \param Size Number of rows and cols, or \b Dynamic
+ * \param _Options Can be 0 or \b SelfAdjoint
+ *
+ * \sa class BandMatrix
+ */
+template<typename Scalar, int Size, int Options>
+class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor>
+{
+ typedef BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> Base;
+ typedef typename Base::Index Index;
+ public:
+ TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {}
+
+ inline typename Base::template DiagonalIntReturnType<1>::Type super()
+ { return Base::template diagonal<1>(); }
+ inline const typename Base::template DiagonalIntReturnType<1>::Type super() const
+ { return Base::template diagonal<1>(); }
+ inline typename Base::template DiagonalIntReturnType<-1>::Type sub()
+ { return Base::template diagonal<-1>(); }
+ inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const
+ { return Base::template diagonal<-1>(); }
+ protected:
+};
+
+} // end namespace internal
+
+#endif // EIGEN_BANDMATRIX_H
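In this snapshot BandMatrix lives in namespace internal, so it is not part of the public API; the sketch below therefore uses the internal class directly and is only meant to show how the accessors above fit together (all template arguments are spelled out rather than relying on defaults):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      using Eigen::Dynamic;
      // 5x5 band matrix with 1 super-diagonal and 2 sub-diagonals
      Eigen::internal::BandMatrix<float, Dynamic, Dynamic, Dynamic, Dynamic, 0> bm(5, 5, 1, 2);
      bm.coeffs().setZero();              // (1+supers+subs) x cols compact storage
      bm.diagonal().setConstant(2.f);     // main diagonal
      bm.diagonal(1).setConstant(1.f);    // the single super-diagonal
      bm.diagonal(-1).setConstant(-1.f);  // first of the two sub-diagonals
      std::cout << bm.toDenseMatrix() << std::endl;  // evalTo() fills a dense matrix
      return 0;
    }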
diff --git a/extern/Eigen3/Eigen/src/Core/Block.h b/extern/Eigen3/Eigen/src/Core/Block.h
new file mode 100644
index 00000000000..2b251bc2ca9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Block.h
@@ -0,0 +1,349 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BLOCK_H
+#define EIGEN_BLOCK_H
+
+/** \class Block
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a fixed-size or dynamic-size block
+ *
+ * \param XprType the type of the expression in which we are taking a block
+ * \param BlockRows the number of rows of the block we are taking at compile time (optional)
+ * \param BlockCols the number of columns of the block we are taking at compile time (optional)
+ * \param _DirectAccessStatus \internal used for partial specialization
+ *
+ * This class represents an expression of either a fixed-size or dynamic-size block. It is the return
+ * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
+ * most of the time this is the only way it is used.
+ *
+ * However, if you want to directly manipulate block expressions,
+ * for instance if you want to write a function returning such an expression, you
+ * will need to use this class.
+ *
+ * Here is an example illustrating the dynamic case:
+ * \include class_Block.cpp
+ * Output: \verbinclude class_Block.out
+ *
+ * \note Even though this expression has dynamic size, in the case where \a XprType
+ * has fixed size, this expression inherits a fixed maximal size, which means that
+ * evaluating it never causes a dynamic memory allocation.
+ *
+ * Here is an example illustrating the fixed-size case:
+ * \include class_FixedBlock.cpp
+ * Output: \verbinclude class_FixedBlock.out
+ *
+ * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
+ */
+
+namespace internal {
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess>
+struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> > : traits<XprType>
+{
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename traits<XprType>::StorageKind StorageKind;
+ typedef typename traits<XprType>::XprKind XprKind;
+ typedef typename nested<XprType>::type XprTypeNested;
+ typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+ enum{
+ MatrixRows = traits<XprType>::RowsAtCompileTime,
+ MatrixCols = traits<XprType>::ColsAtCompileTime,
+ RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows,
+ ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols,
+ MaxRowsAtCompileTime = BlockRows==0 ? 0
+ : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime)
+ : int(traits<XprType>::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = BlockCols==0 ? 0
+ : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
+ : int(traits<XprType>::MaxColsAtCompileTime),
+ XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
+ IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+ : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+ : XprTypeIsRowMajor,
+ HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
+ InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
+ InnerStrideAtCompileTime = HasSameStorageOrderAsXprType
+ ? int(inner_stride_at_compile_time<XprType>::ret)
+ : int(outer_stride_at_compile_time<XprType>::ret),
+ OuterStrideAtCompileTime = HasSameStorageOrderAsXprType
+ ? int(outer_stride_at_compile_time<XprType>::ret)
+ : int(inner_stride_at_compile_time<XprType>::ret),
+ MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0)
+ && (InnerStrideAtCompileTime == 1)
+ ? PacketAccessBit : 0,
+ MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && ((OuterStrideAtCompileTime % packet_traits<Scalar>::size) == 0)) ? AlignedBit : 0,
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+ FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
+ FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
+ Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
+ DirectAccessBit |
+ MaskPacketAccessBit |
+ MaskAlignedBit),
+ Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit
+ };
+};
+}
+
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class Block
+ : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel, HasDirectAccess> >::type
+{
+ public:
+
+ typedef typename internal::dense_xpr_base<Block>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Block)
+
+ class InnerIterator;
+
+ /** Column or Row constructor
+ */
+ inline Block(XprType& xpr, Index i)
+ : m_xpr(xpr),
+ // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
+ // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
+ // all other cases are invalid.
+ // The case of a 1x1 matrix seems ambiguous, but the result is the same either way.
+ m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
+ m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+ m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+ m_blockCols(BlockCols==1 ? 1 : xpr.cols())
+ {
+ eigen_assert( (i>=0) && (
+ ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
+ ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
+ }
+
+ /** Fixed-size constructor
+ */
+ inline Block(XprType& xpr, Index startRow, Index startCol)
+ : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
+ m_blockRows(BlockRows), m_blockCols(BlockCols)
+ {
+ EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
+ eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
+ && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
+ }
+
+ /** Dynamic-size constructor
+ */
+ inline Block(XprType& xpr,
+ Index startRow, Index startCol,
+ Index blockRows, Index blockCols)
+ : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),
+ m_blockRows(blockRows), m_blockCols(blockCols)
+ {
+ eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
+ && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
+ eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
+ && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
+ }
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
+
+ inline Index rows() const { return m_blockRows.value(); }
+ inline Index cols() const { return m_blockCols.value(); }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(XprType)
+ return m_xpr.const_cast_derived()
+ .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+ }
+
+ inline const Scalar& coeffRef(Index row, Index col) const
+ {
+ return m_xpr.derived()
+ .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+ }
+
+ EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value());
+ }
+
+ inline Scalar& coeffRef(Index index)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(XprType)
+ return m_xpr.const_cast_derived()
+ .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ }
+
+ inline const Scalar& coeffRef(Index index) const
+ {
+ return m_xpr.const_cast_derived()
+ .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ }
+
+ inline const CoeffReturnType coeff(Index index) const
+ {
+ return m_xpr
+ .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ }
+
+ template<int LoadMode>
+ inline PacketScalar packet(Index row, Index col) const
+ {
+ return m_xpr.template packet<Unaligned>
+ (row + m_startRow.value(), col + m_startCol.value());
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
+ {
+ m_xpr.const_cast_derived().template writePacket<Unaligned>
+ (row + m_startRow.value(), col + m_startCol.value(), x);
+ }
+
+ template<int LoadMode>
+ inline PacketScalar packet(Index index) const
+ {
+ return m_xpr.template packet<Unaligned>
+ (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index index, const PacketScalar& x)
+ {
+ m_xpr.const_cast_derived().template writePacket<Unaligned>
+ (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), x);
+ }
+
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ /** \sa MapBase::data() */
+ inline const Scalar* data() const;
+ inline Index innerStride() const;
+ inline Index outerStride() const;
+ #endif
+
+ protected:
+
+ const typename XprType::Nested m_xpr;
+ const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+ const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+ const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+ const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+};
+
+/** \internal */
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class Block<XprType,BlockRows,BlockCols, InnerPanel,true>
+ : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel, true> >
+{
+ public:
+
+ typedef MapBase<Block> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Block)
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
+
+ /** Column or Row constructor
+ */
+ inline Block(XprType& xpr, Index i)
+ : Base(internal::const_cast_ptr(&xpr.coeffRef(
+ (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0,
+ (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)),
+ BlockRows==1 ? 1 : xpr.rows(),
+ BlockCols==1 ? 1 : xpr.cols()),
+ m_xpr(xpr)
+ {
+ eigen_assert( (i>=0) && (
+ ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
+ ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
+ init();
+ }
+
+ /** Fixed-size constructor
+ */
+ inline Block(XprType& xpr, Index startRow, Index startCol)
+ : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol))), m_xpr(xpr)
+ {
+ eigen_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows()
+ && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols());
+ init();
+ }
+
+ /** Dynamic-size constructor
+ */
+ inline Block(XprType& xpr,
+ Index startRow, Index startCol,
+ Index blockRows, Index blockCols)
+ : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol)), blockRows, blockCols),
+ m_xpr(xpr)
+ {
+ eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
+ && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
+ eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows()
+ && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols());
+ init();
+ }
+
+ /** \sa MapBase::innerStride() */
+ inline Index innerStride() const
+ {
+ return internal::traits<Block>::HasSameStorageOrderAsXprType
+ ? m_xpr.innerStride()
+ : m_xpr.outerStride();
+ }
+
+ /** \sa MapBase::outerStride() */
+ inline Index outerStride() const
+ {
+ return m_outerStride;
+ }
+
+ #ifndef __SUNPRO_CC
+ // FIXME sunstudio is not friendly with the above friend...
+ // META-FIXME there is no 'friend' keyword around here. Is this obsolete?
+ protected:
+ #endif
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal used by allowAligned() */
+ inline Block(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
+ : Base(data, blockRows, blockCols), m_xpr(xpr)
+ {
+ init();
+ }
+ #endif
+
+ protected:
+ void init()
+ {
+ m_outerStride = internal::traits<Block>::HasSameStorageOrderAsXprType
+ ? m_xpr.outerStride()
+ : m_xpr.innerStride();
+ }
+
+ const typename XprType::Nested m_xpr;
+ int m_outerStride;
+};
+
+
+#endif // EIGEN_BLOCK_H
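In practice a Block is almost always obtained through DenseBase::block() and related methods rather than constructed by name; naming the type is mainly needed for function signatures. A short sketch, assuming Eigen 3's public API:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXf m(4, 4);
      m.setRandom();

      Eigen::MatrixXf tl = m.block(0, 0, 2, 2);  // dynamic-size block (runtime sizes)
      m.block<2, 2>(2, 2).setZero();             // fixed-size block, usable as an lvalue

      // Naming the expression type directly, as a function return type would:
      Eigen::Block<Eigen::MatrixXf> b(m, 1, 1, 2, 2);
      b.setOnes();

      std::cout << m << "\n--\n" << tl << std::endl;
      return 0;
    }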
diff --git a/extern/Eigen2/Eigen/src/Array/BooleanRedux.h b/extern/Eigen3/Eigen/src/Core/BooleanRedux.h
index 4e8218327eb..5c3444a57c9 100644
--- a/extern/Eigen2/Eigen/src/Array/BooleanRedux.h
+++ b/extern/Eigen3/Eigen/src/Core/BooleanRedux.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -25,8 +25,10 @@
#ifndef EIGEN_ALLANDANY_H
#define EIGEN_ALLANDANY_H
+namespace internal {
+
template<typename Derived, int UnrollCount>
-struct ei_all_unroller
+struct all_unroller
{
enum {
col = (UnrollCount-1) / Derived::RowsAtCompileTime,
@@ -35,24 +37,24 @@ struct ei_all_unroller
inline static bool run(const Derived &mat)
{
- return ei_all_unroller<Derived, UnrollCount-1>::run(mat) && mat.coeff(row, col);
+ return all_unroller<Derived, UnrollCount-1>::run(mat) && mat.coeff(row, col);
}
};
template<typename Derived>
-struct ei_all_unroller<Derived, 1>
+struct all_unroller<Derived, 1>
{
inline static bool run(const Derived &mat) { return mat.coeff(0, 0); }
};
template<typename Derived>
-struct ei_all_unroller<Derived, Dynamic>
+struct all_unroller<Derived, Dynamic>
{
inline static bool run(const Derived &) { return false; }
};
template<typename Derived, int UnrollCount>
-struct ei_any_unroller
+struct any_unroller
{
enum {
col = (UnrollCount-1) / Derived::RowsAtCompileTime,
@@ -61,85 +63,87 @@ struct ei_any_unroller
inline static bool run(const Derived &mat)
{
- return ei_any_unroller<Derived, UnrollCount-1>::run(mat) || mat.coeff(row, col);
+ return any_unroller<Derived, UnrollCount-1>::run(mat) || mat.coeff(row, col);
}
};
template<typename Derived>
-struct ei_any_unroller<Derived, 1>
+struct any_unroller<Derived, 1>
{
inline static bool run(const Derived &mat) { return mat.coeff(0, 0); }
};
template<typename Derived>
-struct ei_any_unroller<Derived, Dynamic>
+struct any_unroller<Derived, Dynamic>
{
inline static bool run(const Derived &) { return false; }
};
-/** \array_module
- *
- * \returns true if all coefficients are true
- *
- * \addexample CwiseAll \label How to check whether a point is inside a box (using operator< and all())
+} // end namespace internal
+
+/** \returns true if all coefficients are true
*
* Example: \include MatrixBase_all.cpp
* Output: \verbinclude MatrixBase_all.out
*
- * \sa MatrixBase::any(), Cwise::operator<()
+ * \sa any(), Cwise::operator<()
*/
template<typename Derived>
-inline bool MatrixBase<Derived>::all() const
+inline bool DenseBase<Derived>::all() const
{
- const bool unroll = SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost)
- <= EIGEN_UNROLLING_LIMIT;
+ enum {
+ unroll = SizeAtCompileTime != Dynamic
+ && CoeffReadCost != Dynamic
+ && NumTraits<Scalar>::AddCost != Dynamic
+ && SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
+ };
if(unroll)
- return ei_all_unroller<Derived,
+ return internal::all_unroller<Derived,
unroll ? int(SizeAtCompileTime) : Dynamic
>::run(derived());
else
{
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < rows(); ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < rows(); ++i)
if (!coeff(i, j)) return false;
return true;
}
}
-/** \array_module
- *
- * \returns true if at least one coefficient is true
+/** \returns true if at least one coefficient is true
*
- * \sa MatrixBase::all()
+ * \sa all()
*/
template<typename Derived>
-inline bool MatrixBase<Derived>::any() const
+inline bool DenseBase<Derived>::any() const
{
- const bool unroll = SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost)
- <= EIGEN_UNROLLING_LIMIT;
+ enum {
+ unroll = SizeAtCompileTime != Dynamic
+ && CoeffReadCost != Dynamic
+ && NumTraits<Scalar>::AddCost != Dynamic
+ && SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
+ };
if(unroll)
- return ei_any_unroller<Derived,
+ return internal::any_unroller<Derived,
unroll ? int(SizeAtCompileTime) : Dynamic
>::run(derived());
else
{
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < rows(); ++i)
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < rows(); ++i)
if (coeff(i, j)) return true;
return false;
}
}
-/** \array_module
- *
- * \returns the number of coefficients which evaluate to true
+/** \returns the number of coefficients which evaluate to true
*
- * \sa MatrixBase::all(), MatrixBase::any()
+ * \sa all(), any()
*/
template<typename Derived>
-inline int MatrixBase<Derived>::count() const
+inline typename DenseBase<Derived>::Index DenseBase<Derived>::count() const
{
- return this->cast<bool>().cast<int>().sum();
+ return derived().template cast<bool>().template cast<Index>().sum();
}
#endif // EIGEN_ALLANDANY_H
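A minimal sketch of all(), any() and count() on coefficient-wise comparisons, including the point-inside-a-box test that the removed \addexample referred to (assuming the Array API for the comparisons):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Vector3f p(0.2f, 0.5f, 0.9f);

      // Is the point strictly inside the unit box?
      bool inside = (p.array() > 0.f).all() && (p.array() < 1.f).all();

      bool anyLarge = (p.array() > 0.8f).any();               // at least one true
      Eigen::Vector3f::Index n = (p.array() > 0.4f).count();  // how many are true

      std::cout << inside << ' ' << anyLarge << ' ' << n << std::endl;
      return 0;
    }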
diff --git a/extern/Eigen2/Eigen/src/Core/CommaInitializer.h b/extern/Eigen3/Eigen/src/Core/CommaInitializer.h
index f66cbd6d5e1..92422bf2fa0 100644
--- a/extern/Eigen2/Eigen/src/Core/CommaInitializer.h
+++ b/extern/Eigen3/Eigen/src/Core/CommaInitializer.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -27,6 +27,7 @@
#define EIGEN_COMMAINITIALIZER_H
/** \class CommaInitializer
+ * \ingroup Core_Module
*
* \brief Helper class used by the comma initializer operator
*
@@ -36,70 +37,72 @@
*
* \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
*/
-template<typename MatrixType>
+template<typename XprType>
struct CommaInitializer
{
- typedef typename ei_traits<MatrixType>::Scalar Scalar;
- inline CommaInitializer(MatrixType& mat, const Scalar& s)
- : m_matrix(mat), m_row(0), m_col(1), m_currentBlockRows(1)
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::Index Index;
+
+ inline CommaInitializer(XprType& xpr, const Scalar& s)
+ : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
{
- m_matrix.coeffRef(0,0) = s;
+ m_xpr.coeffRef(0,0) = s;
}
template<typename OtherDerived>
- inline CommaInitializer(MatrixType& mat, const MatrixBase<OtherDerived>& other)
- : m_matrix(mat), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
+ inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
+ : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
{
- m_matrix.block(0, 0, other.rows(), other.cols()) = other;
+ m_xpr.block(0, 0, other.rows(), other.cols()) = other;
}
/* inserts a scalar value in the target matrix */
CommaInitializer& operator,(const Scalar& s)
{
- if (m_col==m_matrix.cols())
+ if (m_col==m_xpr.cols())
{
m_row+=m_currentBlockRows;
m_col = 0;
m_currentBlockRows = 1;
- ei_assert(m_row<m_matrix.rows()
+ eigen_assert(m_row<m_xpr.rows()
&& "Too many rows passed to comma initializer (operator<<)");
}
- ei_assert(m_col<m_matrix.cols()
+ eigen_assert(m_col<m_xpr.cols()
&& "Too many coefficients passed to comma initializer (operator<<)");
- ei_assert(m_currentBlockRows==1);
- m_matrix.coeffRef(m_row, m_col++) = s;
+ eigen_assert(m_currentBlockRows==1);
+ m_xpr.coeffRef(m_row, m_col++) = s;
return *this;
}
/* inserts a matrix expression in the target matrix */
template<typename OtherDerived>
- CommaInitializer& operator,(const MatrixBase<OtherDerived>& other)
+ CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
{
- if (m_col==m_matrix.cols())
+ if (m_col==m_xpr.cols())
{
m_row+=m_currentBlockRows;
m_col = 0;
m_currentBlockRows = other.rows();
- ei_assert(m_row+m_currentBlockRows<=m_matrix.rows()
+ eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
&& "Too many rows passed to comma initializer (operator<<)");
}
- ei_assert(m_col<m_matrix.cols()
+ eigen_assert(m_col<m_xpr.cols()
&& "Too many coefficients passed to comma initializer (operator<<)");
- ei_assert(m_currentBlockRows==other.rows());
+ eigen_assert(m_currentBlockRows==other.rows());
if (OtherDerived::SizeAtCompileTime != Dynamic)
- m_matrix.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
+ m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
(m_row, m_col) = other;
else
- m_matrix.block(m_row, m_col, other.rows(), other.cols()) = other;
+ m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
m_col += other.cols();
return *this;
}
inline ~CommaInitializer()
{
- ei_assert((m_row+m_currentBlockRows) == m_matrix.rows()
- && m_col == m_matrix.cols()
+ eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
+ && m_col == m_xpr.cols()
&& "Too few coefficients passed to comma initializer (operator<<)");
}
@@ -110,15 +113,12 @@ struct CommaInitializer
* quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
* \endcode
*/
- inline MatrixType& finished() { return m_matrix; }
-
- MatrixType& m_matrix; // target matrix
- int m_row; // current row id
- int m_col; // current col id
- int m_currentBlockRows; // current block height
+ inline XprType& finished() { return m_xpr; }
-private:
- CommaInitializer& operator=(const CommaInitializer&);
+ XprType& m_xpr; // target expression
+ Index m_row; // current row id
+ Index m_col; // current col id
+ Index m_currentBlockRows; // current block height
};
/** \anchor MatrixBaseCommaInitRef
@@ -127,15 +127,13 @@ private:
* The coefficients must be provided in a row major order and exactly match
* the size of the matrix. Otherwise an assertion is raised.
*
- * \addexample CommaInit \label How to easily set all the coefficients of a matrix
- *
* Example: \include MatrixBase_set.cpp
* Output: \verbinclude MatrixBase_set.out
*
* \sa CommaInitializer::finished(), class CommaInitializer
*/
template<typename Derived>
-inline CommaInitializer<Derived> MatrixBase<Derived>::operator<< (const Scalar& s)
+inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
{
return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
}
@@ -144,7 +142,7 @@ inline CommaInitializer<Derived> MatrixBase<Derived>::operator<< (const Scalar&
template<typename Derived>
template<typename OtherDerived>
inline CommaInitializer<Derived>
-MatrixBase<Derived>::operator<<(const MatrixBase<OtherDerived>& other)
+DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
{
return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
}
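A short usage sketch of the comma initializer, ending with the finished() idiom from the doc comment above:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3f m;
      m << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;  // exactly 9 coefficients, supplied in row-major order

      // Matrix expressions can be mixed in; here three column vectors:
      Eigen::Vector3f a0(1, 0, 0), a1(0, 1, 0), a2(0, 0, 1);
      Eigen::Matrix3f basis = (Eigen::Matrix3f() << a0, a1, a2).finished();

      std::cout << m << "\n--\n" << basis << std::endl;
      return 0;
    }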
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h
new file mode 100644
index 00000000000..7386b2e1843
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h
@@ -0,0 +1,240 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CWISE_BINARY_OP_H
+#define EIGEN_CWISE_BINARY_OP_H
+
+/** \class CwiseBinaryOp
+ * \ingroup Core_Module
+ *
+ * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
+ *
+ * \param BinaryOp template functor implementing the operator
+ * \param Lhs the type of the left-hand side
+ * \param Rhs the type of the right-hand side
+ *
+ * This class represents an expression where a coefficient-wise binary operator is applied to two expressions.
+ * It is the return type of binary operators, by which we mean only those binary operators where
+ * both the left-hand side and the right-hand side are Eigen expressions.
+ * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
+ *
+ * Most of the time, this is the only way that it is used, so you typically don't have to name
+ * CwiseBinaryOp types explicitly.
+ *
+ * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
+ */
+
+namespace internal {
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+ // we must not inherit from traits<Lhs> since it has
+ // the potential to cause problems with MSVC
+ typedef typename remove_all<Lhs>::type Ancestor;
+ typedef typename traits<Ancestor>::XprKind XprKind;
+ enum {
+ RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
+ ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
+ MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
+ };
+
+ // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
+ // we still want to handle the case when the result type is different.
+ typedef typename result_of<
+ BinaryOp(
+ typename Lhs::Scalar,
+ typename Rhs::Scalar
+ )
+ >::type Scalar;
+ typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
+ typename traits<Rhs>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+ SameType = is_same<typename _LhsNested::Scalar,typename _RhsNested::Scalar>::value,
+ StorageOrdersAgree = (int(Lhs::Flags)&RowMajorBit)==(int(Rhs::Flags)&RowMajorBit),
+ Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
+ HereditaryBits
+ | (int(LhsFlags) & int(RhsFlags) &
+ ( AlignedBit
+ | (StorageOrdersAgree ? LinearAccessBit : 0)
+ | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
+ )
+ )
+ ),
+ Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
+ CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
+ };
+};
+} // end namespace internal
+
+// we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor
+// that would take two operands of different types. If there were such an example, then this check should be
+// moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as
+// currently they take only one typename Scalar template parameter.
+// It is tempting to always allow mixing different types, but this is often impossible in the vectorized paths.
+// Allowing it would therefore produce very unexpected errors once vectorization is enabled, e.g. when the user
+// tries to add together a float matrix and a double matrix.
+#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
+ EIGEN_STATIC_ASSERT((internal::functor_allows_mixing_real_and_complex<BINOP>::ret \
+ ? int(internal::is_same<typename NumTraits<LHS>::Real, typename NumTraits<RHS>::Real>::value) \
+ : int(internal::is_same<LHS, RHS>::value)), \
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
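Concretely, the static assertion fires whenever the two scalar types differ, and the fix is the explicit cast that the assertion message points to. A sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXf f = Eigen::MatrixXf::Ones(2, 2);
      Eigen::MatrixXd d = Eigen::MatrixXd::Ones(2, 2);

      // Eigen::MatrixXd bad = f + d;  // does not compile: the static assert
      // YOU_MIXED_DIFFERENT_NUMERIC_TYPES... fires in the CwiseBinaryOp constructor

      Eigen::MatrixXd ok = f.cast<double>() + d;  // explicit cast, compiles fine
      return 0;
    }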
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
+class CwiseBinaryOpImpl;
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOp : internal::no_assignment_operator,
+ public CwiseBinaryOpImpl<
+ BinaryOp, Lhs, Rhs,
+ typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
+ typename internal::traits<Rhs>::StorageKind>::ret>
+{
+ public:
+
+ typedef typename CwiseBinaryOpImpl<
+ BinaryOp, Lhs, Rhs,
+ typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
+ typename internal::traits<Rhs>::StorageKind>::ret>::Base Base;
+ EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
+
+ typedef typename internal::nested<Lhs>::type LhsNested;
+ typedef typename internal::nested<Rhs>::type RhsNested;
+ typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+ typedef typename internal::remove_reference<RhsNested>::type _RhsNested;
+
+ EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp())
+ : m_lhs(lhs), m_rhs(rhs), m_functor(func)
+ {
+ EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);
+ // require the sizes to match
+ EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
+ eigen_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const {
+ // return the fixed size type if available to enable compile time optimizations
+ if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)
+ return m_rhs.rows();
+ else
+ return m_lhs.rows();
+ }
+ EIGEN_STRONG_INLINE Index cols() const {
+ // return the fixed size type if available to enable compile time optimizations
+ if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)
+ return m_rhs.cols();
+ else
+ return m_lhs.cols();
+ }
+
+ /** \returns the left hand side nested expression */
+ const _LhsNested& lhs() const { return m_lhs; }
+ /** \returns the right hand side nested expression */
+ const _RhsNested& rhs() const { return m_rhs; }
+ /** \returns the functor representing the binary operation */
+ const BinaryOp& functor() const { return m_functor; }
+
+ protected:
+ const LhsNested m_lhs;
+ const RhsNested m_rhs;
+ const BinaryOp m_functor;
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Dense>
+ : public internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
+{
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+ public:
+
+ typedef typename internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
+
+ EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+ {
+ return derived().functor()(derived().lhs().coeff(row, col),
+ derived().rhs().coeff(row, col));
+ }
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+ {
+ return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(row, col),
+ derived().rhs().template packet<LoadMode>(row, col));
+ }
+
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+ {
+ return derived().functor()(derived().lhs().coeff(index),
+ derived().rhs().coeff(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+ {
+ return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(index),
+ derived().rhs().template packet<LoadMode>(index));
+ }
+};
+
+/** replaces \c *this by \c *this - \a other.
+ *
+ * \returns a reference to \c *this
+ */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
+{
+ SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, Derived, OtherDerived> tmp(derived());
+ tmp = other.derived();
+ return derived();
+}
+
+/** replaces \c *this by \c *this + \a other.
+ *
+ * \returns a reference to \c *this
+ */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
+{
+ SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived> tmp(derived());
+ tmp = other.derived();
+ return derived();
+}
+
+#endif // EIGEN_CWISE_BINARY_OP_H
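Both the built-in operators and binaryExpr() return CwiseBinaryOp expressions, while operator+= and operator-= above write in place through SelfCwiseBinaryOp. A sketch with a custom functor (MaxFunctor is illustrative, not part of Eigen):

    #include <Eigen/Dense>
    #include <iostream>

    // Hypothetical coefficient-wise functor for binaryExpr()
    struct MaxFunctor {
      float operator()(float a, float b) const { return a > b ? a : b; }
    };

    int main()
    {
      Eigen::MatrixXf a(2, 2), b(2, 2);
      a << 1, 4, 2, 8;
      b << 3, 2, 5, 1;

      Eigen::MatrixXf s = a + b;  // a CwiseBinaryOp with internal::scalar_sum_op
      Eigen::MatrixXf m = a.binaryExpr(b, MaxFunctor());
      a += b;                     // in place, via SelfCwiseBinaryOp as defined above

      std::cout << s << "\n--\n" << m << "\n--\n" << a << std::endl;
      return 0;
    }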
diff --git a/extern/Eigen2/Eigen/src/Core/CwiseNullaryOp.h b/extern/Eigen3/Eigen/src/Core/CwiseNullaryOp.h
index 4ee5b58afec..c616e7ae13d 100644
--- a/extern/Eigen2/Eigen/src/Core/CwiseNullaryOp.h
+++ b/extern/Eigen3/Eigen/src/Core/CwiseNullaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -26,81 +26,84 @@
#define EIGEN_CWISE_NULLARY_OP_H
/** \class CwiseNullaryOp
+ * \ingroup Core_Module
*
* \brief Generic expression of a matrix where all coefficients are defined by a functor
*
* \param NullaryOp template functor implementing the operator
+ * \param PlainObjectType the underlying plain matrix/array type
*
* This class represents an expression of a generic nullary operator.
- * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() functions,
+ * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,
* and most of the time this is the only way it is used.
*
* However, if you want to write a function returning such an expression, you
* will need to use this class.
*
- * \sa class CwiseUnaryOp, class CwiseBinaryOp, MatrixBase::NullaryExpr()
+ * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr()
*/
-template<typename NullaryOp, typename MatrixType>
-struct ei_traits<CwiseNullaryOp<NullaryOp, MatrixType> > : ei_traits<MatrixType>
+
+namespace internal {
+template<typename NullaryOp, typename PlainObjectType>
+struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType>
{
enum {
- Flags = (ei_traits<MatrixType>::Flags
+ Flags = (traits<PlainObjectType>::Flags
& ( HereditaryBits
- | (ei_functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
- | (ei_functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
- | (ei_functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
- CoeffReadCost = ei_functor_traits<NullaryOp>::Cost
+ | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
+ | (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
+ | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
+ CoeffReadCost = functor_traits<NullaryOp>::Cost
};
};
+}
-template<typename NullaryOp, typename MatrixType>
-class CwiseNullaryOp : ei_no_assignment_operator,
- public MatrixBase<CwiseNullaryOp<NullaryOp, MatrixType> >
+template<typename NullaryOp, typename PlainObjectType>
+class CwiseNullaryOp : internal::no_assignment_operator,
+ public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp, PlainObjectType> >::type
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseNullaryOp)
+ typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
- CwiseNullaryOp(int rows, int cols, const NullaryOp& func = NullaryOp())
+ CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())
: m_rows(rows), m_cols(cols), m_functor(func)
{
- ei_assert(rows > 0
- && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
- && cols > 0
- && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
+ eigen_assert(rows >= 0
+ && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
+ && cols >= 0
+ && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
}
- EIGEN_STRONG_INLINE int rows() const { return m_rows.value(); }
- EIGEN_STRONG_INLINE int cols() const { return m_cols.value(); }
+ EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
- EIGEN_STRONG_INLINE const Scalar coeff(int rows, int cols) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index rows, Index cols) const
{
return m_functor(rows, cols);
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int, int) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
{
- return m_functor.packetOp();
+ return m_functor.packetOp(row, col);
}
- EIGEN_STRONG_INLINE const Scalar coeff(int index) const
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
{
- if(RowsAtCompileTime == 1)
- return m_functor(0, index);
- else
- return m_functor(index, 0);
+ return m_functor(index);
}
template<int LoadMode>
- EIGEN_STRONG_INLINE PacketScalar packet(int) const
+ EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
{
- return m_functor.packetOp();
+ return m_functor.packetOp(index);
}
protected:
- const ei_int_if_dynamic<RowsAtCompileTime> m_rows;
- const ei_int_if_dynamic<ColsAtCompileTime> m_cols;
+ const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+ const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
const NullaryOp m_functor;
};
@@ -121,7 +124,7 @@ class CwiseNullaryOp : ei_no_assignment_operator,
template<typename Derived>
template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
-MatrixBase<Derived>::NullaryExpr(int rows, int cols, const CustomNullaryOp& func)
+DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
{
return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func);
}
@@ -144,17 +147,16 @@ MatrixBase<Derived>::NullaryExpr(int rows, int cols, const CustomNullaryOp& func
template<typename Derived>
template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
-MatrixBase<Derived>::NullaryExpr(int size, const CustomNullaryOp& func)
+DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- ei_assert(IsVectorAtCompileTime);
if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, Derived>(1, size, func);
else return CwiseNullaryOp<CustomNullaryOp, Derived>(size, 1, func);
}
/** \returns an expression of a matrix defined by a custom functor \a func
*
- * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+ * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
* need to use the variants taking size arguments.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
@@ -164,7 +166,7 @@ MatrixBase<Derived>::NullaryExpr(int size, const CustomNullaryOp& func)
template<typename Derived>
template<typename CustomNullaryOp>
EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
-MatrixBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
+DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
{
return CwiseNullaryOp<CustomNullaryOp, Derived>(RowsAtCompileTime, ColsAtCompileTime, func);
}
@@ -172,7 +174,7 @@ MatrixBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
/** \returns an expression of a constant matrix of value \a value
*
* The parameters \a rows and \a cols are the number of rows and of columns of
- * the returned matrix. Must be compatible with this MatrixBase type.
+ * the returned matrix. Must be compatible with this DenseBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
* it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
@@ -183,16 +185,16 @@ MatrixBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
* \sa class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Constant(int rows, int cols, const Scalar& value)
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)
{
- return NullaryExpr(rows, cols, ei_scalar_constant_op<Scalar>(value));
+ return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));
}
/** \returns an expression of a constant matrix of value \a value
*
* The parameter \a size is the size of the returned vector.
- * Must be compatible with this MatrixBase type.
+ * Must be compatible with this DenseBase type.
*
* \only_for_vectors
*
@@ -205,15 +207,15 @@ MatrixBase<Derived>::Constant(int rows, int cols, const Scalar& value)
* \sa class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Constant(int size, const Scalar& value)
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(Index size, const Scalar& value)
{
- return NullaryExpr(size, ei_scalar_constant_op<Scalar>(value));
+ return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
}
/** \returns an expression of a constant matrix of value \a value
*
- * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+ * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
* need to use the variants taking size arguments.
*
* The template parameter \a CustomNullaryOp is the type of the functor.
@@ -221,21 +223,90 @@ MatrixBase<Derived>::Constant(int size, const Scalar& value)
* \sa class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Constant(const Scalar& value)
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(const Scalar& value)
+{
+ EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+ return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value));
+}
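A sketch of Constant() for dynamic- and fixed-size types, together with the in-place setters:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // Dynamic-size: rows and cols are passed explicitly
      Eigen::MatrixXf m = Eigen::MatrixXf::Constant(2, 3, 7.f);

      // Fixed-size: the size is part of the type, so no size arguments
      Eigen::Vector3f v = Eigen::Vector3f::Constant(1.f);

      m.setConstant(0.5f);  // in-place variants: setConstant() and fill()
      std::cout << m << "\n" << v.transpose() << std::endl;
      return 0;
    }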
+
+/**
+ * \brief Generates a linearly spaced vector.
+ *
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
+ * This particular version of LinSpaced() uses sequential access, i.e. vector access is
+ * assumed to be a(0), a(1), ..., a(size-1). This assumption allows for better vectorization
+ * and yields faster code than the random access version.
+ *
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_LinSpaced_seq.cpp
+ * Output: \verbinclude DenseBase_LinSpaced_seq.out
+ *
+ * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Index,Scalar,Scalar), CwiseNullaryOp
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,false>(low,high,size));
+}
+
+/**
+ * \copydoc DenseBase::LinSpaced(Sequential_t, Index, const Scalar&, const Scalar&)
+ * Special version for fixed-size types, which does not require the size parameter.
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,false>(low,high,Derived::SizeAtCompileTime));
+}
+
+/**
+ * \brief Returns an expression of a linearly spaced vector.
+ *
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
+ *
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_LinSpaced.cpp
+ * Output: \verbinclude DenseBase_LinSpaced.out
+ *
+ * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Sequential_t,Index,const Scalar&,const Scalar&), CwiseNullaryOp
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,true>(low,high,size));
+}
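A sketch of the random-access variant in use; the sequential-access overloads above produce the same values and differ only in the functor's access pattern (the values below are arbitrary):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      // Five equally spaced values over the closed interval [0,1]:
      // 0 0.25 0.5 0.75 1
      Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
      std::cout << v.transpose() << std::endl;
      return 0;
    }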
+
+/**
+ * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&)
+ * Special version for fixed-size types, which does not require the size parameter.
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
- return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_constant_op<Scalar>(value));
+ return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,true>(low,high,Derived::SizeAtCompileTime));
}
/** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
template<typename Derived>
-bool MatrixBase<Derived>::isApproxToConstant
+bool DenseBase<Derived>::isApproxToConstant
(const Scalar& value, RealScalar prec) const
{
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < rows(); ++i)
- if(!ei_isApprox(coeff(i, j), value, prec))
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < rows(); ++i)
+ if(!internal::isApprox(this->coeff(i, j), value, prec))
return false;
return true;
}
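A quick sketch of this fuzzy comparison in use; the perturbation below is arbitrary and well within the default precision:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Constant(2, 2, 3.0);
      m(0, 0) += 1e-14;                    // tiny perturbation
      bool a = m.isApproxToConstant(3.0);  // true: equal within default precision
      bool b = m.isConstant(3.0);          // synonym, defined just below
      return (a && b) ? 0 : 1;
    }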
@@ -244,7 +315,7 @@ bool MatrixBase<Derived>::isApproxToConstant
*
* \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */
template<typename Derived>
-bool MatrixBase<Derived>::isConstant
+bool DenseBase<Derived>::isConstant
(const Scalar& value, RealScalar prec) const
{
return isApproxToConstant(value, prec);
@@ -255,17 +326,17 @@ bool MatrixBase<Derived>::isConstant
* \sa setConstant(), Constant(), class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE void MatrixBase<Derived>::fill(const Scalar& value)
+EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& value)
{
setConstant(value);
}
/** Sets all coefficients in this expression to \a value.
*
- * \sa fill(), setConstant(int,const Scalar&), setConstant(int,int,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes()
+ * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setConstant(const Scalar& value)
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& value)
{
return derived() = Constant(rows(), cols(), value);
}
@@ -274,14 +345,14 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setConstant(const Scalar& valu
*
* \only_for_vectors
*
- * Example: \include Matrix_set_int.cpp
+ * Example: \include Matrix_setConstant_int.cpp
* Output: \verbinclude Matrix_setConstant_int.out
*
- * \sa MatrixBase::setConstant(const Scalar&), setConstant(int,int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+ * \sa DenseBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, DenseBase::Constant(const Scalar&)
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setConstant(int size, const Scalar& value)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setConstant(Index size, const Scalar& value)
{
resize(size);
return setConstant(value);
@@ -291,20 +362,39 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setConstant(int siz
*
* \param rows the new number of rows
* \param cols the new number of columns
+ * \param value the value to which all coefficients are set
*
* Example: \include Matrix_setConstant_int_int.cpp
* Output: \verbinclude Matrix_setConstant_int_int.out
*
- * \sa MatrixBase::setConstant(const Scalar&), setConstant(int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+ * \sa DenseBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, DenseBase::Constant(const Scalar&)
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setConstant(int rows, int cols, const Scalar& value)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& value)
{
resize(rows, cols);
return setConstant(value);
}
+/**
+ * \brief Sets a linearly spaced vector.
+ *
+ * The function generates 'size' equally spaced values in the closed interval [low,high].
+ *
+ * \only_for_vectors
+ *
+ * Example: \include DenseBase_setLinSpaced.cpp
+ * Output: \verbinclude DenseBase_setLinSpaced.out
+ *
+ * \sa CwiseNullaryOp
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index size, const Scalar& low, const Scalar& high)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return derived() = Derived::NullaryExpr(size, internal::linspaced_op<Scalar,false>(low,high,size));
+}
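The in-place variant overwrites an existing vector; per the implementation above it uses the sequential-access functor. A brief sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::VectorXd v(5);
      v.setLinSpaced(5, -1.0, 1.0);  // -1 -0.5 0 0.5 1, written in place
      return 0;
    }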
// zero:
@@ -317,16 +407,14 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setConstant(int row
* it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
* instead.
*
- * \addexample Zero \label How to take get a zero matrix
- *
* Example: \include MatrixBase_zero_int_int.cpp
* Output: \verbinclude MatrixBase_zero_int_int.out
*
- * \sa Zero(), Zero(int)
+ * \sa Zero(), Zero(Index)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Zero(int rows, int cols)
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero(Index rows, Index cols)
{
return Constant(rows, cols, Scalar(0));
}
@@ -345,11 +433,11 @@ MatrixBase<Derived>::Zero(int rows, int cols)
* Example: \include MatrixBase_zero_int.cpp
* Output: \verbinclude MatrixBase_zero_int.out
*
- * \sa Zero(), Zero(int,int)
+ * \sa Zero(), Zero(Index,Index)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Zero(int size)
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero(Index size)
{
return Constant(size, Scalar(0));
}
@@ -362,11 +450,11 @@ MatrixBase<Derived>::Zero(int size)
* Example: \include MatrixBase_zero.cpp
* Output: \verbinclude MatrixBase_zero.out
*
- * \sa Zero(int), Zero(int,int)
+ * \sa Zero(Index), Zero(Index,Index)
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Zero()
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero()
{
return Constant(Scalar(0));
}
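As the three bodies above show, the Zero() family is a thin wrapper over Constant() with value 0. A usage sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Zero(3, 4);  // dynamic size
      Eigen::Vector3d v = Eigen::Vector3d::Zero();      // fixed size
      return (m.isZero() && v.isZero()) ? 0 : 1;        // fuzzy test, see below
    }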
@@ -380,11 +468,11 @@ MatrixBase<Derived>::Zero()
* \sa class CwiseNullaryOp, Zero()
*/
template<typename Derived>
-bool MatrixBase<Derived>::isZero(RealScalar prec) const
+bool DenseBase<Derived>::isZero(RealScalar prec) const
{
- for(int j = 0; j < cols(); ++j)
- for(int i = 0; i < rows(); ++i)
- if(!ei_isMuchSmallerThan(coeff(i, j), static_cast<Scalar>(1), prec))
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < rows(); ++i)
+ if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<Scalar>(1), prec))
return false;
return true;
}
@@ -397,7 +485,7 @@ bool MatrixBase<Derived>::isZero(RealScalar prec) const
* \sa class CwiseNullaryOp, Zero()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setZero()
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
{
return setConstant(Scalar(0));
}
@@ -409,11 +497,11 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setZero()
* Example: \include Matrix_setZero_int.cpp
* Output: \verbinclude Matrix_setZero_int.out
*
- * \sa MatrixBase::setZero(), setZero(int,int), class CwiseNullaryOp, MatrixBase::Zero()
+ * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setZero(int size)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setZero(Index size)
{
resize(size);
return setConstant(Scalar(0));
@@ -427,11 +515,11 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setZero(int size)
* Example: \include Matrix_setZero_int_int.cpp
* Output: \verbinclude Matrix_setZero_int_int.out
*
- * \sa MatrixBase::setZero(), setZero(int), class CwiseNullaryOp, MatrixBase::Zero()
+ * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setZero(int rows, int cols)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setZero(Index rows, Index cols)
{
resize(rows, cols);
return setConstant(Scalar(0));
@@ -448,16 +536,14 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setZero(int rows, i
* it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used
* instead.
*
- * \addexample One \label How to get a matrix with all coefficients equal one
- *
* Example: \include MatrixBase_ones_int_int.cpp
* Output: \verbinclude MatrixBase_ones_int_int.out
*
- * \sa Ones(), Ones(int), isOnes(), class Ones
+ * \sa Ones(), Ones(Index), isOnes(), class Ones
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Ones(int rows, int cols)
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones(Index rows, Index cols)
{
return Constant(rows, cols, Scalar(1));
}
@@ -476,11 +562,11 @@ MatrixBase<Derived>::Ones(int rows, int cols)
* Example: \include MatrixBase_ones_int.cpp
* Output: \verbinclude MatrixBase_ones_int.out
*
- * \sa Ones(), Ones(int,int), isOnes(), class Ones
+ * \sa Ones(), Ones(Index,Index), isOnes(), class Ones
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Ones(int size)
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones(Index size)
{
return Constant(size, Scalar(1));
}
@@ -493,11 +579,11 @@ MatrixBase<Derived>::Ones(int size)
* Example: \include MatrixBase_ones.cpp
* Output: \verbinclude MatrixBase_ones.out
*
- * \sa Ones(int), Ones(int,int), isOnes(), class Ones
+ * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::ConstantReturnType
-MatrixBase<Derived>::Ones()
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones()
{
return Constant(Scalar(1));
}
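Ones() mirrors Zero() with value 1; the in-place setOnes() and the fuzzy test isOnes() shown further down complete the set. A brief sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::RowVector4f r = Eigen::RowVector4f::Ones();
      Eigen::MatrixXd m(2, 2);
      m.setOnes();  // in-place equivalent of m = Eigen::MatrixXd::Ones(2, 2)
      return (r.isOnes() && m.isOnes()) ? 0 : 1;
    }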
@@ -511,7 +597,7 @@ MatrixBase<Derived>::Ones()
* \sa class CwiseNullaryOp, Ones()
*/
template<typename Derived>
-bool MatrixBase<Derived>::isOnes
+bool DenseBase<Derived>::isOnes
(RealScalar prec) const
{
return isApproxToConstant(Scalar(1), prec);
@@ -525,7 +611,7 @@ bool MatrixBase<Derived>::isOnes
* \sa class CwiseNullaryOp, Ones()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setOnes()
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
{
return setConstant(Scalar(1));
}
@@ -537,11 +623,11 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setOnes()
* Example: \include Matrix_setOnes_int.cpp
* Output: \verbinclude Matrix_setOnes_int.out
*
- * \sa MatrixBase::setOnes(), setOnes(int,int), class CwiseNullaryOp, MatrixBase::Ones()
+ * \sa DenseBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, DenseBase::Ones()
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setOnes(int size)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setOnes(Index size)
{
resize(size);
return setConstant(Scalar(1));
@@ -555,11 +641,11 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setOnes(int size)
* Example: \include Matrix_setOnes_int_int.cpp
* Output: \verbinclude Matrix_setOnes_int_int.out
*
- * \sa MatrixBase::setOnes(), setOnes(int), class CwiseNullaryOp, MatrixBase::Ones()
+ * \sa DenseBase::setOnes(), setOnes(Index), class CwiseNullaryOp, DenseBase::Ones()
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setOnes(int rows, int cols)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setOnes(Index rows, Index cols)
{
resize(rows, cols);
return setConstant(Scalar(1));
@@ -576,8 +662,6 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setOnes(int rows, i
* it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used
* instead.
*
- * \addexample Identity \label How to get an identity matrix
- *
* Example: \include MatrixBase_identity_int_int.cpp
* Output: \verbinclude MatrixBase_identity_int_int.out
*
@@ -585,9 +669,9 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setOnes(int rows, i
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
-MatrixBase<Derived>::Identity(int rows, int cols)
+MatrixBase<Derived>::Identity(Index rows, Index cols)
{
- return NullaryExpr(rows, cols, ei_scalar_identity_op<Scalar>());
+ return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());
}
/** \returns an expression of the identity matrix (not necessarily square).
@@ -598,14 +682,14 @@ MatrixBase<Derived>::Identity(int rows, int cols)
* Example: \include MatrixBase_identity.cpp
* Output: \verbinclude MatrixBase_identity.out
*
- * \sa Identity(int,int), setIdentity(), isIdentity()
+ * \sa Identity(Index,Index), setIdentity(), isIdentity()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
MatrixBase<Derived>::Identity()
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
- return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_identity_op<Scalar>());
+ return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>());
}
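As the documentation above notes, the identity expression need not be square; off-diagonal coefficients are simply zero. A sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::Matrix3d i3 = Eigen::Matrix3d::Identity();     // fixed size, square
      Eigen::MatrixXd r  = Eigen::MatrixXd::Identity(2, 4); // rectangular identity
      return (i3.isIdentity() && r(0, 0) == 1.0 && r(0, 1) == 0.0) ? 0 : 1;
    }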
/** \returns true if *this is approximately equal to the identity matrix
@@ -615,24 +699,24 @@ MatrixBase<Derived>::Identity()
* Example: \include MatrixBase_isIdentity.cpp
* Output: \verbinclude MatrixBase_isIdentity.out
*
- * \sa class CwiseNullaryOp, Identity(), Identity(int,int), setIdentity()
+ * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
*/
template<typename Derived>
bool MatrixBase<Derived>::isIdentity
(RealScalar prec) const
{
- for(int j = 0; j < cols(); ++j)
+ for(Index j = 0; j < cols(); ++j)
{
- for(int i = 0; i < rows(); ++i)
+ for(Index i = 0; i < rows(); ++i)
{
if(i == j)
{
- if(!ei_isApprox(coeff(i, j), static_cast<Scalar>(1), prec))
+ if(!internal::isApprox(this->coeff(i, j), static_cast<Scalar>(1), prec))
return false;
}
else
{
- if(!ei_isMuchSmallerThan(coeff(i, j), static_cast<RealScalar>(1), prec))
+ if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<RealScalar>(1), prec))
return false;
}
}
@@ -640,8 +724,10 @@ bool MatrixBase<Derived>::isIdentity
return true;
}
+namespace internal {
+
template<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)>
-struct ei_setIdentity_impl
+struct setIdentity_impl
{
static EIGEN_STRONG_INLINE Derived& run(Derived& m)
{
@@ -650,31 +736,34 @@ struct ei_setIdentity_impl
};
template<typename Derived>
-struct ei_setIdentity_impl<Derived, true>
+struct setIdentity_impl<Derived, true>
{
+ typedef typename Derived::Index Index;
static EIGEN_STRONG_INLINE Derived& run(Derived& m)
{
m.setZero();
- const int size = std::min(m.rows(), m.cols());
- for(int i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
+ const Index size = (std::min)(m.rows(), m.cols());
+ for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
return m;
}
};
+} // end namespace internal
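The two specializations above dispatch on compile-time size: for fixed-size types with at least 16 coefficients (the "Big" case), assigning the identity nullary expression coefficient by coefficient would be wasteful, so the matrix is zeroed and only the diagonal is written. Note that dynamic-size objects take the generic path, since their SizeAtCompileTime is the Dynamic constant (-1). A hand-written equivalent of the "Big" path, for illustration only (real code should simply call setIdentity()):

    #include <Eigen/Dense>
    #include <algorithm>

    template<typename MatrixType>
    MatrixType& naiveSetIdentity(MatrixType& m)
    {
      typedef typename MatrixType::Index Index;
      m.setZero();
      const Index n = (std::min)(m.rows(), m.cols());
      for (Index i = 0; i < n; ++i)
        m(i, i) = typename MatrixType::Scalar(1);
      return m;
    }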
+
/** Writes the identity expression (not necessarily square) into *this.
*
* Example: \include MatrixBase_setIdentity.cpp
* Output: \verbinclude MatrixBase_setIdentity.out
*
- * \sa class CwiseNullaryOp, Identity(), Identity(int,int), isIdentity()
+ * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
*/
template<typename Derived>
EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
{
- return ei_setIdentity_impl<Derived>::run(derived());
+ return internal::setIdentity_impl<Derived>::run(derived());
}
-/** Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
+/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
*
* \param rows the new number of rows
* \param cols the new number of columns
@@ -684,11 +773,10 @@ EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
*
* \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setIdentity(int rows, int cols)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)
{
- resize(rows, cols);
+ derived().resize(rows, cols);
return setIdentity();
}
@@ -696,10 +784,10 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setIdentity(int row
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(int size, int i)
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index size, Index i)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(size,size), i);
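A sketch of the canonical basis vectors in use, covering both the dynamic-size form and the fixed-size shorthands declared below:

    #include <Eigen/Dense>

    int main()
    {
      // Third canonical basis vector of R^5: (0, 0, 1, 0, 0).
      Eigen::VectorXd e2 = Eigen::VectorXd::Unit(5, 2);
      Eigen::Vector3d x  = Eigen::Vector3d::UnitX();  // (1, 0, 0)
      Eigen::Vector4d w  = Eigen::Vector4d::UnitW();  // (0, 0, 0, 1)
      return (e2(2) == 1.0 && x.x() == 1.0 && w(3) == 1.0) ? 0 : 1;
    }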
@@ -711,10 +799,10 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
*
* This variant is for fixed-size vector only.
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
-EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(int i)
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
return BasisReturnType(SquareMatrixType::Identity(),i);
@@ -724,7 +812,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
@@ -734,7 +822,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitZ(), MatrixBase::UnitW()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
@@ -744,7 +832,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitW()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
@@ -754,7 +842,7 @@ EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBa
*
* \only_for_vectors
*
- * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+ * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ()
*/
template<typename Derived>
EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h b/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h
new file mode 100644
index 00000000000..958571d64bf
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h
@@ -0,0 +1,137 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CWISE_UNARY_OP_H
+#define EIGEN_CWISE_UNARY_OP_H
+
+/** \class CwiseUnaryOp
+ * \ingroup Core_Module
+ *
+ * \brief Generic expression where a coefficient-wise unary operator is applied to an expression
+ *
+ * \param UnaryOp template functor implementing the operator
+ * \param XprType the type of the expression to which we are applying the unary operator
+ *
+ * This class represents an expression where a unary operator is applied to an expression.
+ * It is the return type of all operations taking exactly 1 input expression, regardless of the
+ * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
+ * is considered unary, because only the right-hand side is an expression, and its
+ * return type is a specialization of CwiseUnaryOp.
+ *
+ * Most of the time, this is the only way that it is used, so you typically don't have to name
+ * CwiseUnaryOp types explicitly.
+ *
+ * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
+ */
+
+namespace internal {
+template<typename UnaryOp, typename XprType>
+struct traits<CwiseUnaryOp<UnaryOp, XprType> >
+ : traits<XprType>
+{
+ typedef typename result_of<
+ UnaryOp(typename XprType::Scalar)
+ >::type Scalar;
+ typedef typename XprType::Nested XprTypeNested;
+ typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+ enum {
+ Flags = _XprTypeNested::Flags & (
+ HereditaryBits | LinearAccessBit | AlignedBit
+ | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
+ CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
+ };
+};
+}
+
+template<typename UnaryOp, typename XprType, typename StorageKind>
+class CwiseUnaryOpImpl;
+
+template<typename UnaryOp, typename XprType>
+class CwiseUnaryOp : internal::no_assignment_operator,
+ public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>
+{
+ public:
+
+ typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename internal::traits<XprType>::StorageKind>::Base Base;
+ EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
+
+ inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
+ : m_xpr(xpr), m_functor(func) {}
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
+
+ /** \returns the functor representing the unary operation */
+ const UnaryOp& functor() const { return m_functor; }
+
+ /** \returns the nested expression */
+ const typename internal::remove_all<typename XprType::Nested>::type&
+ nestedExpression() const { return m_xpr; }
+
+ /** \returns the nested expression */
+ typename internal::remove_all<typename XprType::Nested>::type&
+ nestedExpression() { return m_xpr.const_cast_derived(); }
+
+ protected:
+ const typename XprType::Nested m_xpr;
+ const UnaryOp m_functor;
+};
+
+// This is the generic implementation for dense storage.
+// It can be used for any expression types implementing the dense concept.
+template<typename UnaryOp, typename XprType>
+class CwiseUnaryOpImpl<UnaryOp,XprType,Dense>
+ : public internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type
+{
+ public:
+
+ typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
+ typedef typename internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+
+ EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+ {
+ return derived().functor()(derived().nestedExpression().coeff(row, col));
+ }
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+ {
+ return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(row, col));
+ }
+
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+ {
+ return derived().functor()(derived().nestedExpression().coeff(index));
+ }
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+ {
+ return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index));
+ }
+};
+
+#endif // EIGEN_CWISE_UNARY_OP_H
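As the class documentation above says, CwiseUnaryOp is rarely named explicitly; it is obtained through operations such as unaryExpr(). A sketch with a custom functor (the Clamp functor below is illustrative, not part of Eigen):

    #include <Eigen/Dense>

    // Clamps each coefficient to the interval [lo, hi].
    struct Clamp
    {
      typedef double result_type;  // helps older result_of deduction
      double lo, hi;
      Clamp(double l, double h) : lo(l), hi(h) {}
      double operator()(double x) const { return x < lo ? lo : (x > hi ? hi : x); }
    };

    int main()
    {
      Eigen::MatrixXd m(2, 2);
      m << -2.0, 0.5,
            3.0, 1.0;
      // unaryExpr() returns a CwiseUnaryOp<Clamp, ...>; nothing is evaluated
      // until the expression is assigned.
      Eigen::MatrixXd c = m.unaryExpr(Clamp(0.0, 1.0));
      return 0;
    }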
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h b/extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h
new file mode 100644
index 00000000000..d24ef037314
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/CwiseUnaryView.h
@@ -0,0 +1,148 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CWISE_UNARY_VIEW_H
+#define EIGEN_CWISE_UNARY_VIEW_H
+
+/** \class CwiseUnaryView
+ * \ingroup Core_Module
+ *
+ * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector
+ *
+ * \param ViewOp template functor implementing the view
+ * \param MatrixType the type of the matrix to which we are applying the unary operator
+ *
+ * This class represents an lvalue expression of a generic unary view operator of a matrix or a vector.
+ * It is the return type of real() and imag(), and most of the time this is the only way it is used.
+ *
+ * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp
+ */
+
+namespace internal {
+template<typename ViewOp, typename MatrixType>
+struct traits<CwiseUnaryView<ViewOp, MatrixType> >
+ : traits<MatrixType>
+{
+ typedef typename result_of<
+ ViewOp(typename traits<MatrixType>::Scalar)
+ >::type Scalar;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+ enum {
+ Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)),
+ CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+ MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret,
+ // need to cast the sizeof's from size_t to int explicitly, otherwise:
+ // "error: no integral type can represent all of the enumerator values
+ InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic
+ ? int(Dynamic)
+ : int(MatrixTypeInnerStride)
+ * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)),
+ OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret
+ };
+};
+}
+
+template<typename ViewOp, typename MatrixType, typename StorageKind>
+class CwiseUnaryViewImpl;
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryView : internal::no_assignment_operator,
+ public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>
+{
+ public:
+
+ typedef typename CwiseUnaryViewImpl<ViewOp, MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
+ EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView)
+
+ inline CwiseUnaryView(const MatrixType& mat, const ViewOp& func = ViewOp())
+ : m_matrix(mat), m_functor(func) {}
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }
+
+ /** \returns the functor representing the unary operation */
+ const ViewOp& functor() const { return m_functor; }
+
+ /** \returns the nested expression */
+ const typename internal::remove_all<typename MatrixType::Nested>::type&
+ nestedExpression() const { return m_matrix; }
+
+ /** \returns the nested expression */
+ typename internal::remove_all<typename MatrixType::Nested>::type&
+ nestedExpression() { return m_matrix.const_cast_derived(); }
+
+ protected:
+ // FIXME changed from MatrixType::Nested because of a weird compilation error with sun CC
+ const typename internal::nested<MatrixType>::type m_matrix;
+ ViewOp m_functor;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
+ : public internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type
+{
+ public:
+
+ typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
+ typedef typename internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base;
+
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+
+ inline Index innerStride() const
+ {
+ return derived().nestedExpression().innerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);
+ }
+
+ inline Index outerStride() const
+ {
+ return derived().nestedExpression().outerStride();
+ }
+
+ EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
+ {
+ return derived().functor()(derived().nestedExpression().coeff(row, col));
+ }
+
+ EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ {
+ return derived().functor()(derived().nestedExpression().coeff(index));
+ }
+
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+ {
+ return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col));
+ }
+
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+ {
+ return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index));
+ }
+};
+
+
+
+#endif // EIGEN_CWISE_UNARY_VIEW_H
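Because the view keeps the LvalueBit, writing through it modifies the underlying object; real() and imag() on a complex matrix are the canonical examples. A sketch:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXcd m = Eigen::MatrixXcd::Zero(2, 2);
      // real() and imag() return CwiseUnaryView expressions; assigning through
      // them writes into the corresponding parts of m's complex coefficients.
      m.real() = Eigen::Matrix2d::Identity();
      m.imag().setConstant(0.5);
      return 0;
    }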
diff --git a/extern/Eigen3/Eigen/src/Core/DenseBase.h b/extern/Eigen3/Eigen/src/Core/DenseBase.h
new file mode 100644
index 00000000000..838fa40307a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/DenseBase.h
@@ -0,0 +1,543 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DENSEBASE_H
+#define EIGEN_DENSEBASE_H
+
+/** \class DenseBase
+ * \ingroup Core_Module
+ *
+ * \brief Base class for all dense matrices, vectors, and arrays
+ *
+ * This class is the base that is inherited by all dense objects (matrix, vector, arrays,
+ * and related expression types). The common Eigen API for dense objects is contained in this class.
+ *
+ * \tparam Derived is the derived type, e.g., a matrix type or an expression.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
+ *
+ * \sa \ref TopicClassHierarchy
+ */
+template<typename Derived> class DenseBase
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+ typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
+#else
+ : public DenseCoeffsBase<Derived>
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+{
+ public:
+ using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+ typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
+
+ class InnerIterator;
+
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+
+ /** \brief The type of indices
+ * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
+ * \sa \ref TopicPreprocessorDirectives.
+ */
+ typedef typename internal::traits<Derived>::Index Index;
+
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ typedef DenseCoeffsBase<Derived> Base;
+ using Base::derived;
+ using Base::const_cast_derived;
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::rowIndexByOuterInner;
+ using Base::colIndexByOuterInner;
+ using Base::coeff;
+ using Base::coeffByOuterInner;
+ using Base::packet;
+ using Base::packetByOuterInner;
+ using Base::writePacket;
+ using Base::writePacketByOuterInner;
+ using Base::coeffRef;
+ using Base::coeffRefByOuterInner;
+ using Base::copyCoeff;
+ using Base::copyCoeffByOuterInner;
+ using Base::copyPacket;
+ using Base::copyPacketByOuterInner;
+ using Base::operator();
+ using Base::operator[];
+ using Base::x;
+ using Base::y;
+ using Base::z;
+ using Base::w;
+ using Base::stride;
+ using Base::innerStride;
+ using Base::outerStride;
+ using Base::rowStride;
+ using Base::colStride;
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+
+ enum {
+
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ /**< The number of rows at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ /**< The number of columns at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime>::ret),
+ /**< This is equal to the number of coefficients, i.e. the number of
+ * rows times the number of columns, or to \a Dynamic if this is not
+ * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+ MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
+ /**< This value is equal to the maximum possible number of rows that this expression
+ * might have. If this expression might have an arbitrarily high number of rows,
+ * this value is set to \a Dynamic.
+ *
+ * This value is useful to know when evaluating an expression, in order to determine
+ * whether it is possible to avoid doing a dynamic memory allocation.
+ *
+ * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime
+ */
+
+ MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
+ /**< This value is equal to the maximum possible number of columns that this expression
+ * might have. If this expression might have an arbitrarily high number of columns,
+ * this value is set to \a Dynamic.
+ *
+ * This value is useful to know when evaluating an expression, in order to determine
+ * whether it is possible to avoid doing a dynamic memory allocation.
+ *
+ * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime
+ */
+
+ MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
+ internal::traits<Derived>::MaxColsAtCompileTime>::ret),
+ /**< This value is equal to the maximum possible number of coefficients that this expression
+ * might have. If this expression might have an arbitrarily high number of coefficients,
+ * this value is set to \a Dynamic.
+ *
+ * This value is useful to know when evaluating an expression, in order to determine
+ * whether it is possible to avoid doing a dynamic memory allocation.
+ *
+ * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
+ */
+
+ IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
+ || internal::traits<Derived>::MaxColsAtCompileTime == 1,
+ /**< This is set to true if either the number of rows or the number of
+ * columns is known at compile-time to be equal to 1. Indeed, in that case,
+ * we are dealing with a column-vector (if there is only one column) or with
+ * a row-vector (if there is only one row). */
+
+ Flags = internal::traits<Derived>::Flags,
+ /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+ * constructed from this one. See the \ref flags "list of flags".
+ */
+
+ IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */
+
+ InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? SizeAtCompileTime
+ : int(IsRowMajor) ? ColsAtCompileTime : RowsAtCompileTime,
+
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+ /**< This is a rough measure of how expensive it is to read one coefficient from
+ * this expression.
+ */
+
+ InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
+ OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
+ };
+
+ enum { ThisConstantIsPrivateInPlainObjectBase };
+
+ /** \returns the number of nonzero coefficients which is in practice the number
+ * of stored coefficients. */
+ inline Index nonZeros() const { return size(); }
+ /** \returns true if either the number of rows or the number of columns is equal to 1.
+ * In other words, this function returns
+ * \code rows()==1 || cols()==1 \endcode
+ * \sa rows(), cols(), IsVectorAtCompileTime. */
+
+ /** \returns the outer size.
+ *
+ * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
+ * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
+ * column-major matrix, and the number of rows for a row-major matrix. */
+ Index outerSize() const
+ {
+ return IsVectorAtCompileTime ? 1
+ : int(IsRowMajor) ? this->rows() : this->cols();
+ }
+
+ /** \returns the inner size.
+ *
+ * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
+ * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a
+ * column-major matrix, and the number of columns for a row-major matrix. */
+ Index innerSize() const
+ {
+ return IsVectorAtCompileTime ? this->size()
+ : int(IsRowMajor) ? this->cols() : this->rows();
+ }
+
+ /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
+ * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
+ * nothing else.
+ */
+ void resize(Index size)
+ {
+ EIGEN_ONLY_USED_FOR_DEBUG(size);
+ eigen_assert(size == this->size()
+ && "DenseBase::resize() does not actually allow to resize.");
+ }
+ /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
+ * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
+ * nothing else.
+ */
+ void resize(Index rows, Index cols)
+ {
+ EIGEN_ONLY_USED_FOR_DEBUG(rows);
+ EIGEN_ONLY_USED_FOR_DEBUG(cols);
+ eigen_assert(rows == this->rows() && cols == this->cols()
+ && "DenseBase::resize() does not actually allow to resize.");
+ }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+ /** \internal Represents a matrix with all coefficients equal to one another*/
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+ /** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */
+ typedef CwiseNullaryOp<internal::linspaced_op<Scalar,false>,Derived> SequentialLinSpacedReturnType;
+ /** \internal Represents a vector with linearly spaced coefficients that allows random access. */
+ typedef CwiseNullaryOp<internal::linspaced_op<Scalar,true>,Derived> RandomAccessLinSpacedReturnType;
+ /** \internal the return type of MatrixBase::eigenvalues() */
+ typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ /** Copies \a other into *this. \returns a reference to *this. */
+ template<typename OtherDerived>
+ Derived& operator=(const DenseBase<OtherDerived>& other);
+
+ /** Special case of the template operator=, in order to prevent the compiler
+ * from generating a default operator= (issue hit with g++ 4.1)
+ */
+ Derived& operator=(const DenseBase& other);
+
+ template<typename OtherDerived>
+ Derived& operator=(const EigenBase<OtherDerived> &other);
+
+ template<typename OtherDerived>
+ Derived& operator+=(const EigenBase<OtherDerived> &other);
+
+ template<typename OtherDerived>
+ Derived& operator-=(const EigenBase<OtherDerived> &other);
+
+ template<typename OtherDerived>
+ Derived& operator=(const ReturnByValue<OtherDerived>& func);
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** Copies \a other into *this without evaluating other. \returns a reference to *this. */
+ template<typename OtherDerived>
+ Derived& lazyAssign(const DenseBase<OtherDerived>& other);
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ CommaInitializer<Derived> operator<< (const Scalar& s);
+
+ template<unsigned int Added,unsigned int Removed>
+ const Flagged<Derived, Added, Removed> flagged() const;
+
+ template<typename OtherDerived>
+ CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other);
+
+ Eigen::Transpose<Derived> transpose();
+ typedef const Transpose<const Derived> ConstTransposeReturnType;
+ ConstTransposeReturnType transpose() const;
+ void transposeInPlace();
+#ifndef EIGEN_NO_DEBUG
+ protected:
+ template<typename OtherDerived>
+ void checkTransposeAliasing(const OtherDerived& other) const;
+ public:
+#endif
+
+ typedef VectorBlock<Derived> SegmentReturnType;
+ typedef const VectorBlock<const Derived> ConstSegmentReturnType;
+ template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
+ template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
+
+ // Note: The "DenseBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
+ SegmentReturnType segment(Index start, Index size);
+ typename DenseBase::ConstSegmentReturnType segment(Index start, Index size) const;
+
+ SegmentReturnType head(Index size);
+ typename DenseBase::ConstSegmentReturnType head(Index size) const;
+
+ SegmentReturnType tail(Index size);
+ typename DenseBase::ConstSegmentReturnType tail(Index size) const;
+
+ template<int Size> typename FixedSegmentReturnType<Size>::Type head();
+ template<int Size> typename ConstFixedSegmentReturnType<Size>::Type head() const;
+
+ template<int Size> typename FixedSegmentReturnType<Size>::Type tail();
+ template<int Size> typename ConstFixedSegmentReturnType<Size>::Type tail() const;
+
+ template<int Size> typename FixedSegmentReturnType<Size>::Type segment(Index start);
+ template<int Size> typename ConstFixedSegmentReturnType<Size>::Type segment(Index start) const;
+
+ static const ConstantReturnType
+ Constant(Index rows, Index cols, const Scalar& value);
+ static const ConstantReturnType
+ Constant(Index size, const Scalar& value);
+ static const ConstantReturnType
+ Constant(const Scalar& value);
+
+ static const SequentialLinSpacedReturnType
+ LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high);
+ static const RandomAccessLinSpacedReturnType
+ LinSpaced(Index size, const Scalar& low, const Scalar& high);
+ static const SequentialLinSpacedReturnType
+ LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);
+ static const RandomAccessLinSpacedReturnType
+ LinSpaced(const Scalar& low, const Scalar& high);
+
+ template<typename CustomNullaryOp>
+ static const CwiseNullaryOp<CustomNullaryOp, Derived>
+ NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);
+ template<typename CustomNullaryOp>
+ static const CwiseNullaryOp<CustomNullaryOp, Derived>
+ NullaryExpr(Index size, const CustomNullaryOp& func);
+ template<typename CustomNullaryOp>
+ static const CwiseNullaryOp<CustomNullaryOp, Derived>
+ NullaryExpr(const CustomNullaryOp& func);
+
+ static const ConstantReturnType Zero(Index rows, Index cols);
+ static const ConstantReturnType Zero(Index size);
+ static const ConstantReturnType Zero();
+ static const ConstantReturnType Ones(Index rows, Index cols);
+ static const ConstantReturnType Ones(Index size);
+ static const ConstantReturnType Ones();
+
+ void fill(const Scalar& value);
+ Derived& setConstant(const Scalar& value);
+ Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
+ Derived& setLinSpaced(const Scalar& low, const Scalar& high);
+ Derived& setZero();
+ Derived& setOnes();
+ Derived& setRandom();
+
+ template<typename OtherDerived>
+ bool isApprox(const DenseBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isMuchSmallerThan(const RealScalar& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ template<typename OtherDerived>
+ bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+ bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+ inline Derived& operator*=(const Scalar& other);
+ inline Derived& operator/=(const Scalar& other);
+
+ /** \returns the matrix or vector obtained by evaluating this expression.
+ *
+ * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+ * a const reference, in order to avoid a useless copy.
+ */
+ EIGEN_STRONG_INLINE const typename internal::eval<Derived>::type eval() const
+ {
+ // Even though MSVC does not honor strong inlining when the return type
+ // is a dynamic matrix, we desperately need strong inlining for fixed
+ // size types on MSVC.
+ return typename internal::eval<Derived>::type(derived());
+ }
+
+ /** swaps *this with the expression \a other.
+ *
+ */
+ template<typename OtherDerived>
+ void swap(const DenseBase<OtherDerived>& other,
+ int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase)
+ {
+ SwapWrapper<Derived>(derived()).lazyAssign(other.derived());
+ }
+
+ /** swaps *this with the matrix or array \a other.
+ *
+ */
+ template<typename OtherDerived>
+ void swap(PlainObjectBase<OtherDerived>& other)
+ {
+ SwapWrapper<Derived>(derived()).lazyAssign(other.derived());
+ }
+
+
+ inline const NestByValue<Derived> nestByValue() const;
+ inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
+ inline ForceAlignedAccess<Derived> forceAlignedAccess();
+ template<bool Enable> inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const;
+ template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
+
+ Scalar sum() const;
+ Scalar mean() const;
+ Scalar trace() const;
+
+ Scalar prod() const;
+
+ typename internal::traits<Derived>::Scalar minCoeff() const;
+ typename internal::traits<Derived>::Scalar maxCoeff() const;
+
+ template<typename IndexType>
+ typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
+ template<typename IndexType>
+ typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
+ template<typename IndexType>
+ typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
+ template<typename IndexType>
+ typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;
+
+ template<typename BinaryOp>
+ typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type
+ redux(const BinaryOp& func) const;
+
+ template<typename Visitor>
+ void visit(Visitor& func) const;
+
+ inline const WithFormat<Derived> format(const IOFormat& fmt) const;
+
+ /** \returns the unique coefficient of a 1x1 expression */
+ CoeffReturnType value() const
+ {
+ EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+ eigen_assert(this->rows() == 1 && this->cols() == 1);
+ return derived().coeff(0,0);
+ }
+
+/////////// Array module ///////////
+
+ bool all(void) const;
+ bool any(void) const;
+ Index count() const;
+
+ typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
+ typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
+ typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
+ typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;
+
+ ConstRowwiseReturnType rowwise() const;
+ RowwiseReturnType rowwise();
+ ConstColwiseReturnType colwise() const;
+ ColwiseReturnType colwise();
+
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index rows, Index cols);
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index size);
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random();
+
+ template<typename ThenDerived,typename ElseDerived>
+ const Select<Derived,ThenDerived,ElseDerived>
+ select(const DenseBase<ThenDerived>& thenMatrix,
+ const DenseBase<ElseDerived>& elseMatrix) const;
+
+ template<typename ThenDerived>
+ inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+ select(const DenseBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
+
+ template<typename ElseDerived>
+ inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+ select(typename ElseDerived::Scalar thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
+
+ template<int p> RealScalar lpNorm() const;
+
+ template<int RowFactor, int ColFactor>
+ const Replicate<Derived,RowFactor,ColFactor> replicate() const;
+ const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFacor,Index colFactor) const;
+
+ typedef Reverse<Derived, BothDirections> ReverseReturnType;
+ typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
+ ReverseReturnType reverse();
+ ConstReverseReturnType reverse() const;
+ void reverseInPlace();
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
+# include "../plugins/BlockMethods.h"
+# ifdef EIGEN_DENSEBASE_PLUGIN
+# include EIGEN_DENSEBASE_PLUGIN
+# endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+#ifdef EIGEN2_SUPPORT
+
+ Block<Derived> corner(CornerType type, Index cRows, Index cCols);
+ const Block<Derived> corner(CornerType type, Index cRows, Index cCols) const;
+ template<int CRows, int CCols>
+ Block<Derived, CRows, CCols> corner(CornerType type);
+ template<int CRows, int CCols>
+ const Block<Derived, CRows, CCols> corner(CornerType type) const;
+
+#endif // EIGEN2_SUPPORT
+
+
+ // disable the use of evalTo for dense objects with a nice compilation error
+ template<typename Dest> inline void evalTo(Dest& ) const
+ {
+ EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
+ }
+
+ protected:
+ /** Default constructor. Does nothing. */
+ DenseBase()
+ {
+ /* Just checks for self-consistency of the flags.
+ * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down.
+ */
+#ifdef EIGEN_INTERNAL_DEBUGGING
+ EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
+ && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))),
+ INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
+#endif
+ }
+
+ private:
+ explicit DenseBase(int);
+ DenseBase(int,int);
+ template<typename OtherDerived> explicit DenseBase(const DenseBase<OtherDerived>&);
+};
+
+#endif // EIGEN_DENSEBASE_H
diff --git a/extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h b/extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h
new file mode 100644
index 00000000000..e45238fb584
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/DenseCoeffsBase.h
@@ -0,0 +1,765 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DENSECOEFFSBASE_H
+#define EIGEN_DENSECOEFFSBASE_H
+
+namespace internal {
+template<typename T> struct add_const_on_value_type_if_arithmetic
+{
+ typedef typename conditional<is_arithmetic<T>::value, T, typename add_const_on_value_type<T>::type>::type type;
+};
+}
+
+/** \brief Base class providing read-only coefficient access to matrices and arrays.
+ * \ingroup Core_Module
+ * \tparam Derived Type of the derived class
+ * \tparam #ReadOnlyAccessors Constant indicating read-only access
+ *
+ * This class defines the \c operator() \c const function and friends, which can be used to read specific
+ * entries of a matrix or array.
+ *
+ * \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,
+ * \ref TopicClassHierarchy
+ */
+template<typename Derived>
+class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
+{
+ public:
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+
+ // Explanation for this CoeffReturnType typedef.
+ // - This is the return type of the coeff() method.
+ // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references
+ // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value).
+ // - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems
+ //   while the declaration of "const T", where T is a non-arithmetic type, does not. Always returning "const Scalar&" is
+ //   not possible, since the underlying expressions might not offer a valid address for the reference to refer to.
+ typedef typename internal::conditional<bool(internal::traits<Derived>::Flags&LvalueBit),
+ const Scalar&,
+ typename internal::conditional<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>::type
+ >::type CoeffReturnType;
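+
+ // In practice (illustrative note): for a plain Matrix<float,2,2>, which has
+ // the LvalueBit, CoeffReturnType resolves to 'const float&'; for a non-lvalue
+ // expression such as (m+m) it is plain 'float', returned by value, since the
+ // sum stores no coefficient whose address could be taken.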
+
+ typedef typename internal::add_const_on_value_type_if_arithmetic<
+ typename internal::packet_traits<Scalar>::type
+ >::type PacketReturnType;
+
+ typedef EigenBase<Derived> Base;
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::derived;
+
+ EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
+ {
+ return int(Derived::RowsAtCompileTime) == 1 ? 0
+ : int(Derived::ColsAtCompileTime) == 1 ? inner
+ : int(Derived::Flags)&RowMajorBit ? outer
+ : inner;
+ }
+
+ EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const
+ {
+ return int(Derived::ColsAtCompileTime) == 1 ? 0
+ : int(Derived::RowsAtCompileTime) == 1 ? inner
+ : int(Derived::Flags)&RowMajorBit ? inner
+ : outer;
+ }
+
+ /** Short version: don't use this function, use
+ * \link operator()(Index,Index) const \endlink instead.
+ *
+ * Long version: this function is similar to
+ * \link operator()(Index,Index) const \endlink, but without the assertion.
+ * Use this for limiting the performance cost of debugging code when doing
+ * repeated coefficient access. Only use this when it is guaranteed that the
+ * parameters \a row and \a col are in range.
+ *
+ * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+ * function equivalent to \link operator()(Index,Index) const \endlink.
+ *
+ * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
+ */
+ EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
+ {
+ eigen_internal_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ return derived().coeff(row, col);
+ }
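+
+ // A minimal sketch (illustrative; m stands for any dense expression whose
+ // bounds are already guaranteed by the loops):
+ //   double sum = 0;
+ //   for (Index j = 0; j < m.cols(); ++j)
+ //     for (Index i = 0; i < m.rows(); ++i)
+ //       sum += m.coeff(i,j); // no per-access range check in release builds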
+
+ EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
+ {
+ return coeff(rowIndexByOuterInner(outer, inner),
+ colIndexByOuterInner(outer, inner));
+ }
+
+ /** \returns the coefficient at the given row and column.
+ *
+ * \sa operator()(Index,Index), operator[](Index)
+ */
+ EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const
+ {
+ eigen_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ return derived().coeff(row, col);
+ }
+
+ /** Short version: don't use this function, use
+ * \link operator[](Index) const \endlink instead.
+ *
+ * Long version: this function is similar to
+ * \link operator[](Index) const \endlink, but without the assertion.
+ * Use this for limiting the performance cost of debugging code when doing
+ * repeated coefficient access. Only use this when it is guaranteed that the
+ * parameter \a index is in range.
+ *
+ * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+ * function equivalent to \link operator[](Index) const \endlink.
+ *
+ * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
+ */
+
+ EIGEN_STRONG_INLINE CoeffReturnType
+ coeff(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return derived().coeff(index);
+ }
+
+
+ /** \returns the coefficient at the given index.
+ *
+ * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+ *
+ * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
+ * z() const, w() const
+ */
+
+ EIGEN_STRONG_INLINE CoeffReturnType
+ operator[](Index index) const
+ {
+ #ifndef EIGEN2_SUPPORT
+ EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
+ THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
+ #endif
+ eigen_assert(index >= 0 && index < size());
+ return derived().coeff(index);
+ }
+
+ /** \returns the coefficient at the given index.
+ *
+ * This is synonymous with operator[](Index) const.
+ *
+ * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+ *
+ * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
+ * z() const, w() const
+ */
+
+ EIGEN_STRONG_INLINE CoeffReturnType
+ operator()(Index index) const
+ {
+ eigen_assert(index >= 0 && index < size());
+ return derived().coeff(index);
+ }
+
+ /** equivalent to operator[](0). */
+
+ EIGEN_STRONG_INLINE CoeffReturnType
+ x() const { return (*this)[0]; }
+
+ /** equivalent to operator[](1). */
+
+ EIGEN_STRONG_INLINE CoeffReturnType
+ y() const { return (*this)[1]; }
+
+ /** equivalent to operator[](2). */
+
+ EIGEN_STRONG_INLINE CoeffReturnType
+ z() const { return (*this)[2]; }
+
+ /** equivalent to operator[](3). */
+
+ EIGEN_STRONG_INLINE CoeffReturnType
+ w() const { return (*this)[3]; }
+
+ /** \internal
+ * \returns the packet of coefficients starting at the given row and column. It is your responsibility
+ * to ensure that a packet really starts there. This method is only available on expressions having the
+ * PacketAccessBit.
+ *
+ * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+ * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+ * starting at an address which is a multiple of the packet size.
+ */
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const
+ {
+ eigen_internal_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ return derived().template packet<LoadMode>(row,col);
+ }
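+
+ // A minimal sketch (illustrative; assumes 4-float packets, e.g. SSE): one
+ // vector load reading rows 0..3 of column j of a column-major float matrix m:
+ //   typedef internal::packet_traits<float>::type Packet; // e.g. __m128
+ //   Packet p = m.packet<Aligned>(0, j);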
+
+
+ /** \internal */
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const
+ {
+ return packet<LoadMode>(rowIndexByOuterInner(outer, inner),
+ colIndexByOuterInner(outer, inner));
+ }
+
+ /** \internal
+ * \returns the packet of coefficients starting at the given index. It is your responsibility
+ * to ensure that a packet really starts there. This method is only available on expressions having the
+ * PacketAccessBit and the LinearAccessBit.
+ *
+ * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+ * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+ * starting at an address which is a multiple of the packet size.
+ */
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return derived().template packet<LoadMode>(index);
+ }
+
+ protected:
+ // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase.
+ // But some methods are only available in the DirectAccess case.
+ // So we add dummy methods here with these names, so that "using... " doesn't fail.
+ // It's not private, so that the child class DenseBase can access them, and it's not public
+ // either, since it's an implementation detail, so it has to be protected.
+ void coeffRef();
+ void coeffRefByOuterInner();
+ void writePacket();
+ void writePacketByOuterInner();
+ void copyCoeff();
+ void copyCoeffByOuterInner();
+ void copyPacket();
+ void copyPacketByOuterInner();
+ void stride();
+ void innerStride();
+ void outerStride();
+ void rowStride();
+ void colStride();
+};
+
+/** \brief Base class providing read/write coefficient access to matrices and arrays.
+ * \ingroup Core_Module
+ * \tparam Derived Type of the derived class
+ * \tparam #WriteAccessors Constant indicating read/write access
+ *
+ * This class defines the non-const \c operator() function and friends, which can be used to write specific
+ * entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which
+ * defines the const variant for reading specific entries.
+ *
+ * \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy
+ */
+template<typename Derived>
+class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
+{
+ public:
+
+ typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
+
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ using Base::coeff;
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::derived;
+ using Base::rowIndexByOuterInner;
+ using Base::colIndexByOuterInner;
+ using Base::operator[];
+ using Base::operator();
+ using Base::x;
+ using Base::y;
+ using Base::z;
+ using Base::w;
+
+ /** Short version: don't use this function, use
+ * \link operator()(Index,Index) \endlink instead.
+ *
+ * Long version: this function is similar to
+ * \link operator()(Index,Index) \endlink, but without the assertion.
+ * Use this for limiting the performance cost of debugging code when doing
+ * repeated coefficient access. Only use this when it is guaranteed that the
+ * parameters \a row and \a col are in range.
+ *
+ * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+ * function equivalent to \link operator()(Index,Index) \endlink.
+ *
+ * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
+ */
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+ {
+ eigen_internal_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ return derived().coeffRef(row, col);
+ }
+
+ EIGEN_STRONG_INLINE Scalar&
+ coeffRefByOuterInner(Index outer, Index inner)
+ {
+ return coeffRef(rowIndexByOuterInner(outer, inner),
+ colIndexByOuterInner(outer, inner));
+ }
+
+ /** \returns a reference to the coefficient at the given row and column.
+ *
+ * \sa operator[](Index)
+ */
+
+ EIGEN_STRONG_INLINE Scalar&
+ operator()(Index row, Index col)
+ {
+ eigen_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ return derived().coeffRef(row, col);
+ }
+
+
+ /** Short version: don't use this function, use
+ * \link operator[](Index) \endlink instead.
+ *
+ * Long version: this function is similar to
+ * \link operator[](Index) \endlink, but without the assertion.
+ * Use this for limiting the performance cost of debugging code when doing
+ * repeated coefficient access. Only use this when it is guaranteed that the
+ parameter \a index is in range.
+ *
+ * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+ * function equivalent to \link operator[](Index) \endlink.
+ *
+ * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
+ */
+
+ EIGEN_STRONG_INLINE Scalar&
+ coeffRef(Index index)
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ return derived().coeffRef(index);
+ }
+
+ /** \returns a reference to the coefficient at the given index.
+ *
+ * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+ *
+ * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
+ */
+
+ EIGEN_STRONG_INLINE Scalar&
+ operator[](Index index)
+ {
+ #ifndef EIGEN2_SUPPORT
+ EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
+ THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
+ #endif
+ eigen_assert(index >= 0 && index < size());
+ return derived().coeffRef(index);
+ }
+
+ /** \returns a reference to the coefficient at the given index.
+ *
+ * This is synonymous with operator[](Index).
+ *
+ * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+ *
+ * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
+ */
+
+ EIGEN_STRONG_INLINE Scalar&
+ operator()(Index index)
+ {
+ eigen_assert(index >= 0 && index < size());
+ return derived().coeffRef(index);
+ }
+
+ /** equivalent to operator[](0). */
+
+ EIGEN_STRONG_INLINE Scalar&
+ x() { return (*this)[0]; }
+
+ /** equivalent to operator[](1). */
+
+ EIGEN_STRONG_INLINE Scalar&
+ y() { return (*this)[1]; }
+
+ /** equivalent to operator[](2). */
+
+ EIGEN_STRONG_INLINE Scalar&
+ z() { return (*this)[2]; }
+
+ /** equivalent to operator[](3). */
+
+ EIGEN_STRONG_INLINE Scalar&
+ w() { return (*this)[3]; }
+
+ /** \internal
+ * Stores the given packet of coefficients at the given row and column of this expression. It is your responsibility
+ * to ensure that a packet really starts there. This method is only available on expressions having the
+ * PacketAccessBit.
+ *
+ * The \a StoreMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+ * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+ * starting at an address which is a multiple of the packet size.
+ */
+
+ template<int StoreMode>
+ EIGEN_STRONG_INLINE void writePacket
+ (Index row, Index col, const typename internal::packet_traits<Scalar>::type& x)
+ {
+ eigen_internal_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ derived().template writePacket<StoreMode>(row,col,x);
+ }
+
+
+ /** \internal */
+ template<int StoreMode>
+ EIGEN_STRONG_INLINE void writePacketByOuterInner
+ (Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& x)
+ {
+ writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
+ colIndexByOuterInner(outer, inner),
+ x);
+ }
+
+ /** \internal
+ * Stores the given packet of coefficients at the given index in this expression. It is your responsibility
+ * to ensure that a packet really starts there. This method is only available on expressions having the
+ * PacketAccessBit and the LinearAccessBit.
+ *
+ * The \a StoreMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
+ * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+ * starting at an address which is a multiple of the packet size.
+ */
+ template<int StoreMode>
+ EIGEN_STRONG_INLINE void writePacket
+ (Index index, const typename internal::packet_traits<Scalar>::type& x)
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ derived().template writePacket<StoreMode>(index,x);
+ }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+ /** \internal Copies the coefficient at position (row,col) of other into *this.
+ *
+ * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+ * with usual assignments.
+ *
+ * Outside of this internal usage, this method is probably of no use. It is hidden in the public API documentation.
+ */
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
+ {
+ eigen_internal_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ derived().coeffRef(row, col) = other.derived().coeff(row, col);
+ }
+
+ /** \internal Copies the coefficient at the given index of other into *this.
+ *
+ * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+ * with usual assignments.
+ *
+ * Outside of this internal usage, this method is probably of no use. It is hidden in the public API documentation.
+ */
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ derived().coeffRef(index) = other.derived().coeff(index);
+ }
+
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
+ {
+ const Index row = rowIndexByOuterInner(outer,inner);
+ const Index col = colIndexByOuterInner(outer,inner);
+ // derived() is important here: copyCoeff() may be reimplemented in Derived!
+ derived().copyCoeff(row, col, other);
+ }
+
+ /** \internal Copies the packet at position (row,col) of other into *this.
+ *
+ * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+ * with usual assignments.
+ *
+ * Outside of this internal usage, this method is probably of no use. It is hidden in the public API documentation.
+ */
+
+ template<typename OtherDerived, int StoreMode, int LoadMode>
+ EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
+ {
+ eigen_internal_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ derived().template writePacket<StoreMode>(row, col,
+ other.derived().template packet<LoadMode>(row, col));
+ }
+
+ /** \internal Copies the packet at the given index of other into *this.
+ *
+ * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+ * with usual assignments.
+ *
+ * Outside of this internal usage, this method is probably of no use. It is hidden in the public API documentation.
+ */
+
+ template<typename OtherDerived, int StoreMode, int LoadMode>
+ EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase<OtherDerived>& other)
+ {
+ eigen_internal_assert(index >= 0 && index < size());
+ derived().template writePacket<StoreMode>(index,
+ other.derived().template packet<LoadMode>(index));
+ }
+
+ /** \internal */
+ template<typename OtherDerived, int StoreMode, int LoadMode>
+ EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
+ {
+ const Index row = rowIndexByOuterInner(outer,inner);
+ const Index col = colIndexByOuterInner(outer,inner);
+ // derived() is important here: copyCoeff() may be reimplemented in Derived!
+ derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other);
+ }
+#endif
+
+};
+
+/** \brief Base class providing direct read-only coefficient access to matrices and arrays.
+ * \ingroup Core_Module
+ * \tparam Derived Type of the derived class
+ * \tparam #DirectAccessors Constant indicating direct access
+ *
+ * This class defines functions to work with strides which can be used to access entries directly. This class
+ * inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
+ * \c operator() .
+ *
+ * \sa \ref TopicClassHierarchy
+ */
+template<typename Derived>
+class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
+{
+ public:
+
+ typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::derived;
+
+ /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
+ *
+ * \sa outerStride(), rowStride(), colStride()
+ */
+ inline Index innerStride() const
+ {
+ return derived().innerStride();
+ }
+
+ /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
+ * in a column-major matrix).
+ *
+ * \sa innerStride(), rowStride(), colStride()
+ */
+ inline Index outerStride() const
+ {
+ return derived().outerStride();
+ }
+
+ // FIXME shall we remove it?
+ inline Index stride() const
+ {
+ return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
+ }
+
+ /** \returns the pointer increment between two consecutive rows.
+ *
+ * \sa innerStride(), outerStride(), colStride()
+ */
+ inline Index rowStride() const
+ {
+ return Derived::IsRowMajor ? outerStride() : innerStride();
+ }
+
+ /** \returns the pointer increment between two consecutive columns.
+ *
+ * \sa innerStride(), outerStride(), rowStride()
+ */
+ inline Index colStride() const
+ {
+ return Derived::IsRowMajor ? innerStride() : outerStride();
+ }
+};
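+
+// Putting the strides above together (illustrative note): for a direct-access
+// expression m, coefficient (i,j) lives at
+//   m.data() + i*m.rowStride() + j*m.colStride();
+// a column-major MatrixXd, for instance, has rowStride()==1 and
+// colStride()==outerStride(), the latter being the number of rows absent padding.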
+
+/** \brief Base class providing direct read/write coefficient access to matrices and arrays.
+ * \ingroup Core_Module
+ * \tparam Derived Type of the derived class
+ * \tparam #DirectWriteAccessors Constant indicating direct access
+ *
+ * This class defines functions to work with strides which can be used to access entries directly. This class
+ * inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
+ * \c operator().
+ *
+ * \sa \ref TopicClassHierarchy
+ */
+template<typename Derived>
+class DenseCoeffsBase<Derived, DirectWriteAccessors>
+ : public DenseCoeffsBase<Derived, WriteAccessors>
+{
+ public:
+
+ typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::derived;
+
+ /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
+ *
+ * \sa outerStride(), rowStride(), colStride()
+ */
+ inline Index innerStride() const
+ {
+ return derived().innerStride();
+ }
+
+ /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
+ * in a column-major matrix).
+ *
+ * \sa innerStride(), rowStride(), colStride()
+ */
+ inline Index outerStride() const
+ {
+ return derived().outerStride();
+ }
+
+ // FIXME shall we remove it?
+ inline Index stride() const
+ {
+ return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
+ }
+
+ /** \returns the pointer increment between two consecutive rows.
+ *
+ * \sa innerStride(), outerStride(), colStride()
+ */
+ inline Index rowStride() const
+ {
+ return Derived::IsRowMajor ? outerStride() : innerStride();
+ }
+
+ /** \returns the pointer increment between two consecutive columns.
+ *
+ * \sa innerStride(), outerStride(), rowStride()
+ */
+ inline Index colStride() const
+ {
+ return Derived::IsRowMajor ? innerStride() : outerStride();
+ }
+};
+
+namespace internal {
+
+template<typename Derived, bool JustReturnZero>
+struct first_aligned_impl
+{
+ inline static typename Derived::Index run(const Derived&)
+ { return 0; }
+};
+
+template<typename Derived>
+struct first_aligned_impl<Derived, false>
+{
+ inline static typename Derived::Index run(const Derived& m)
+ {
+ return first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size());
+ }
+};
+
+/** \internal \returns the index of the first element of the array that is well aligned for vectorization.
+ *
+ * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
+ * documentation.
+ */
+template<typename Derived>
+inline static typename Derived::Index first_aligned(const Derived& m)
+{
+ return first_aligned_impl
+ <Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)>
+ ::run(m);
+}
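+
+// Illustrative note: for a Map over a float array, first_aligned() returns 0
+// when the base address is 16-byte aligned, and 3 when the base is offset by
+// one float (4 bytes), 3 being the index of the next 16-byte boundary.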
+
+template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
+struct inner_stride_at_compile_time
+{
+ enum { ret = traits<Derived>::InnerStrideAtCompileTime };
+};
+
+template<typename Derived>
+struct inner_stride_at_compile_time<Derived, false>
+{
+ enum { ret = 0 };
+};
+
+template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
+struct outer_stride_at_compile_time
+{
+ enum { ret = traits<Derived>::OuterStrideAtCompileTime };
+};
+
+template<typename Derived>
+struct outer_stride_at_compile_time<Derived, false>
+{
+ enum { ret = 0 };
+};
+
+} // end namespace internal
+
+#endif // EIGEN_DENSECOEFFSBASE_H
diff --git a/extern/Eigen3/Eigen/src/Core/DenseStorage.h b/extern/Eigen3/Eigen/src/Core/DenseStorage.h
new file mode 100644
index 00000000000..813053b00dd
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/DenseStorage.h
@@ -0,0 +1,304 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MATRIXSTORAGE_H
+#define EIGEN_MATRIXSTORAGE_H
+
+#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
+#else
+ #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+#endif
+
+namespace internal {
+
+struct constructor_without_unaligned_array_assert {};
+
+/** \internal
+ * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned
+ * to a 16-byte boundary if the total size is a multiple of 16 bytes.
+ */
+template <typename T, int Size, int MatrixOrArrayOptions,
+ int Alignment = (MatrixOrArrayOptions&DontAlign) ? 0
+ : (((Size*sizeof(T))%16)==0) ? 16
+ : 0 >
+struct plain_array
+{
+ T array[Size];
+ plain_array() {}
+ plain_array(constructor_without_unaligned_array_assert) {}
+};
+
+#ifdef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+ #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
+#else
+ #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
+ eigen_assert((reinterpret_cast<size_t>(array) & sizemask) == 0 \
+ && "this assertion is explained here: " \
+ "http://eigen.tuxfamily.org/dox-devel/TopicUnalignedArrayAssert.html" \
+ " **** READ THIS WEB PAGE !!! ****");
+#endif
+
+template <typename T, int Size, int MatrixOrArrayOptions>
+struct plain_array<T, Size, MatrixOrArrayOptions, 16>
+{
+ EIGEN_USER_ALIGN16 T array[Size];
+ plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) }
+ plain_array(constructor_without_unaligned_array_assert) {}
+};
+
+template <typename T, int MatrixOrArrayOptions, int Alignment>
+struct plain_array<T, 0, MatrixOrArrayOptions, Alignment>
+{
+ EIGEN_USER_ALIGN16 T array[1];
+ plain_array() {}
+ plain_array(constructor_without_unaligned_array_assert) {}
+};
+
+} // end namespace internal
+
+/** \internal
+ *
+ * \class DenseStorage
+ * \ingroup Core_Module
+ *
+ * \brief Stores the data of a matrix
+ *
+ * This class stores the data of fixed-size, dynamic-size or mixed matrices
+ * as compactly as possible.
+ *
+ * \sa Matrix
+ */
+template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage;
+
+// purely fixed-size matrix
+template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage
+{
+ internal::plain_array<T,Size,_Options> m_data;
+ public:
+ inline explicit DenseStorage() {}
+ inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ : m_data(internal::constructor_without_unaligned_array_assert()) {}
+ inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+ inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
+ inline static DenseIndex rows(void) {return _Rows;}
+ inline static DenseIndex cols(void) {return _Cols;}
+ inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+ inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
+ inline const T *data() const { return m_data.array; }
+ inline T *data() { return m_data.array; }
+};
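+
+// Illustrative mapping (assuming the usual Matrix typedefs): which
+// specialization a given type selects:
+//   Matrix4f                -> DenseStorage<float,16,4,4,...> (the plain-array case above)
+//   MatrixXf                -> DenseStorage<float,Dynamic,Dynamic,Dynamic,...> (heap, below)
+//   Matrix<float,3,Dynamic> -> DenseStorage<float,Dynamic,3,Dynamic,...>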
+
+// null matrix
+template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
+{
+ public:
+ inline explicit DenseStorage() {}
+ inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
+ inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+ inline void swap(DenseStorage& ) {}
+ inline static DenseIndex rows(void) {return _Rows;}
+ inline static DenseIndex cols(void) {return _Cols;}
+ inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+ inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
+ inline const T *data() const { return 0; }
+ inline T *data() { return 0; }
+};
+
+// dynamic-size matrix with fixed-size storage
+template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options>
+{
+ internal::plain_array<T,Size,_Options> m_data;
+ DenseIndex m_rows;
+ DenseIndex m_cols;
+ public:
+ inline explicit DenseStorage() : m_rows(0), m_cols(0) {}
+ inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
+ inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {}
+ inline void swap(DenseStorage& other)
+ { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+ inline DenseIndex rows(void) const {return m_rows;}
+ inline DenseIndex cols(void) const {return m_cols;}
+ inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
+ inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; }
+ inline const T *data() const { return m_data.array; }
+ inline T *data() { return m_data.array; }
+};
+
+// dynamic-size matrix with fixed-size storage and fixed width
+template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options>
+{
+ internal::plain_array<T,Size,_Options> m_data;
+ DenseIndex m_rows;
+ public:
+ inline explicit DenseStorage() : m_rows(0) {}
+ inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
+ inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {}
+ inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+ inline DenseIndex rows(void) const {return m_rows;}
+ inline DenseIndex cols(void) const {return _Cols;}
+ inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
+ inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; }
+ inline const T *data() const { return m_data.array; }
+ inline T *data() { return m_data.array; }
+};
+
+// dynamic-size matrix with fixed-size storage and fixed height
+template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options>
+{
+ internal::plain_array<T,Size,_Options> m_data;
+ DenseIndex m_cols;
+ public:
+ inline explicit DenseStorage() : m_cols(0) {}
+ inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
+ inline DenseStorage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {}
+ inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+ inline DenseIndex rows(void) const {return _Rows;}
+ inline DenseIndex cols(void) const {return m_cols;}
+ inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
+ inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; }
+ inline const T *data() const { return m_data.array; }
+ inline T *data() { return m_data.array; }
+};
+
+// purely dynamic matrix.
+template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options>
+{
+ T *m_data;
+ DenseIndex m_rows;
+ DenseIndex m_cols;
+ public:
+ inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+ inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ : m_data(0), m_rows(0), m_cols(0) {}
+ inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex cols)
+ : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols)
+ { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+ inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
+ inline void swap(DenseStorage& other)
+ { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+ inline DenseIndex rows(void) const {return m_rows;}
+ inline DenseIndex cols(void) const {return m_cols;}
+ inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols)
+ {
+ m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
+ m_rows = rows;
+ m_cols = cols;
+ }
+ void resize(DenseIndex size, DenseIndex rows, DenseIndex cols)
+ {
+ if(size != m_rows*m_cols)
+ {
+ internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols);
+ if (size)
+ m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+ else
+ m_data = 0;
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+ }
+ m_rows = rows;
+ m_cols = cols;
+ }
+ inline const T *data() const { return m_data; }
+ inline T *data() { return m_data; }
+};
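+
+// Note (illustrative reading of the code above): resize() reallocates only
+// when the total coefficient count changes and does not preserve old values,
+// whereas conservativeResize() goes through a realloc-style helper that keeps
+// the leading part of the existing buffer.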
+
+// matrix with dynamic width and fixed height (so that matrix has dynamic size).
+template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options>
+{
+ T *m_data;
+ DenseIndex m_cols;
+ public:
+ inline explicit DenseStorage() : m_data(0), m_cols(0) {}
+ inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
+ inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols)
+ { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+ inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
+ inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+ inline static DenseIndex rows(void) {return _Rows;}
+ inline DenseIndex cols(void) const {return m_cols;}
+ inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols)
+ {
+ m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
+ m_cols = cols;
+ }
+ EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols)
+ {
+ if(size != _Rows*m_cols)
+ {
+ internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols);
+ if (size)
+ m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+ else
+ m_data = 0;
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+ }
+ m_cols = cols;
+ }
+ inline const T *data() const { return m_data; }
+ inline T *data() { return m_data; }
+};
+
+// matrix with dynamic height and fixed width (so that matrix has dynamic size).
+template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options>
+{
+ T *m_data;
+ DenseIndex m_rows;
+ public:
+ inline explicit DenseStorage() : m_data(0), m_rows(0) {}
+ inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
+ inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows)
+ { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+ inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
+ inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+ inline DenseIndex rows(void) const {return m_rows;}
+ inline static DenseIndex cols(void) {return _Cols;}
+ inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex)
+ {
+ m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
+ m_rows = rows;
+ }
+ EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex)
+ {
+ if(size != m_rows*_Cols)
+ {
+ internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows);
+ if (size)
+ m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+ else
+ m_data = 0;
+ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+ }
+ m_rows = rows;
+ }
+ inline const T *data() const { return m_data; }
+ inline T *data() { return m_data; }
+};
+
+#endif // EIGEN_MATRIXSTORAGE_H
diff --git a/extern/Eigen3/Eigen/src/Core/Diagonal.h b/extern/Eigen3/Eigen/src/Core/Diagonal.h
new file mode 100644
index 00000000000..61d3b063a44
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Diagonal.h
@@ -0,0 +1,227 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DIAGONAL_H
+#define EIGEN_DIAGONAL_H
+
+/** \class Diagonal
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
+ *
+ * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal
+ * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
+ * A positive value means a superdiagonal, a negative value means a subdiagonal.
+ * You can also use Dynamic so the index can be set at runtime.
+ *
+ * The matrix is not required to be square.
+ *
+ * This class represents an expression of the main diagonal, or any sub/super diagonal,
+ * of a matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index), and most of the
+ * time this is the only way it is used.
+ *
+ * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
+ */
+
+namespace internal {
+template<typename MatrixType, int DiagIndex>
+struct traits<Diagonal<MatrixType,DiagIndex> >
+ : traits<MatrixType>
+{
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+ typedef typename MatrixType::StorageKind StorageKind;
+ enum {
+ AbsDiagIndex = DiagIndex<0 ? -DiagIndex : DiagIndex, // only used if DiagIndex != Dynamic
+ // FIXME these computations are broken in the case where the matrix is rectangular and DiagIndex!=0
+ RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
+ : (EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime,
+ MatrixType::ColsAtCompileTime) - AbsDiagIndex),
+ ColsAtCompileTime = 1,
+ MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
+ : DiagIndex == Dynamic ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
+ MatrixType::MaxColsAtCompileTime)
+ : (EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime) - AbsDiagIndex),
+ MaxColsAtCompileTime = 1,
+ MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
+ Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit,
+ CoeffReadCost = _MatrixTypeNested::CoeffReadCost,
+ MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,
+ InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1,
+ OuterStrideAtCompileTime = 0
+ };
+};
+}
+
+template<typename MatrixType, int DiagIndex> class Diagonal
+ : public internal::dense_xpr_base< Diagonal<MatrixType,DiagIndex> >::type
+{
+ public:
+
+ typedef typename internal::dense_xpr_base<Diagonal>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
+
+ inline Diagonal(MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {}
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
+
+ inline Index rows() const
+ { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
+
+ inline Index cols() const { return 1; }
+
+ inline Index innerStride() const
+ {
+ return m_matrix.outerStride() + 1;
+ }
+
+ inline Index outerStride() const
+ {
+ return 0;
+ }
+
+ inline Scalar& coeffRef(Index row, Index)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+ return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
+ }
+
+ inline const Scalar& coeffRef(Index row, Index) const
+ {
+ return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
+ }
+
+ inline CoeffReturnType coeff(Index row, Index) const
+ {
+ return m_matrix.coeff(row+rowOffset(), row+colOffset());
+ }
+
+ inline Scalar& coeffRef(Index index)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+ return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
+ }
+
+ inline const Scalar& coeffRef(Index index) const
+ {
+ return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset());
+ }
+
+ inline CoeffReturnType coeff(Index index) const
+ {
+ return m_matrix.coeff(index+rowOffset(), index+colOffset());
+ }
+
+ protected:
+ const typename MatrixType::Nested m_matrix;
+ const internal::variable_if_dynamic<Index, DiagIndex> m_index;
+
+ private:
+ // some compilers may fail to optimize std::max etc. in the case of compile-time constants...
+ EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
+ EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
+ EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
+ // trigger a compile-time error if someone tries to call packet
+ template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const;
+ template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const;
+};
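+
+// Illustrative note on the strides above: stepping from (i,i) to (i+1,i+1) in
+// a column-major matrix advances one full column plus one row, hence
+// innerStride()==outerStride()+1. For a 4x4 MatrixXd m, m.diagonal() visits
+// m.data()[0], [5], [10], [15].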
+
+/** \returns an expression of the main diagonal of the matrix \c *this
+ *
+ * \c *this is not required to be square.
+ *
+ * Example: \include MatrixBase_diagonal.cpp
+ * Output: \verbinclude MatrixBase_diagonal.out
+ *
+ * \sa class Diagonal */
+template<typename Derived>
+inline typename MatrixBase<Derived>::DiagonalReturnType
+MatrixBase<Derived>::diagonal()
+{
+ return derived();
+}
+
+/** This is the const version of diagonal(). */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::ConstDiagonalReturnType
+MatrixBase<Derived>::diagonal() const
+{
+ return ConstDiagonalReturnType(derived());
+}
+
+/** \returns an expression of the \a index-th sub or super diagonal of the matrix \c *this
+ *
+ * \c *this is not required to be square.
+ *
+ * The argument \a index represents a super diagonal if \a index > 0
+ * and a sub diagonal if \a index < 0; \a index == 0 is equivalent to the main diagonal.
+ *
+ * Example: \include MatrixBase_diagonal_int.cpp
+ * Output: \verbinclude MatrixBase_diagonal_int.out
+ *
+ * \sa MatrixBase::diagonal(), class Diagonal */
+template<typename Derived>
+inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Dynamic>::Type
+MatrixBase<Derived>::diagonal(Index index)
+{
+ return typename DiagonalIndexReturnType<Dynamic>::Type(derived(), index);
+}
+
+/** This is the const version of diagonal(Index). */
+template<typename Derived>
+inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Dynamic>::Type
+MatrixBase<Derived>::diagonal(Index index) const
+{
+ return typename ConstDiagonalIndexReturnType<Dynamic>::Type(derived(), index);
+}
+
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
+ *
+ * \c *this is not required to be square.
+ *
+ * The template parameter \a DiagIndex represents a super diagonal if \a DiagIndex > 0
+ * and a sub diagonal if \a DiagIndex < 0; \a DiagIndex == 0 is equivalent to the main diagonal.
+ *
+ * Example: \include MatrixBase_diagonal_template_int.cpp
+ * Output: \verbinclude MatrixBase_diagonal_template_int.out
+ *
+ * \sa MatrixBase::diagonal(), class Diagonal */
+template<typename Derived>
+template<int Index>
+inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index>::Type
+MatrixBase<Derived>::diagonal()
+{
+ return derived();
+}
+
+/** This is the const version of diagonal<int>(). */
+template<typename Derived>
+template<int Index>
+inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index>::Type
+MatrixBase<Derived>::diagonal() const
+{
+ return derived();
+}
+
+#endif // EIGEN_DIAGONAL_H
diff --git a/extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h b/extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h
new file mode 100644
index 00000000000..f41a74bfae7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/DiagonalMatrix.h
@@ -0,0 +1,306 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DIAGONALMATRIX_H
+#define EIGEN_DIAGONALMATRIX_H
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename Derived>
+class DiagonalBase : public EigenBase<Derived>
+{
+ public:
+ typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
+ typedef typename DiagonalVectorType::Scalar Scalar;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+
+ enum {
+ RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+ ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+ MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+ MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+ IsVectorAtCompileTime = 0,
+ Flags = 0
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime> DenseMatrixType;
+ typedef DenseMatrixType DenseType;
+ typedef DiagonalMatrix<Scalar,DiagonalVectorType::SizeAtCompileTime,DiagonalVectorType::MaxSizeAtCompileTime> PlainObject;
+
+ inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+ inline Derived& derived() { return *static_cast<Derived*>(this); }
+
+ DenseMatrixType toDenseMatrix() const { return derived(); }
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived> &other) const;
+ template<typename DenseDerived>
+ void addTo(MatrixBase<DenseDerived> &other) const
+ { other.diagonal() += diagonal(); }
+ template<typename DenseDerived>
+ void subTo(MatrixBase<DenseDerived> &other) const
+ { other.diagonal() -= diagonal(); }
+
+ inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
+ inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
+
+ inline Index rows() const { return diagonal().size(); }
+ inline Index cols() const { return diagonal().size(); }
+
+ template<typename MatrixDerived>
+ const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
+ operator*(const MatrixBase<MatrixDerived> &matrix) const;
+
+ inline const DiagonalWrapper<CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> >
+ inverse() const
+ {
+ return diagonal().cwiseInverse();
+ }
+
+ #ifdef EIGEN2_SUPPORT
+ template<typename OtherDerived>
+ bool isApprox(const DiagonalBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+ {
+ return diagonal().isApprox(other.diagonal(), precision);
+ }
+ template<typename OtherDerived>
+ bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+ {
+ return toDenseMatrix().isApprox(other, precision);
+ }
+ #endif
+};
+
+template<typename Derived>
+template<typename DenseDerived>
+void DiagonalBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
+{
+ other.setZero();
+ other.diagonal() = diagonal();
+}
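+
+// Illustrative note: this evalTo() is what makes an assignment such as
+//   Eigen::Vector3d v(1,2,3);
+//   Eigen::Matrix3d m = v.asDiagonal(); // zeroed, then diagonal filled from v
+// produce a dense matrix with v on its diagonal and zeros elsewhere.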
+#endif
+
+/** \class DiagonalMatrix
+ * \ingroup Core_Module
+ *
+ * \brief Represents a diagonal matrix with its storage
+ *
+ * \param _Scalar the type of coefficients
+ * \param SizeAtCompileTime the dimension of the matrix, or Dynamic
+ * \param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults
+ * to SizeAtCompileTime. Most of the time, you do not need to specify it.
+ *
+ * \sa class DiagonalWrapper
+ */
+
+namespace internal {
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
+struct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
+ : traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+ typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
+ typedef Dense StorageKind;
+ typedef DenseIndex Index;
+ enum {
+ Flags = LvalueBit
+ };
+};
+}
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
+class DiagonalMatrix
+ : public DiagonalBase<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+ public:
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ typedef typename internal::traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;
+ typedef const DiagonalMatrix& Nested;
+ typedef _Scalar Scalar;
+ typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
+ typedef typename internal::traits<DiagonalMatrix>::Index Index;
+ #endif
+
+ protected:
+
+ DiagonalVectorType m_diagonal;
+
+ public:
+
+ /** const version of diagonal(). */
+ inline const DiagonalVectorType& diagonal() const { return m_diagonal; }
+ /** \returns a reference to the stored vector of diagonal coefficients. */
+ inline DiagonalVectorType& diagonal() { return m_diagonal; }
+
+ /** Default constructor without initialization */
+ inline DiagonalMatrix() {}
+
+ /** Constructs a diagonal matrix with given dimension */
+ inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
+
+ /** 2D constructor. */
+ inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {}
+
+ /** 3D constructor. */
+ inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {}
+
+ /** Copy constructor. */
+ template<typename OtherDerived>
+ inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {}
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** Copy constructor. Prevents the compiler-generated copy constructor from hiding the templated constructor above. */
+ inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {}
+ #endif
+
+ /** Generic constructor from an expression of the diagonal coefficients. */
+ template<typename OtherDerived>
+ explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other)
+ {}
+
+ /** Copy operator. */
+ template<typename OtherDerived>
+ DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other)
+ {
+ m_diagonal = other.diagonal();
+ return *this;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ DiagonalMatrix& operator=(const DiagonalMatrix& other)
+ {
+ m_diagonal = other.diagonal();
+ return *this;
+ }
+ #endif
+
+ /** Resizes to given size. */
+ inline void resize(Index size) { m_diagonal.resize(size); }
+ /** Sets all coefficients to zero. */
+ inline void setZero() { m_diagonal.setZero(); }
+ /** Resizes and sets all coefficients to zero. */
+ inline void setZero(Index size) { m_diagonal.setZero(size); }
+ /** Sets this matrix to be the identity matrix of the current size. */
+ inline void setIdentity() { m_diagonal.setOnes(); }
+ /** Sets this matrix to be the identity matrix of the given size. */
+ inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
+};
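+
+// Editor's sketch (not part of the original source): minimal DiagonalMatrix
+// usage, assuming float coefficients.
+//   Eigen::DiagonalMatrix<float, 3> d(1.f, 2.f, 3.f); // 3x3, diagonal (1,2,3)
+//   d.setIdentity();                                   // diagonal becomes (1,1,1)
+//   Eigen::Matrix3f m = d.toDenseMatrix();             // dense 3x3 identity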
+
+/** \class DiagonalWrapper
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a diagonal matrix
+ *
+ * \param _DiagonalVectorType the type of the vector of diagonal coefficients
+ *
+ * This class is an expression of a diagonal matrix; it does not store its own vector of diagonal
+ * coefficients, but instead wraps an existing vector expression. It is the return type of MatrixBase::asDiagonal()
+ * and most of the time this is the only way that it is used.
+ *
+ * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal()
+ */
+
+namespace internal {
+template<typename _DiagonalVectorType>
+struct traits<DiagonalWrapper<_DiagonalVectorType> >
+{
+ typedef _DiagonalVectorType DiagonalVectorType;
+ typedef typename DiagonalVectorType::Scalar Scalar;
+ typedef typename DiagonalVectorType::Index Index;
+ typedef typename DiagonalVectorType::StorageKind StorageKind;
+ enum {
+ RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+ ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+ MaxRowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+ MaxColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+ Flags = traits<DiagonalVectorType>::Flags & LvalueBit
+ };
+};
+}
+
+template<typename _DiagonalVectorType>
+class DiagonalWrapper
+ : public DiagonalBase<DiagonalWrapper<_DiagonalVectorType> >, internal::no_assignment_operator
+{
+ public:
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ typedef _DiagonalVectorType DiagonalVectorType;
+ typedef DiagonalWrapper Nested;
+ #endif
+
+ /** Constructor from expression of diagonal coefficients to wrap. */
+ inline DiagonalWrapper(const DiagonalVectorType& diagonal) : m_diagonal(diagonal) {}
+
+ /** \returns a const reference to the wrapped expression of diagonal coefficients. */
+ const DiagonalVectorType& diagonal() const { return m_diagonal; }
+
+ protected:
+ const typename DiagonalVectorType::Nested m_diagonal;
+};
+
+/** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients
+ *
+ * \only_for_vectors
+ *
+ * Example: \include MatrixBase_asDiagonal.cpp
+ * Output: \verbinclude MatrixBase_asDiagonal.out
+ *
+ * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()
+ **/
+template<typename Derived>
+inline const DiagonalWrapper<const Derived>
+MatrixBase<Derived>::asDiagonal() const
+{
+ return derived();
+}
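+
+// Editor's sketch (not part of the original source): asDiagonal() wraps the
+// vector without copying it, so the product below never builds a dense
+// diagonal matrix.
+//   Eigen::Vector3f v(1.f, 2.f, 3.f);
+//   Eigen::Matrix3f m = Eigen::Matrix3f::Random();
+//   Eigen::Matrix3f p = v.asDiagonal() * m; // row i of m scaled by v(i)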
+
+/** \returns true if *this is approximately equal to a diagonal matrix,
+ * within the precision given by \a prec.
+ *
+ * Example: \include MatrixBase_isDiagonal.cpp
+ * Output: \verbinclude MatrixBase_isDiagonal.out
+ *
+ * \sa asDiagonal()
+ */
+template<typename Derived>
+bool MatrixBase<Derived>::isDiagonal(RealScalar prec) const
+{
+ if(cols() != rows()) return false;
+ RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
+ for(Index j = 0; j < cols(); ++j)
+ {
+ RealScalar absOnDiagonal = internal::abs(coeff(j,j));
+ if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
+ }
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = 0; i < j; ++i)
+ {
+ if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
+ if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
+ }
+ return true;
+}
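+
+// Editor's sketch (not part of the original source): each off-diagonal entry
+// is tested against the largest diagonal entry, so for instance:
+//   Eigen::Matrix2d m = Eigen::Vector2d(1.0, 2.0).asDiagonal();
+//   bool d = m.isDiagonal(); // true: all off-diagonal entries are exactly 0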
+
+#endif // EIGEN_DIAGONALMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h b/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h
new file mode 100644
index 00000000000..de0c6ed11b7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h
@@ -0,0 +1,135 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DIAGONALPRODUCT_H
+#define EIGEN_DIAGONALPRODUCT_H
+
+namespace internal {
+template<typename MatrixType, typename DiagonalType, int ProductOrder>
+struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
+ : traits<MatrixType>
+{
+ typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+
+ _StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor,
+ _PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
+ ||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)),
+ _SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
+ // FIXME currently we need the same scalar types, but in the future the rule below should be used instead:
+ //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))),
+ _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))),
+
+ Flags = (HereditaryBits & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),
+ CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
+ };
+};
+}
+
+template<typename MatrixType, typename DiagonalType, int ProductOrder>
+class DiagonalProduct : internal::no_assignment_operator,
+ public MatrixBase<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
+{
+ public:
+
+ typedef MatrixBase<DiagonalProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct)
+
+ inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal)
+ : m_matrix(matrix), m_diagonal(diagonal)
+ {
+ eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
+ }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ const Scalar coeff(Index row, Index col) const
+ {
+ return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
+ }
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+ {
+ enum {
+ StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor
+ };
+ const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
+
+ return packet_impl<LoadMode>(row,col,indexInDiagonalVector,typename internal::conditional<
+ ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
+ ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type());
+ }
+
+ protected:
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const
+ {
+ return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
+ internal::pset1<PacketScalar>(m_diagonal.diagonal().coeff(id)));
+ }
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const
+ {
+ enum {
+ InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
+ DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned
+ };
+ return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
+ m_diagonal.diagonal().template packet<DiagonalVectorPacketLoadMode>(id));
+ }
+
+ const typename MatrixType::Nested m_matrix;
+ const typename DiagonalType::Nested m_diagonal;
+};
+
+/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal.
+ */
+template<typename Derived>
+template<typename DiagonalDerived>
+inline const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
+MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &diagonal) const
+{
+ return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), diagonal.derived());
+}
+
+/** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
+ */
+template<typename DiagonalDerived>
+template<typename MatrixDerived>
+inline const DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>
+DiagonalBase<DiagonalDerived>::operator*(const MatrixBase<MatrixDerived> &matrix) const
+{
+ return DiagonalProduct<MatrixDerived, DiagonalDerived, OnTheLeft>(matrix.derived(), derived());
+}
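+
+// Editor's sketch (not part of the original source): the two overloads above
+// cover both product orders, each evaluated lazily coefficient by coefficient.
+//   Eigen::Vector3f v(1.f, 2.f, 3.f);
+//   Eigen::Matrix3f m = Eigen::Matrix3f::Random();
+//   Eigen::Matrix3f l = v.asDiagonal() * m; // OnTheLeft: scales the rows
+//   Eigen::Matrix3f r = m * v.asDiagonal(); // OnTheRight: scales the columns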
+
+
+#endif // EIGEN_DIAGONALPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Core/Dot.h b/extern/Eigen3/Eigen/src/Core/Dot.h
new file mode 100644
index 00000000000..42da7849896
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Dot.h
@@ -0,0 +1,272 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DOT_H
+#define EIGEN_DOT_H
+
+namespace internal {
+
+// helper struct for dot(). The problem is that if we put this logic in the body of dot(), then upon calling dot()
+// with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE
+// looking at the static assertions. Thus this is a trick to get better compile errors.
+template<typename T, typename U,
+// the NeedToTranspose condition here is taken straight from Assign.h
+ bool NeedToTranspose = T::IsVectorAtCompileTime
+ && U::IsVectorAtCompileTime
+ && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1)
+ | // FIXME: | instead of || to silence GCC 4.4.0's spurious warning "suggest parentheses around &&";
+ // revert to || as soon as it is no longer needed.
+ (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))
+>
+struct dot_nocheck
+{
+ typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
+ static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+ {
+ return a.template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
+ }
+};
+
+template<typename T, typename U>
+struct dot_nocheck<T, U, true>
+{
+ typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
+ static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+ {
+ return a.transpose().template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
+ }
+};
+
+} // end namespace internal
+
+/** \returns the dot product of *this with other.
+ *
+ * \only_for_vectors
+ *
+ * \note If the scalar type is complex, this function returns the hermitian
+ * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
+ * second variable.
+ *
+ * \sa squaredNorm(), norm()
+ */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;
+ EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);
+
+ eigen_assert(size() == other.size());
+
+ return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other);
+}
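+
+// Editor's sketch (not part of the original source):
+//   Eigen::Vector3d u(1, 2, 3), v(4, 5, 6);
+//   double s = u.dot(v); // 1*4 + 2*5 + 3*6 = 32
+// For complex vectors the first argument is conjugated, so dot() is not
+// symmetric: a.dot(b) == conj(b.dot(a)).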
+
+#ifdef EIGEN2_SUPPORT
+/** \returns the dot product of *this with other, with the Eigen2 convention that the dot product is linear in the first variable
+ * (conjugating the second variable). Of course this only makes a difference in the complex case.
+ *
+ * This method is only available in EIGEN2_SUPPORT mode.
+ *
+ * \only_for_vectors
+ *
+ * \sa dot()
+ */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+
+ return internal::dot_nocheck<OtherDerived,Derived>::run(other,*this);
+}
+#endif
+
+
+//---------- implementation of L2 norm and related functions ----------
+
+/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm.
+ * In both cases, it is the sum of the squares of all the matrix entries.
+ * For vectors, this is also equal to the dot product of \c *this with itself.
+ *
+ * \sa dot(), norm()
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
+{
+ return internal::real((*this).cwiseAbs2().sum());
+}
+
+/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
+ * In both cases, it is the square root of the sum of the squares of all the matrix entries.
+ * For vectors, this is also equal to the square root of the dot product of \c *this with itself.
+ *
+ * \sa dot(), squaredNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
+{
+ return internal::sqrt(squaredNorm());
+}
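+
+// Editor's sketch (not part of the original source): squaredNorm() skips the
+// square root, so prefer it when merely comparing lengths.
+//   Eigen::Vector2d v(3.0, 4.0);
+//   double n2 = v.squaredNorm(); // 25.0
+//   double n  = v.norm();        // 5.0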
+
+/** \returns an expression of the quotient of *this by its own norm.
+ *
+ * \only_for_vectors
+ *
+ * \sa norm(), normalize()
+ */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::normalized() const
+{
+ typedef typename internal::nested<Derived>::type Nested;
+ typedef typename internal::remove_reference<Nested>::type _Nested;
+ _Nested n(derived());
+ return n / n.norm();
+}
+
+/** Normalizes the vector, i.e. divides it by its own norm.
+ *
+ * \only_for_vectors
+ *
+ * \sa norm(), normalized()
+ */
+template<typename Derived>
+inline void MatrixBase<Derived>::normalize()
+{
+ *this /= norm();
+}
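+
+// Editor's sketch (not part of the original source): normalized() returns a
+// new vector while normalize() works in place; per the code above, neither
+// guards against a zero norm.
+//   Eigen::Vector2d v(3.0, 4.0);
+//   Eigen::Vector2d u = v.normalized(); // u = (0.6, 0.8), v unchanged
+//   v.normalize();                      // now v = (0.6, 0.8)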
+
+//---------- implementation of other norms ----------
+
+namespace internal {
+
+template<typename Derived, int p>
+struct lpNorm_selector
+{
+ typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
+ inline static RealScalar run(const MatrixBase<Derived>& m)
+ {
+ return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
+ }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, 1>
+{
+ inline static typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+ {
+ return m.cwiseAbs().sum();
+ }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, 2>
+{
+ inline static typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+ {
+ return m.norm();
+ }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, Infinity>
+{
+ inline static typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+ {
+ return m.cwiseAbs().maxCoeff();
+ }
+};
+
+} // end namespace internal
+
+/** \returns the \f$ \ell^p \f$ norm of *this, that is, the p-th root of the sum of the p-th powers of the absolute values
+ * of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$
+ * norm, that is, the maximum of the absolute values of the coefficients of *this.
+ *
+ * \sa norm()
+ */
+template<typename Derived>
+template<int p>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::lpNorm() const
+{
+ return internal::lpNorm_selector<Derived, p>::run(*this);
+}
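+
+// Editor's sketch (not part of the original source): the specializations above
+// map to simple reductions.
+//   Eigen::Vector3d v(1.0, -2.0, 2.0);
+//   double l1   = v.lpNorm<1>();               // |1| + |-2| + |2| = 5
+//   double l2   = v.lpNorm<2>();               // sqrt(1 + 4 + 4)  = 3
+//   double linf = v.lpNorm<Eigen::Infinity>(); // max absolute value = 2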
+
+//---------- implementation of isOrthogonal / isUnitary ----------
+
+/** \returns true if *this is approximately orthogonal to \a other,
+ * within the precision given by \a prec.
+ *
+ * Example: \include MatrixBase_isOrthogonal.cpp
+ * Output: \verbinclude MatrixBase_isOrthogonal.out
+ */
+template<typename Derived>
+template<typename OtherDerived>
+bool MatrixBase<Derived>::isOrthogonal
+(const MatrixBase<OtherDerived>& other, RealScalar prec) const
+{
+ typename internal::nested<Derived,2>::type nested(derived());
+ typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
+ return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
+}
+
+/** \returns true if *this is approximately a unitary matrix,
+ * within the precision given by \a prec. When the \a Scalar
+ * type is real, a unitary matrix is the same as an orthogonal matrix, whence the name.
+ *
+ * \note This can be used to check whether a family of vectors forms an orthonormal basis.
+ * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
+ * orthonormal basis.
+ *
+ * Example: \include MatrixBase_isUnitary.cpp
+ * Output: \verbinclude MatrixBase_isUnitary.out
+ */
+template<typename Derived>
+bool MatrixBase<Derived>::isUnitary(RealScalar prec) const
+{
+ typename Derived::Nested nested(derived());
+ for(Index i = 0; i < cols(); ++i)
+ {
+ if(!internal::isApprox(nested.col(i).squaredNorm(), static_cast<RealScalar>(1), prec))
+ return false;
+ for(Index j = 0; j < i; ++j)
+ if(!internal::isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec))
+ return false;
+ }
+ return true;
+}
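+
+// Editor's sketch (not part of the original source): a rotation matrix is the
+// typical real (orthogonal) example.
+//   Eigen::Matrix2d r;
+//   r << 0, -1,
+//        1,  0;             // 90-degree rotation
+//   bool u = r.isUnitary(); // true: the columns are orthonormal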
+
+#endif // EIGEN_DOT_H
diff --git a/extern/Eigen3/Eigen/src/Core/EigenBase.h b/extern/Eigen3/Eigen/src/Core/EigenBase.h
new file mode 100644
index 00000000000..0472539af33
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/EigenBase.h
@@ -0,0 +1,172 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_EIGENBASE_H
+#define EIGEN_EIGENBASE_H
+
+
+/** Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
+ *
+ * In other words, an EigenBase object is an object that can be copied into a MatrixBase.
+ *
+ * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc.
+ *
+ * Notice that this class is trivial; it is only used to disambiguate overloaded functions.
+ *
+ * \sa \ref TopicClassHierarchy
+ */
+template<typename Derived> struct EigenBase
+{
+// typedef typename internal::plain_matrix_type<Derived>::type PlainObject;
+
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+
+ /** \returns a reference to the derived object */
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ /** \returns a const reference to the derived object */
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ inline Derived& const_cast_derived() const
+ { return *static_cast<Derived*>(const_cast<EigenBase*>(this)); }
+ inline const Derived& const_derived() const
+ { return *static_cast<const Derived*>(this); }
+
+ /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
+ inline Index rows() const { return derived().rows(); }
+ /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
+ inline Index cols() const { return derived().cols(); }
+ /** \returns the number of coefficients, which is rows()*cols().
+ * \sa rows(), cols(), SizeAtCompileTime. */
+ inline Index size() const { return rows() * cols(); }
+
+ /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ { derived().evalTo(dst); }
+
+ /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */
+ template<typename Dest> inline void addTo(Dest& dst) const
+ {
+ // This is the default implementation;
+ // a derived class can reimplement it in a more optimized way.
+ typename Dest::PlainObject res(rows(),cols());
+ evalTo(res);
+ dst += res;
+ }
+
+ /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */
+ template<typename Dest> inline void subTo(Dest& dst) const
+ {
+ // This is the default implementation;
+ // a derived class can reimplement it in a more optimized way.
+ typename Dest::PlainObject res(rows(),cols());
+ evalTo(res);
+ dst -= res;
+ }
+
+ /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */
+ template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const
+ {
+ // This is the default implementation;
+ // a derived class can reimplement it in a more optimized way.
+ dst = dst * this->derived();
+ }
+
+ /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */
+ template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
+ {
+ // This is the default implementation;
+ // a derived class can reimplement it in a more optimized way.
+ dst = this->derived() * dst;
+ }
+
+};
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** \brief Copies the generic expression \a other into *this.
+ *
+ * \details The expression must provide a (templated) evalTo(Derived& dst) const
+ * function which does the actual job. In practice, this allows any user to write
+ * their own special matrix class without having to modify MatrixBase.
+ *
+ * \returns a reference to *this.
+ */
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
+{
+ other.derived().evalTo(derived());
+ return derived();
+}
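+
+// Editor's sketch (not part of the original source): this operator= is what
+// lets, e.g., a DiagonalMatrix be assigned to a dense matrix; the right-hand
+// side's evalTo() fills the destination.
+//   Eigen::DiagonalMatrix<double, 2> d(2.0, 3.0);
+//   Eigen::Matrix2d m;
+//   m = d; // calls d.evalTo(m): zeroes m, then writes the diagonal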
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
+{
+ other.derived().addTo(derived());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
+{
+ other.derived().subTo(derived());
+ return derived();
+}
+
+/** replaces \c *this by \c *this * \a other.
+ *
+ * \returns a reference to \c *this
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived&
+MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheRight(derived());
+ return derived();
+}
+
+/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=() */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheRight(derived());
+}
+
+/** replaces \c *this by \a other * \c *this. */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
+{
+ other.derived().applyThisOnTheLeft(derived());
+}
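+
+// Editor's sketch (not part of the original source): the apply* helpers are
+// in-place products.
+//   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+//   Eigen::Matrix3d t = Eigen::Matrix3d::Random();
+//   m.applyOnTheLeft(t);  // m = t * m
+//   m.applyOnTheRight(t); // m = m * t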
+
+#endif // EIGEN_EIGENBASE_H
diff --git a/extern/Eigen2/Eigen/src/Core/Flagged.h b/extern/Eigen3/Eigen/src/Core/Flagged.h
index e3d25341d9e..458213ab553 100644
--- a/extern/Eigen2/Eigen/src/Core/Flagged.h
+++ b/extern/Eigen3/Eigen/src/Core/Flagged.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
@@ -26,6 +26,7 @@
#define EIGEN_FLAGGED_H
/** \class Flagged
+ * \ingroup Core_Module
*
* \brief Expression with modified flags
*
@@ -39,109 +40,110 @@
*
* \sa MatrixBase::flagged()
*/
+
+namespace internal {
template<typename ExpressionType, unsigned int Added, unsigned int Removed>
-struct ei_traits<Flagged<ExpressionType, Added, Removed> > : ei_traits<ExpressionType>
+struct traits<Flagged<ExpressionType, Added, Removed> > : traits<ExpressionType>
{
enum { Flags = (ExpressionType::Flags | Added) & ~Removed };
};
+}
template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged
: public MatrixBase<Flagged<ExpressionType, Added, Removed> >
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(Flagged)
- typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
- ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
+ typedef MatrixBase<Flagged> Base;
+
+ EIGEN_DENSE_PUBLIC_INTERFACE(Flagged)
+ typedef typename internal::conditional<internal::must_nest_by_value<ExpressionType>::ret,
+ ExpressionType, const ExpressionType&>::type ExpressionTypeNested;
typedef typename ExpressionType::InnerIterator InnerIterator;
inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {}
- inline int rows() const { return m_matrix.rows(); }
- inline int cols() const { return m_matrix.cols(); }
- inline int stride() const { return m_matrix.stride(); }
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index outerStride() const { return m_matrix.outerStride(); }
+ inline Index innerStride() const { return m_matrix.innerStride(); }
- inline const Scalar coeff(int row, int col) const
+ inline CoeffReturnType coeff(Index row, Index col) const
{
return m_matrix.coeff(row, col);
}
- inline Scalar& coeffRef(int row, int col)
+ inline CoeffReturnType coeff(Index index) const
+ {
+ return m_matrix.coeff(index);
+ }
+
+ inline const Scalar& coeffRef(Index row, Index col) const
{
return m_matrix.const_cast_derived().coeffRef(row, col);
}
- inline const Scalar coeff(int index) const
+ inline const Scalar& coeffRef(Index index) const
{
- return m_matrix.coeff(index);
+ return m_matrix.const_cast_derived().coeffRef(index);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ return m_matrix.const_cast_derived().coeffRef(row, col);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_matrix.const_cast_derived().coeffRef(index);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return m_matrix.template packet<LoadMode>(row, col);
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_matrix.const_cast_derived().template writePacket<LoadMode>(row, col, x);
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return m_matrix.template packet<LoadMode>(index);
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_matrix.const_cast_derived().template writePacket<LoadMode>(index, x);
}
const ExpressionType& _expression() const { return m_matrix; }
+ template<typename OtherDerived>
+ typename ExpressionType::PlainObject solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived>
+ void solveTriangularInPlace(const MatrixBase<OtherDerived>& other) const;
+
protected:
ExpressionTypeNested m_matrix;
-
-private:
- Flagged& operator=(const Flagged&);
};
-/** \returns an expression of *this with added flags
- *
- * \addexample MarkExample \label How to mark a triangular matrix as triangular
- *
- * Example: \include MatrixBase_marked.cpp
- * Output: \verbinclude MatrixBase_marked.out
- *
- * \sa class Flagged, extract(), part()
- */
-template<typename Derived>
-template<unsigned int Added>
-inline const Flagged<Derived, Added, 0>
-MatrixBase<Derived>::marked() const
-{
- return derived();
-}
-
-/** \returns an expression of *this with the following flags removed:
- * EvalBeforeNestingBit and EvalBeforeAssigningBit.
+/** \returns an expression of *this with added and removed flags
*
- * Example: \include MatrixBase_lazy.cpp
- * Output: \verbinclude MatrixBase_lazy.out
+ * This is mostly for internal use.
*
- * \sa class Flagged, marked()
+ * \sa class Flagged
*/
template<typename Derived>
-inline const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>
-MatrixBase<Derived>::lazy() const
+template<unsigned int Added,unsigned int Removed>
+inline const Flagged<Derived, Added, Removed>
+DenseBase<Derived>::flagged() const
{
return derived();
}
diff --git a/extern/Eigen3/Eigen/src/Core/ForceAlignedAccess.h b/extern/Eigen3/Eigen/src/Core/ForceAlignedAccess.h
new file mode 100644
index 00000000000..11c1f8f709a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/ForceAlignedAccess.h
@@ -0,0 +1,157 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_FORCEALIGNEDACCESS_H
+#define EIGEN_FORCEALIGNEDACCESS_H
+
+/** \class ForceAlignedAccess
+ * \ingroup Core_Module
+ *
+ * \brief Enforce aligned packet loads and stores regardless of what is requested
+ *
+ * \param ExpressionType the type of the object of which we are forcing aligned packet access
+ *
+ * This class is the return type of MatrixBase::forceAlignedAccess()
+ * and most of the time this is the only way it is used.
+ *
+ * \sa MatrixBase::forceAlignedAccess()
+ */
+
+namespace internal {
+template<typename ExpressionType>
+struct traits<ForceAlignedAccess<ExpressionType> > : public traits<ExpressionType>
+{};
+}
+
+template<typename ExpressionType> class ForceAlignedAccess
+ : public internal::dense_xpr_base< ForceAlignedAccess<ExpressionType> >::type
+{
+ public:
+
+ typedef typename internal::dense_xpr_base<ForceAlignedAccess>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess)
+
+ inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
+
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
+
+ inline const CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_expression.coeff(row, col);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ return m_expression.const_cast_derived().coeffRef(row, col);
+ }
+
+ inline const CoeffReturnType coeff(Index index) const
+ {
+ return m_expression.coeff(index);
+ }
+
+ inline Scalar& coeffRef(Index index)
+ {
+ return m_expression.const_cast_derived().coeffRef(index);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index row, Index col) const
+ {
+ return m_expression.template packet<Aligned>(row, col);
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
+ {
+ m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index index) const
+ {
+ return m_expression.template packet<Aligned>(index);
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index index, const PacketScalar& x)
+ {
+ m_expression.const_cast_derived().template writePacket<Aligned>(index, x);
+ }
+
+ operator const ExpressionType&() const { return m_expression; }
+
+ protected:
+ const ExpressionType& m_expression;
+
+ private:
+ ForceAlignedAccess& operator=(const ForceAlignedAccess&);
+};
+
+/** \returns an expression of *this with forced aligned access
+ * \sa forceAlignedAccessIf(), class ForceAlignedAccess
+ */
+template<typename Derived>
+inline const ForceAlignedAccess<Derived>
+MatrixBase<Derived>::forceAlignedAccess() const
+{
+ return ForceAlignedAccess<Derived>(derived());
+}
+
+/** \returns an expression of *this with forced aligned access
+ * \sa forceAlignedAccessIf(), class ForceAlignedAccess
+ */
+template<typename Derived>
+inline ForceAlignedAccess<Derived>
+MatrixBase<Derived>::forceAlignedAccess()
+{
+ return ForceAlignedAccess<Derived>(derived());
+}
+
+/** \returns an expression of *this with forced aligned access if \a Enable is true.
+ * \sa forceAlignedAccess(), class ForceAlignedAccess
+ */
+template<typename Derived>
+template<bool Enable>
+inline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type
+MatrixBase<Derived>::forceAlignedAccessIf() const
+{
+ return derived();
+}
+
+/** \returns an expression of *this with forced aligned access if \a Enable is true.
+ * \sa forceAlignedAccess(), class ForceAlignedAccess
+ */
+template<typename Derived>
+template<bool Enable>
+inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type
+MatrixBase<Derived>::forceAlignedAccessIf()
+{
+ return derived();
+}
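+
+// Editor's sketch (not part of the original source): forcing aligned packet
+// access is only safe when the underlying storage really is aligned, e.g. an
+// EIGEN_ALIGN16 buffer; otherwise aligned loads may fault.
+//   EIGEN_ALIGN16 float data[4] = {1.f, 2.f, 3.f, 4.f};
+//   Eigen::Map<Eigen::Vector4f> v(data);
+//   v.forceAlignedAccess().setZero(); // packet stores may assume alignment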
+
+#endif // EIGEN_FORCEALIGNEDACCESS_H
diff --git a/extern/Eigen3/Eigen/src/Core/Functors.h b/extern/Eigen3/Eigen/src/Core/Functors.h
new file mode 100644
index 00000000000..54636e0d459
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Functors.h
@@ -0,0 +1,942 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_FUNCTORS_H
+#define EIGEN_FUNCTORS_H
+
+namespace internal {
+
+// associative functors:
+
+/** \internal
+ * \brief Template functor to compute the sum of two scalars
+ *
+ * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, MatrixBase::sum()
+ */
+template<typename Scalar> struct scalar_sum_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::padd(a,b); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+ { return internal::predux(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sum_op<Scalar> > {
+ enum {
+ Cost = NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasAdd
+ };
+};
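+
+// Editor's sketch (not part of the original source): these functors are the
+// kernels behind the expression templates; a compatible functor can also be
+// passed explicitly to binaryExpr().
+//   Eigen::Vector3f a(1, 2, 3), b(4, 5, 6);
+//   Eigen::Vector3f s = a.binaryExpr(b, Eigen::internal::scalar_sum_op<float>()); // same as a + b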
+
+/** \internal
+ * \brief Template functor to compute the product of two scalars
+ *
+ * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux()
+ */
+template<typename LhsScalar,typename RhsScalar> struct scalar_product_op {
+ enum {
+ // TODO vectorize mixed product
+ Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul
+ };
+ typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)
+ EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::pmul(a,b); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+ { return internal::predux_mul(a); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {
+ enum {
+ Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!
+ PacketAccess = scalar_product_op<LhsScalar,RhsScalar>::Vectorizable
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the conjugate product of two scalars
+ *
+ * This is a shortcut for conj(x) * y, which is needed for optimization purposes; in Eigen2 support mode, this becomes x * conj(y).
+ */
+template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op {
+
+ enum {
+ Conj = NumTraits<LhsScalar>::IsComplex
+ };
+
+ typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
+
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)
+ EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
+ { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }
+
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
+ enum {
+ Cost = NumTraits<LhsScalar>::MulCost,
+ PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMul
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the min of two scalars
+ *
+ * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff()
+ */
+template<typename Scalar> struct scalar_min_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::pmin(a,b); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+ { return internal::predux_min(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_min_op<Scalar> > {
+ enum {
+ Cost = NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasMin
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the max of two scalars
+ *
+ * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff()
+ */
+template<typename Scalar> struct scalar_max_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::pmax(a,b); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+ { return internal::predux_max(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_max_op<Scalar> > {
+ enum {
+ Cost = NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasMax
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the hypot of two scalars
+ *
+ * \sa MatrixBase::stableNorm(), class Redux
+ */
+template<typename Scalar> struct scalar_hypot_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op)
+// typedef typename NumTraits<Scalar>::Real result_type;
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
+ {
+ using std::max;
+ using std::min;
+ Scalar p = (max)(_x, _y);
+ Scalar q = (min)(_x, _y);
+ Scalar qp = q/p;
+ return p * sqrt(Scalar(1) + qp*qp);
+ }
+};
+template<typename Scalar>
+struct functor_traits<scalar_hypot_op<Scalar> > {
+ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess=0 };
+};
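+
+// Editor's note (not part of the original source): the p*sqrt(1+(q/p)^2) form
+// above avoids the overflow a naive sqrt(x*x + y*y) would hit, e.g. for
+// x = y = 1e200 in double precision, where x*x already exceeds DBL_MAX.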
+
+// other binary functors:
+
+/** \internal
+ * \brief Template functor to compute the difference of two scalars
+ *
+ * \sa class CwiseBinaryOp, MatrixBase::operator-
+ */
+template<typename Scalar> struct scalar_difference_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::psub(a,b); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_difference_op<Scalar> > {
+ enum {
+ Cost = NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasSub
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the quotient of two scalars
+ *
+ * \sa class CwiseBinaryOp, Cwise::operator/()
+ */
+template<typename Scalar> struct scalar_quotient_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+ { return internal::pdiv(a,b); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_quotient_op<Scalar> > {
+ enum {
+ Cost = 2 * NumTraits<Scalar>::MulCost,
+ PacketAccess = packet_traits<Scalar>::HasDiv
+ };
+};
+
+// unary functors:
+
+/** \internal
+ * \brief Template functor to compute the opposite of a scalar
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::operator-
+ */
+template<typename Scalar> struct scalar_opposite_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op)
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+ { return internal::pnegate(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_opposite_op<Scalar> >
+{ enum {
+ Cost = NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasNegate };
+};
+
+/** \internal
+ * \brief Template functor to compute the absolute value of a scalar
+ *
+ * \sa class CwiseUnaryOp, Cwise::abs
+ */
+template<typename Scalar> struct scalar_abs_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op)
+ typedef typename NumTraits<Scalar>::Real result_type;
+ EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return abs(a); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+ { return internal::pabs(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_abs_op<Scalar> >
+{
+ enum {
+ Cost = NumTraits<Scalar>::AddCost,
+ PacketAccess = packet_traits<Scalar>::HasAbs
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the squared absolute value of a scalar
+ *
+ * \sa class CwiseUnaryOp, Cwise::abs2
+ */
+template<typename Scalar> struct scalar_abs2_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)
+ typedef typename NumTraits<Scalar>::Real result_type;
+ EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return abs2(a); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+ { return internal::pmul(a,a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_abs2_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasAbs2 }; };
+
+/** \internal
+ * \brief Template functor to compute the conjugate of a complex value
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::conjugate()
+ */
+template<typename Scalar> struct scalar_conjugate_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
+ EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return conj(a); }
+ template<typename Packet>
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_conjugate_op<Scalar> >
+{
+ enum {
+ Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
+ PacketAccess = packet_traits<Scalar>::HasConj
+ };
+};
+
+/** \internal
+ * \brief Template functor to cast a scalar to another type
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::cast()
+ */
+template<typename Scalar, typename NewType>
+struct scalar_cast_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef NewType result_type;
+ EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast<Scalar, NewType>(a); }
+};
+template<typename Scalar, typename NewType>
+struct functor_traits<scalar_cast_op<Scalar,NewType> >
+{ enum { Cost = is_same<Scalar, NewType>::value ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };
+
+/** \internal
+ * \brief Template functor to extract the real part of a complex
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::real()
+ */
+template<typename Scalar>
+struct scalar_real_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)
+ typedef typename NumTraits<Scalar>::Real result_type;
+ EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return real(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_real_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+ * \brief Template functor to extract the imaginary part of a complex
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::imag()
+ */
+template<typename Scalar>
+struct scalar_imag_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)
+ typedef typename NumTraits<Scalar>::Real result_type;
+ EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return imag(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_imag_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+ * \brief Template functor to extract the real part of a complex as a reference
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::real()
+ */
+template<typename Scalar>
+struct scalar_real_ref_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)
+ typedef typename NumTraits<Scalar>::Real result_type;
+ EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return real_ref(*const_cast<Scalar*>(&a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_real_ref_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+ * \brief Template functor to extract the imaginary part of a complex as a reference
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::imag()
+ */
+template<typename Scalar>
+struct scalar_imag_ref_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)
+ typedef typename NumTraits<Scalar>::Real result_type;
+ EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return imag_ref(*const_cast<Scalar*>(&a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_imag_ref_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+ *
+ * \brief Template functor to compute the exponential of a scalar
+ *
+ * \sa class CwiseUnaryOp, Cwise::exp()
+ */
+template<typename Scalar> struct scalar_exp_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op)
+ inline const Scalar operator() (const Scalar& a) const { return exp(a); }
+ typedef typename packet_traits<Scalar>::type Packet;
+ inline Packet packetOp(const Packet& a) const { return internal::pexp(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_exp_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasExp }; };
+
+/** \internal
+ *
+ * \brief Template functor to compute the logarithm of a scalar
+ *
+ * \sa class CwiseUnaryOp, Cwise::log()
+ */
+template<typename Scalar> struct scalar_log_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op)
+ inline const Scalar operator() (const Scalar& a) const { return log(a); }
+ typedef typename packet_traits<Scalar>::type Packet;
+ inline Packet packetOp(const Packet& a) const { return internal::plog(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_log_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog }; };
+
+/** \internal
+ * \brief Template functor to multiply a scalar by a fixed other one
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/
+ */
+/* NOTE why doing the pset1() in packetOp *is* an optimization:
+ * it may seem better to declare m_other as a Packet and do the pset1() once
+ * in the constructor. However, in practice:
+ * - GCC does not like m_other as a Packet and generates a load every time it needs it
+ * - on the other hand GCC is able to move the pset1() out of the loop :)
+ * - simpler code ;)
+ * (ICC and gcc 4.4 seem to perform well in both cases; the issue is visible with y = a*x + b*y)
+ */
+template<typename Scalar>
+struct scalar_multiple_op {
+ typedef typename packet_traits<Scalar>::type Packet;
+ // FIXME the default copy constructor seems buggy with std::complex<>
+ EIGEN_STRONG_INLINE scalar_multiple_op(const scalar_multiple_op& other) : m_other(other.m_other) { }
+ EIGEN_STRONG_INLINE scalar_multiple_op(const Scalar& other) : m_other(other) { }
+ EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+ { return internal::pmul(a, pset1<Packet>(m_other)); }
+ typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_multiple_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+template<typename Scalar1, typename Scalar2>
+struct scalar_multiple2_op {
+ typedef typename scalar_product_traits<Scalar1,Scalar2>::ReturnType result_type;
+ EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { }
+ EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { }
+ EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; }
+ typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other;
+};
+template<typename Scalar1,typename Scalar2>
+struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> >
+{ enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
+
+template<typename Scalar, bool IsInteger>
+struct scalar_quotient1_impl {
+ typedef typename packet_traits<Scalar>::type Packet;
+ // FIXME the default copy constructor seems buggy with std::complex<>
+ EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { }
+ EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(static_cast<Scalar>(1) / other) {}
+ EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
+ EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+ { return internal::pmul(a, pset1<Packet>(m_other)); }
+ const Scalar m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_quotient1_impl<Scalar,false> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+template<typename Scalar>
+struct scalar_quotient1_impl<Scalar,true> {
+ // FIXME the default copy constructor seems bugged with std::complex<>
+ EIGEN_STRONG_INLINE scalar_quotient1_impl(const scalar_quotient1_impl& other) : m_other(other.m_other) { }
+ EIGEN_STRONG_INLINE scalar_quotient1_impl(const Scalar& other) : m_other(other) {}
+ EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
+ typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_quotient1_impl<Scalar,true> >
+{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
+
+/** \internal
+ * \brief Template functor to divide a scalar by a fixed value
+ *
+ * This functor is used to implement the quotient of a matrix by
+ * a scalar where the scalar type is not necessarily a floating point type.
+ *
+ * \sa class CwiseUnaryOp, MatrixBase::operator/
+ */
+template<typename Scalar>
+struct scalar_quotient1_op : scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger > {
+ EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other)
+ : scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger >(other) {}
+};
+template<typename Scalar>
+struct functor_traits<scalar_quotient1_op<Scalar> >
+: functor_traits<scalar_quotient1_impl<Scalar, NumTraits<Scalar>::IsInteger> >
+{};
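+// For illustration (a sketch, not part of Eigen) of the dispatch above: for
+// floating-point scalars the reciprocal is precomputed once and every
+// coefficient is multiplied, while for integers a true division is kept,
+// since storing 1/other would truncate to zero:
+//   internal::scalar_quotient1_op<float> fop(4.0f); // stores 0.25f
+//   float f = fop(2.0f);                            // 2.0f * 0.25f == 0.5f
+//   internal::scalar_quotient1_op<int> iop(4);      // stores 4
+//   int i = iop(9);                                 // 9 / 4 == 2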
+
+// nullary functors
+
+template<typename Scalar>
+struct scalar_constant_op {
+ typedef typename packet_traits<Scalar>::type Packet;
+ EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { }
+ EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Packet packetOp(Index, Index = 0) const { return internal::pset1<Packet>(m_other); }
+ const Scalar m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_constant_op<Scalar> >
+// FIXME replace this packet test by a safe one
+{ enum { Cost = 1, PacketAccess = packet_traits<Scalar>::Vectorizable, IsRepeatable = true }; };
+
+template<typename Scalar> struct scalar_identity_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op)
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? Scalar(1) : Scalar(0); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_identity_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
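+// For illustration (not part of Eigen's sources): these nullary functors are
+// what the familiar factory calls evaluate through, e.g.
+//   MatrixXd c = MatrixXd::Constant(2, 2, 3.0); // scalar_constant_op<double>
+//   MatrixXd i = MatrixXd::Identity(3, 3);      // scalar_identity_op<double>
+// Identity is not vectorizable because each coefficient depends on (row,col).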
+
+template <typename Scalar, bool RandomAccess> struct linspaced_op_impl;
+
+// linear access for packet ops:
+// 1) initialization
+// base = [low, ..., low] + ([step, ..., step] * [-size, ..., -1])
+// 2) each step
+// base += [size*step, ..., size*step]
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,false>
+{
+ typedef typename packet_traits<Scalar>::type Packet;
+
+ linspaced_op_impl(Scalar low, Scalar step) :
+ m_low(low), m_step(step),
+ m_packetStep(pset1<Packet>(packet_traits<Scalar>::size*step)),
+ m_base(padd(pset1<Packet>(low),pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
+
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); }
+
+ const Scalar m_low;
+ const Scalar m_step;
+ const Packet m_packetStep;
+ mutable Packet m_base;
+};
+
+// random access for packet ops:
+// 1) each step
+// [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,true>
+{
+ typedef typename packet_traits<Scalar>::type Packet;
+
+ linspaced_op_impl(Scalar low, Scalar step) :
+ m_low(low), m_step(step),
+ m_lowPacket(pset1<Packet>(m_low)), m_stepPacket(pset1<Packet>(m_step)), m_interPacket(plset<Scalar>(0)) {}
+
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
+
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Packet packetOp(Index i) const
+ { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(i),m_interPacket))); }
+
+ const Scalar m_low;
+ const Scalar m_step;
+ const Packet m_lowPacket;
+ const Packet m_stepPacket;
+ const Packet m_interPacket;
+};
+
+// ----- Linspace functor ----------------------------------------------------------------
+
+// Forward declaration (we default to random access, which does not really give
+// us a speed gain when using packet access, but it allows the functor to be
+// used in nested expressions).
+template <typename Scalar, bool RandomAccess = true> struct linspaced_op;
+template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_op<Scalar,RandomAccess> >
+{ enum { Cost = 1, PacketAccess = packet_traits<Scalar>::HasSetLinear, IsRepeatable = true }; };
+template <typename Scalar, bool RandomAccess> struct linspaced_op
+{
+ typedef typename packet_traits<Scalar>::type Packet;
+ linspaced_op(Scalar low, Scalar high, int num_steps) : impl(low, (high-low)/(num_steps-1)) {}
+
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
+
+ // We need this function when assigning e.g. a RowVectorXd to a MatrixXd, since
+ // in that case row==0 and col is used for the actual iteration.
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const
+ {
+ eigen_assert(col==0 || row==0);
+ return impl(col + row);
+ }
+
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Packet packetOp(Index i) const { return impl.packetOp(i); }
+
+ // We need this function when assigning e.g. a RowVectorXd to a MatrixXd, since
+ // in that case row==0 and col is used for the actual iteration.
+ template<typename Index>
+ EIGEN_STRONG_INLINE const Packet packetOp(Index row, Index col) const
+ {
+ eigen_assert(col==0 || row==0);
+ return impl.packetOp(col + row);
+ }
+
+ // This proxy object handles the actual required temporaries, the different
+ // implementations (random vs. sequential access) as well as the
+ // correct piping to size 2/4 packet operations.
+ const linspaced_op_impl<Scalar,RandomAccess> impl;
+};
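+// For illustration (not part of Eigen's sources): this functor backs
+// DenseBase::LinSpaced, e.g.
+//   VectorXf v = VectorXf::LinSpaced(5, 0.f, 1.f); // 0, 0.25, 0.5, 0.75, 1
+// with step = (high-low)/(num_steps-1), as computed in the constructor above.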
+
+// All functors allow linear access, except scalar_identity_op. So we define here a quick
+// meta-function to indicate whether a functor allows linear access; it always answers
+// 'yes', except for scalar_identity_op.
+// FIXME move this to functor_traits by adding a functor_default
+template<typename Functor> struct functor_has_linear_access { enum { ret = 1 }; };
+template<typename Scalar> struct functor_has_linear_access<scalar_identity_op<Scalar> > { enum { ret = 0 }; };
+
+// In CwiseBinaryOp we require the Lhs and Rhs to have the same scalar type, except for multiplication,
+// where we only require them to have the same _real_ scalar type, so one may multiply, say, float by complex<float>.
+// FIXME move this to functor_traits adding a functor_default
+template<typename Functor> struct functor_allows_mixing_real_and_complex { enum { ret = 0 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_allows_mixing_real_and_complex<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+
+
+/** \internal
+ * \brief Template functor to add a fixed value to a scalar
+ * \sa class CwiseUnaryOp, Array::operator+
+ */
+/* If you wonder why doing the pset1() in packetOp() is an optimization, see the NOTE above scalar_multiple_op */
+template<typename Scalar>
+struct scalar_add_op {
+ typedef typename packet_traits<Scalar>::type Packet;
+ // FIXME the default copy constructor seems bugged with std::complex<>
+ inline scalar_add_op(const scalar_add_op& other) : m_other(other.m_other) { }
+ inline scalar_add_op(const Scalar& other) : m_other(other) { }
+ inline Scalar operator() (const Scalar& a) const { return a + m_other; }
+ inline const Packet packetOp(const Packet& a) const
+ { return internal::padd(a, pset1<Packet>(m_other)); }
+ const Scalar m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_add_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = packet_traits<Scalar>::HasAdd }; };
+
+/** \internal
+ * \brief Template functor to compute the square root of a scalar
+ * \sa class CwiseUnaryOp, Cwise::sqrt()
+ */
+template<typename Scalar> struct scalar_sqrt_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)
+ inline const Scalar operator() (const Scalar& a) const { return sqrt(a); }
+ typedef typename packet_traits<Scalar>::type Packet;
+ inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sqrt_op<Scalar> >
+{ enum {
+ Cost = 5 * NumTraits<Scalar>::MulCost,
+ PacketAccess = packet_traits<Scalar>::HasSqrt
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the cosine of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::cos()
+ */
+template<typename Scalar> struct scalar_cos_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)
+ inline Scalar operator() (const Scalar& a) const { return cos(a); }
+ typedef typename packet_traits<Scalar>::type Packet;
+ inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cos_op<Scalar> >
+{
+ enum {
+ Cost = 5 * NumTraits<Scalar>::MulCost,
+ PacketAccess = packet_traits<Scalar>::HasCos
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the sine of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::sin()
+ */
+template<typename Scalar> struct scalar_sin_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)
+ inline const Scalar operator() (const Scalar& a) const { return sin(a); }
+ typedef typename packet_traits<Scalar>::type Packet;
+ inline Packet packetOp(const Packet& a) const { return internal::psin(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sin_op<Scalar> >
+{
+ enum {
+ Cost = 5 * NumTraits<Scalar>::MulCost,
+ PacketAccess = packet_traits<Scalar>::HasSin
+ };
+};
+
+
+/** \internal
+ * \brief Template functor to compute the tan of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::tan()
+ */
+template<typename Scalar> struct scalar_tan_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)
+ inline const Scalar operator() (const Scalar& a) const { return tan(a); }
+ typedef typename packet_traits<Scalar>::type Packet;
+ inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_tan_op<Scalar> >
+{
+ enum {
+ Cost = 5 * NumTraits<Scalar>::MulCost,
+ PacketAccess = packet_traits<Scalar>::HasTan
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the arc cosine of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::acos()
+ */
+template<typename Scalar> struct scalar_acos_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)
+ inline const Scalar operator() (const Scalar& a) const { return acos(a); }
+ typedef typename packet_traits<Scalar>::type Packet;
+ inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_acos_op<Scalar> >
+{
+ enum {
+ Cost = 5 * NumTraits<Scalar>::MulCost,
+ PacketAccess = packet_traits<Scalar>::HasACos
+ };
+};
+
+/** \internal
+ * \brief Template functor to compute the arc sine of a scalar
+ * \sa class CwiseUnaryOp, ArrayBase::asin()
+ */
+template<typename Scalar> struct scalar_asin_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)
+ inline const Scalar operator() (const Scalar& a) const { return asin(a); }
+ typedef typename packet_traits<Scalar>::type Packet;
+ inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_asin_op<Scalar> >
+{
+ enum {
+ Cost = 5 * NumTraits<Scalar>::MulCost,
+ PacketAccess = packet_traits<Scalar>::HasASin
+ };
+};
+
+/** \internal
+ * \brief Template functor to raise a scalar to a power
+ * \sa class CwiseUnaryOp, Cwise::pow
+ */
+template<typename Scalar>
+struct scalar_pow_op {
+ // FIXME the default copy constructor seems bugged with std::complex<>
+ inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { }
+ inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
+ inline Scalar operator() (const Scalar& a) const { return internal::pow(a, m_exponent); }
+ const Scalar m_exponent;
+};
+template<typename Scalar>
+struct functor_traits<scalar_pow_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
+
+/** \internal
+ * \brief Template functor to compute the inverse of a scalar
+ * \sa class CwiseUnaryOp, Cwise::inverse()
+ */
+template<typename Scalar>
+struct scalar_inverse_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op)
+ inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; }
+ template<typename Packet>
+ inline const Packet packetOp(const Packet& a) const
+ { return internal::pdiv(pset1<Packet>(Scalar(1)),a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_inverse_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
+
+/** \internal
+ * \brief Template functor to compute the square of a scalar
+ * \sa class CwiseUnaryOp, Cwise::square()
+ */
+template<typename Scalar>
+struct scalar_square_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op)
+ inline Scalar operator() (const Scalar& a) const { return a*a; }
+ template<typename Packet>
+ inline const Packet packetOp(const Packet& a) const
+ { return internal::pmul(a,a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_square_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+/** \internal
+ * \brief Template functor to compute the cube of a scalar
+ * \sa class CwiseUnaryOp, Cwise::cube()
+ */
+template<typename Scalar>
+struct scalar_cube_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op)
+ inline Scalar operator() (const Scalar& a) const { return a*a*a; }
+ template<typename Packet>
+ inline const Packet packetOp(const Packet& a) const
+ { return internal::pmul(a,pmul(a,a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cube_op<Scalar> >
+{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+// default functor traits for STL functors:
+
+template<typename T>
+struct functor_traits<std::multiplies<T> >
+{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::divides<T> >
+{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::plus<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::minus<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::negate<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_or<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_and<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_not<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::greater<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::less<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::greater_equal<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::less_equal<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::equal_to<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::not_equal_to<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binder2nd<T> >
+{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binder1st<T> >
+{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::unary_negate<T> >
+{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binary_negate<T> >
+{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
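+// For illustration (not part of Eigen): these traits let STL functors be used
+// directly in Eigen expressions, for instance with MatrixXd a, b of equal size:
+//   MatrixXd s = a.binaryExpr(b, std::plus<double>());  // same result as a + b
+//   MatrixXd n = a.unaryExpr(std::negate<double>());    // same result as -a
+// PacketAccess is false throughout, so such expressions are never vectorized.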
+
+#ifdef EIGEN_STDEXT_SUPPORT
+
+template<typename T0,typename T1>
+struct functor_traits<std::project1st<T0,T1> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::project2nd<T0,T1> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::select2nd<std::pair<T0,T1> > >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::select1st<std::pair<T0,T1> > >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::unary_compose<T0,T1> >
+{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost, PacketAccess = false }; };
+
+template<typename T0,typename T1,typename T2>
+struct functor_traits<std::binary_compose<T0,T1,T2> >
+{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost + functor_traits<T2>::Cost, PacketAccess = false }; };
+
+#endif // EIGEN_STDEXT_SUPPORT
+
+// Allow adding new functors and specializations of functor_traits from outside Eigen.
+// This macro is needed because functor_traits must be specialized after it is declared but before it is used...
+#ifdef EIGEN_FUNCTORS_PLUGIN
+#include EIGEN_FUNCTORS_PLUGIN
+#endif
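+// A minimal sketch of the plugin mechanism (file and functor names are
+// hypothetical): build with -DEIGEN_FUNCTORS_PLUGIN='"my_functors.h"' and put
+// in that header, which gets included right here inside namespace internal:
+//   template<typename Scalar> struct my_clamp01_op {
+//     EIGEN_EMPTY_STRUCT_CTOR(my_clamp01_op)
+//     const Scalar operator() (const Scalar& a) const
+//     { return a < Scalar(0) ? Scalar(0) : (a > Scalar(1) ? Scalar(1) : a); }
+//   };
+//   template<typename Scalar> struct functor_traits<my_clamp01_op<Scalar> >
+//   { enum { Cost = 2*NumTraits<Scalar>::AddCost, PacketAccess = false }; };
+// which is then usable through m.unaryExpr(my_clamp01_op<double>()).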
+
+} // end namespace internal
+
+#endif // EIGEN_FUNCTORS_H
diff --git a/extern/Eigen3/Eigen/src/Core/Fuzzy.h b/extern/Eigen3/Eigen/src/Core/Fuzzy.h
new file mode 100644
index 00000000000..d266eed0ac6
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Fuzzy.h
@@ -0,0 +1,161 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_FUZZY_H
+#define EIGEN_FUZZY_H
+
+namespace internal
+{
+
+template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isApprox_selector
+{
+ static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec)
+ {
+ using std::min;
+ const typename internal::nested<Derived,2>::type nested(x);
+ const typename internal::nested<OtherDerived,2>::type otherNested(y);
+ return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
+ }
+};
+
+template<typename Derived, typename OtherDerived>
+struct isApprox_selector<Derived, OtherDerived, true>
+{
+ static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar)
+ {
+ return x.matrix() == y.matrix();
+ }
+};
+
+template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isMuchSmallerThan_object_selector
+{
+ static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec)
+ {
+ return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum();
+ }
+};
+
+template<typename Derived, typename OtherDerived>
+struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>
+{
+ static bool run(const Derived& x, const OtherDerived&, typename Derived::RealScalar)
+ {
+ return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
+ }
+};
+
+template<typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isMuchSmallerThan_scalar_selector
+{
+ static bool run(const Derived& x, const typename Derived::RealScalar& y, typename Derived::RealScalar prec)
+ {
+ return x.cwiseAbs2().sum() <= abs2(prec * y);
+ }
+};
+
+template<typename Derived>
+struct isMuchSmallerThan_scalar_selector<Derived, true>
+{
+ static bool run(const Derived& x, const typename Derived::RealScalar&, typename Derived::RealScalar)
+ {
+ return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
+ }
+};
+
+} // end namespace internal
+
+
+/** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
+ * are considered to be approximately equal within precision \f$ p \f$ if
+ * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
+ * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm,
+ * aka matrix L2 norm).
+ *
+ * \note Because of the multiplicative nature of this comparison, one can't use this function
+ * to check whether \c *this is approximately equal to the zero matrix or vector.
+ * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
+ * or vector. If you want to test whether \c *this is zero, use isMuchSmallerThan(const
+ * RealScalar&, RealScalar) instead.
+ *
+ * \sa isMuchSmallerThan(const RealScalar&, RealScalar) const
+ */
+template<typename Derived>
+template<typename OtherDerived>
+bool DenseBase<Derived>::isApprox(
+ const DenseBase<OtherDerived>& other,
+ RealScalar prec
+) const
+{
+ return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
+}
+
+/** \returns \c true if the norm of \c *this is much smaller than \a other,
+ * within the precision determined by \a prec.
+ *
+ * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
+ * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
+ * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
+ *
+ * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,
+ * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm
+ * of a reference matrix of the same dimensions.
+ *
+ * \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const
+ */
+template<typename Derived>
+bool DenseBase<Derived>::isMuchSmallerThan(
+ const typename NumTraits<Scalar>::Real& other,
+ RealScalar prec
+) const
+{
+ return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);
+}
+
+/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
+ * within the precision determined by \a prec.
+ *
+ * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
+ * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
+ * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
+ * For matrices, the comparison is done using the Hilbert-Schmidt norm.
+ *
+ * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
+ */
+template<typename Derived>
+template<typename OtherDerived>
+bool DenseBase<Derived>::isMuchSmallerThan(
+ const DenseBase<OtherDerived>& other,
+ RealScalar prec
+) const
+{
+ return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
+}
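+// For illustration (a sketch; values hypothetical, default precision assumed):
+//   Vector2d v(1.0, 0.0), w(1.0, 1e-13);
+//   v.isApprox(w);                 // true: ||v-w|| <= prec * min(||v||,||w||)
+//   Vector2d::Zero().isApprox(v);  // false: never test against zero this way
+//   w.isMuchSmallerThan(1.0);      // false, but (w - v).isMuchSmallerThan(1.0)
+//                                  // is true: ||w-v|| <= prec * 1.0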
+
+#endif // EIGEN_FUZZY_H
diff --git a/extern/Eigen3/Eigen/src/Core/GenericPacketMath.h b/extern/Eigen3/Eigen/src/Core/GenericPacketMath.h
new file mode 100644
index 00000000000..8ed83532712
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/GenericPacketMath.h
@@ -0,0 +1,339 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GENERIC_PACKET_MATH_H
+#define EIGEN_GENERIC_PACKET_MATH_H
+
+namespace internal {
+
+/** \internal
+ * \file GenericPacketMath.h
+ *
+ * Default implementation for types not supported by the vectorization.
+ * In practice these functions are provided to make it easier to write
+ * generic vectorized code.
+ */
+
+#ifndef EIGEN_DEBUG_ALIGNED_LOAD
+#define EIGEN_DEBUG_ALIGNED_LOAD
+#endif
+
+#ifndef EIGEN_DEBUG_UNALIGNED_LOAD
+#define EIGEN_DEBUG_UNALIGNED_LOAD
+#endif
+
+#ifndef EIGEN_DEBUG_ALIGNED_STORE
+#define EIGEN_DEBUG_ALIGNED_STORE
+#endif
+
+#ifndef EIGEN_DEBUG_UNALIGNED_STORE
+#define EIGEN_DEBUG_UNALIGNED_STORE
+#endif
+
+struct default_packet_traits
+{
+ enum {
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasNegate = 1,
+ HasAbs = 1,
+ HasAbs2 = 1,
+ HasMin = 1,
+ HasMax = 1,
+ HasConj = 1,
+ HasSetLinear = 1,
+
+ HasDiv = 0,
+ HasSqrt = 0,
+ HasExp = 0,
+ HasLog = 0,
+ HasPow = 0,
+
+ HasSin = 0,
+ HasCos = 0,
+ HasTan = 0,
+ HasASin = 0,
+ HasACos = 0,
+ HasATan = 0
+ };
+};
+
+template<typename T> struct packet_traits : default_packet_traits
+{
+ typedef T type;
+ enum {
+ Vectorizable = 0,
+ size = 1,
+ AlignedOnScalar = 0
+ };
+ enum {
+ HasAdd = 0,
+ HasSub = 0,
+ HasMul = 0,
+ HasNegate = 0,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasConj = 0,
+ HasSetLinear = 0
+ };
+};
+
+/** \internal \returns a + b (coeff-wise) */
+template<typename Packet> inline Packet
+padd(const Packet& a,
+ const Packet& b) { return a+b; }
+
+/** \internal \returns a - b (coeff-wise) */
+template<typename Packet> inline Packet
+psub(const Packet& a,
+ const Packet& b) { return a-b; }
+
+/** \internal \returns -a (coeff-wise) */
+template<typename Packet> inline Packet
+pnegate(const Packet& a) { return -a; }
+
+/** \internal \returns conj(a) (coeff-wise) */
+template<typename Packet> inline Packet
+pconj(const Packet& a) { return conj(a); }
+
+/** \internal \returns a * b (coeff-wise) */
+template<typename Packet> inline Packet
+pmul(const Packet& a,
+ const Packet& b) { return a*b; }
+
+/** \internal \returns a / b (coeff-wise) */
+template<typename Packet> inline Packet
+pdiv(const Packet& a,
+ const Packet& b) { return a/b; }
+
+/** \internal \returns the min of \a a and \a b (coeff-wise) */
+template<typename Packet> inline Packet
+pmin(const Packet& a,
+ const Packet& b) { using std::min; return (min)(a, b); }
+
+/** \internal \returns the max of \a a and \a b (coeff-wise) */
+template<typename Packet> inline Packet
+pmax(const Packet& a,
+ const Packet& b) { using std::max; return (max)(a, b); }
+
+/** \internal \returns the absolute value of \a a */
+template<typename Packet> inline Packet
+pabs(const Packet& a) { return abs(a); }
+
+/** \internal \returns the bitwise and of \a a and \a b */
+template<typename Packet> inline Packet
+pand(const Packet& a, const Packet& b) { return a & b; }
+
+/** \internal \returns the bitwise or of \a a and \a b */
+template<typename Packet> inline Packet
+por(const Packet& a, const Packet& b) { return a | b; }
+
+/** \internal \returns the bitwise xor of \a a and \a b */
+template<typename Packet> inline Packet
+pxor(const Packet& a, const Packet& b) { return a ^ b; }
+
+/** \internal \returns the bitwise andnot of \a a and \a b */
+template<typename Packet> inline Packet
+pandnot(const Packet& a, const Packet& b) { return a & (!b); }
+
+/** \internal \returns a packet version of \a *from; \a from must be 16 bytes aligned */
+template<typename Packet> inline Packet
+pload(const typename unpacket_traits<Packet>::type* from) { return *from; }
+
+/** \internal \returns a packet version of \a *from, (un-aligned load) */
+template<typename Packet> inline Packet
+ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }
+
+/** \internal \returns a packet with elements of \a *from duplicated, e.g.: (from[0],from[0],from[1],from[1]) */
+template<typename Packet> inline Packet
+ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
+
+/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */
+template<typename Packet> inline Packet
+pset1(const typename unpacket_traits<Packet>::type& a) { return a; }
+
+/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */
+template<typename Scalar> inline typename packet_traits<Scalar>::type
+plset(const Scalar& a) { return a; }
+
+/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */
+template<typename Scalar, typename Packet> inline void pstore(Scalar* to, const Packet& from)
+{ (*to) = from; }
+
+/** \internal copy the packet \a from to \a *to, (un-aligned store) */
+template<typename Scalar, typename Packet> inline void pstoreu(Scalar* to, const Packet& from)
+{ (*to) = from; }
+
+/** \internal tries to do cache prefetching of \a addr */
+template<typename Scalar> inline void prefetch(const Scalar* addr)
+{
+#if !defined(_MSC_VER)
+__builtin_prefetch(addr);
+#endif
+}
+
+/** \internal \returns the first element of a packet */
+template<typename Packet> inline typename unpacket_traits<Packet>::type pfirst(const Packet& a)
+{ return a; }
+
+/** \internal \returns a packet where element i contains the sum of the elements of \a vec[i] */
+template<typename Packet> inline Packet
+preduxp(const Packet* vecs) { return vecs[0]; }
+
+/** \internal \returns the sum of the elements of \a a*/
+template<typename Packet> inline typename unpacket_traits<Packet>::type predux(const Packet& a)
+{ return a; }
+
+/** \internal \returns the product of the elements of \a a*/
+template<typename Packet> inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a)
+{ return a; }
+
+/** \internal \returns the min of the elements of \a a*/
+template<typename Packet> inline typename unpacket_traits<Packet>::type predux_min(const Packet& a)
+{ return a; }
+
+/** \internal \returns the max of the elements of \a a*/
+template<typename Packet> inline typename unpacket_traits<Packet>::type predux_max(const Packet& a)
+{ return a; }
+
+/** \internal \returns the reversed elements of \a a*/
+template<typename Packet> inline Packet preverse(const Packet& a)
+{ return a; }
+
+
+/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */
+template<typename Packet> inline Packet pcplxflip(const Packet& a)
+{ return Packet(imag(a),real(a)); }
+
+/**************************
+* Special math functions
+***************************/
+
+/** \internal \returns the sine of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet psin(const Packet& a) { return sin(a); }
+
+/** \internal \returns the cosine of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pcos(const Packet& a) { return cos(a); }
+
+/** \internal \returns the tan of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet ptan(const Packet& a) { return tan(a); }
+
+/** \internal \returns the arc sine of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pasin(const Packet& a) { return asin(a); }
+
+/** \internal \returns the arc cosine of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pacos(const Packet& a) { return acos(a); }
+
+/** \internal \returns the exp of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pexp(const Packet& a) { return exp(a); }
+
+/** \internal \returns the log of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet plog(const Packet& a) { return log(a); }
+
+/** \internal \returns the square-root of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet psqrt(const Packet& a) { return sqrt(a); }
+
+/***************************************************************************
+* The following functions might not have to be overridden for vectorized types
+***************************************************************************/
+
+/** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */
+// NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type)
+template<typename Packet>
+inline void pstore1(typename unpacket_traits<Packet>::type* to, const typename unpacket_traits<Packet>::type& a)
+{
+ pstore(to, pset1<Packet>(a));
+}
+
+/** \internal \returns a * b + c (coeff-wise) */
+template<typename Packet> inline Packet
+pmadd(const Packet& a,
+ const Packet& b,
+ const Packet& c)
+{ return padd(pmul(a, b),c); }
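+
+/* For illustration (a sketch, not part of Eigen): how these primitives compose
+ * into a generic kernel, y[i] += alpha*x[i], written once and vectorized
+ * wherever packet_traits<Scalar> provides a wide packet type; with the scalar
+ * fallbacks above it degenerates to plain scalar code. */
+template<typename Scalar>
+inline void axpy_sketch(const Scalar& alpha, const Scalar* x, Scalar* y, int n)
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+  const int step = packet_traits<Scalar>::size;
+  int i = 0;
+  for(; i + step <= n; i += step)  // vectorized body (unaligned load/store)
+    pstoreu(y+i, pmadd(pset1<Packet>(alpha), ploadu<Packet>(x+i), ploadu<Packet>(y+i)));
+  for(; i < n; ++i)                // scalar remainder
+    y[i] += alpha * x[i];
+}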
+
+/** \internal \returns a packet version of \a *from.
+ * If LoadMode equals #Aligned, \a from must be 16 bytes aligned */
+template<typename Packet, int LoadMode>
+inline Packet ploadt(const typename unpacket_traits<Packet>::type* from)
+{
+ if(LoadMode == Aligned)
+ return pload<Packet>(from);
+ else
+ return ploadu<Packet>(from);
+}
+
+/** \internal copy the packet \a from to \a *to.
+ * If StoreMode equals #Aligned, \a to must be 16 bytes aligned */
+template<typename Scalar, typename Packet, int LoadMode>
+inline void pstoret(Scalar* to, const Packet& from)
+{
+ if(LoadMode == Aligned)
+ pstore(to, from);
+ else
+ pstoreu(to, from);
+}
+
+/** \internal default implementation of palign() allowing partial specialization */
+template<int Offset,typename PacketType>
+struct palign_impl
+{
+ // by default data are aligned, so there is nothing to be done :)
+ inline static void run(PacketType&, const PacketType&) {}
+};
+
+/** \internal update \a first using the concatenation of the \a Offset last elements
+ * of \a first and packet_size minus \a Offset first elements of \a second */
+template<int Offset,typename PacketType>
+inline void palign(PacketType& first, const PacketType& second)
+{
+ palign_impl<Offset,PacketType>::run(first,second);
+}
+
+/***************************************************************************
+* Fast complex products (GCC generates a function call which is very slow)
+***************************************************************************/
+
+template<> inline std::complex<float> pmul(const std::complex<float>& a, const std::complex<float>& b)
+{ return std::complex<float>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
+
+template<> inline std::complex<double> pmul(const std::complex<double>& a, const std::complex<double>& b)
+{ return std::complex<double>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
+
+} // end namespace internal
+
+#endif // EIGEN_GENERIC_PACKET_MATH_H
+
diff --git a/extern/Eigen3/Eigen/src/Core/GlobalFunctions.h b/extern/Eigen3/Eigen/src/Core/GlobalFunctions.h
new file mode 100644
index 00000000000..144145a955c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/GlobalFunctions.h
@@ -0,0 +1,95 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GLOBAL_FUNCTIONS_H
+#define EIGEN_GLOBAL_FUNCTIONS_H
+
+#define EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(NAME,FUNCTOR) \
+ template<typename Derived> \
+ inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \
+ NAME(const Eigen::ArrayBase<Derived>& x) { \
+ return x.derived(); \
+ }
+
+#define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME,FUNCTOR) \
+ \
+ template<typename Derived> \
+ struct NAME##_retval<ArrayBase<Derived> > \
+ { \
+ typedef const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> type; \
+ }; \
+ template<typename Derived> \
+ struct NAME##_impl<ArrayBase<Derived> > \
+ { \
+ static inline typename NAME##_retval<ArrayBase<Derived> >::type run(const Eigen::ArrayBase<Derived>& x) \
+ { \
+ return x.derived(); \
+ } \
+ };
+
+
+namespace std
+{
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(real,scalar_real_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(imag,scalar_imag_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sin,scalar_sin_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(cos,scalar_cos_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(asin,scalar_asin_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(acos,scalar_acos_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(tan,scalar_tan_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(exp,scalar_exp_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(log,scalar_log_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(abs,scalar_abs_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sqrt,scalar_sqrt_op)
+
+ template<typename Derived>
+ inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar>, const Derived>
+  pow(const Eigen::ArrayBase<Derived>& x, const typename Derived::Scalar& exponent) {
+    return x.derived().pow(exponent);
+  }
+}
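+// For illustration (not part of Eigen): with the overloads above, the usual
+// std:: names apply coefficient-wise to array expressions, e.g. given ArrayXd a:
+//   ArrayXd b = std::sin(a) + std::cos(a);
+//   ArrayXd c = std::pow(a, 2.0);  // via the pow overload just above
+// each call returning a lazy CwiseUnaryOp expression rather than a temporary.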
+
+namespace Eigen
+{
+ namespace internal
+ {
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sin,scalar_sin_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(cos,scalar_cos_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(asin,scalar_asin_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(acos,scalar_acos_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(tan,scalar_tan_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(exp,scalar_exp_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(log,scalar_log_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs,scalar_abs_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op)
+ EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sqrt,scalar_sqrt_op)
+ }
+}
+
+// TODO: cleanly disable those functions that are not supported on Array (internal::real_ref, internal::random, internal::isApprox...)
+
+#endif // EIGEN_GLOBAL_FUNCTIONS_H
diff --git a/extern/Eigen2/Eigen/src/Core/IO.h b/extern/Eigen3/Eigen/src/Core/IO.h
index 2b00d5bc509..f3cfcdbf4a3 100644
--- a/extern/Eigen2/Eigen/src/Core/IO.h
+++ b/extern/Eigen3/Eigen/src/Core/IO.h
@@ -1,8 +1,8 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -26,15 +26,28 @@
#ifndef EIGEN_IO_H
#define EIGEN_IO_H
-enum { Raw, AlignCols };
+enum { DontAlignCols = 1 };
+enum { StreamPrecision = -1,
+ FullPrecision = -2 };
+
+namespace internal {
+template<typename Derived>
+std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt);
+}
/** \class IOFormat
+ * \ingroup Core_Module
*
* \brief Stores a set of parameters controlling the way matrices are printed
*
* List of available parameters:
- * - \b precision number of digits for floating point values
- * - \b flags can be either Raw (default) or AlignCols which aligns all the columns
+ * - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c FullPrecision.
+ * The default is the special value \c StreamPrecision which means to use the
+ * stream's own precision setting, as set for instance using \c cout.precision(3). The other special value
+ * \c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point
+ * type.
+ * - \b flags an OR-ed combination of flags; the default value is 0. The only currently available flag is \c DontAlignCols, which
+ * disables the alignment of columns, resulting in faster code.
* - \b coeffSeparator string printed between two coefficients of the same row
* - \b rowSeparator string printed between two rows
* - \b rowPrefix string printed at the beginning of each row
@@ -45,12 +58,12 @@ enum { Raw, AlignCols };
* Example: \include IOFormat.cpp
* Output: \verbinclude IOFormat.out
*
- * \sa MatrixBase::format(), class WithFormat
+ * \sa DenseBase::format(), class WithFormat
*/
struct IOFormat
{
/** Default constructor, see class IOFormat for the meaning of the parameters */
- IOFormat(int _precision=4, int _flags=Raw,
+ IOFormat(int _precision = StreamPrecision, int _flags = 0,
const std::string& _coeffSeparator = " ",
const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="",
const std::string& _matPrefix="", const std::string& _matSuffix="")
@@ -73,18 +86,19 @@ struct IOFormat
};
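// For illustration (a sketch; the values and the matrix m are hypothetical):
//   IOFormat fmt(FullPrecision, DontAlignCols, ", ", ";\n", "[", "]", "[", "]");
//   std::cout << m.format(fmt);  // prints, e.g., [[1, 2];
//                                //               [3, 4]]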
/** \class WithFormat
+ * \ingroup Core_Module
*
* \brief Pseudo expression providing matrix output with given format
*
* \param ExpressionType the type of the object on which IO stream operations are performed
*
* This class represents an expression with stream operators controlled by a given IOFormat.
- * It is the return type of MatrixBase::format()
+ * It is the return type of DenseBase::format()
* and most of the time this is the only way it is used.
*
* See class IOFormat for some examples.
*
- * \sa MatrixBase::format(), class IOFormat
+ * \sa DenseBase::format(), class IOFormat
*/
template<typename ExpressionType>
class WithFormat
@@ -97,7 +111,7 @@ class WithFormat
friend std::ostream & operator << (std::ostream & s, const WithFormat& wf)
{
- return ei_print_matrix(s, wf.m_matrix.eval(), wf.m_format);
+ return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format);
}
protected:
@@ -114,41 +128,100 @@ class WithFormat
*/
template<typename Derived>
inline const WithFormat<Derived>
-MatrixBase<Derived>::format(const IOFormat& fmt) const
+DenseBase<Derived>::format(const IOFormat& fmt) const
{
return WithFormat<Derived>(derived(), fmt);
}
+namespace internal {
+
+template<typename Scalar, bool IsInteger>
+struct significant_decimals_default_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline int run()
+ {
+ using std::ceil;
+ return cast<RealScalar,int>(ceil(-log(NumTraits<RealScalar>::epsilon())/log(RealScalar(10))));
+ }
+};
+
+template<typename Scalar>
+struct significant_decimals_default_impl<Scalar, true>
+{
+ static inline int run()
+ {
+ return 0;
+ }
+};
+
+template<typename Scalar>
+struct significant_decimals_impl
+ : significant_decimals_default_impl<Scalar, NumTraits<Scalar>::IsInteger>
+{};
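+// A worked instance of the formula above: for float, epsilon() is about
+// 1.19e-7, so -log(epsilon)/log(10) is about 6.92 and run() returns
+// ceil(6.92) == 7, i.e. FullPrecision prints floats with 7 significant digits.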
+
/** \internal
* print the matrix \a _m to the output stream \a s using the output format \a fmt */
template<typename Derived>
-std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt)
+std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt)
{
+ if(_m.size() == 0)
+ {
+ s << fmt.matPrefix << fmt.matSuffix;
+ return s;
+ }
+
const typename Derived::Nested m = _m;
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
- int width = 0;
- if (fmt.flags & AlignCols)
+ Index width = 0;
+
+ std::streamsize explicit_precision;
+ if(fmt.precision == StreamPrecision)
+ {
+ explicit_precision = 0;
+ }
+ else if(fmt.precision == FullPrecision)
+ {
+ if (NumTraits<Scalar>::IsInteger)
+ {
+ explicit_precision = 0;
+ }
+ else
+ {
+ explicit_precision = significant_decimals_impl<Scalar>::run();
+ }
+ }
+ else
+ {
+ explicit_precision = fmt.precision;
+ }
+
+ bool align_cols = !(fmt.flags & DontAlignCols);
+ if(align_cols)
{
// compute the largest width
- for(int j = 1; j < m.cols(); ++j)
- for(int i = 0; i < m.rows(); ++i)
+ for(Index j = 1; j < m.cols(); ++j)
+ for(Index i = 0; i < m.rows(); ++i)
{
std::stringstream sstr;
- sstr.precision(fmt.precision);
+ if(explicit_precision) sstr.precision(explicit_precision);
sstr << m.coeff(i,j);
- width = std::max<int>(width, int(sstr.str().length()));
+ width = std::max<Index>(width, Index(sstr.str().length()));
}
}
- s.precision(fmt.precision);
+ std::streamsize old_precision = 0;
+ if(explicit_precision) old_precision = s.precision(explicit_precision);
s << fmt.matPrefix;
- for(int i = 0; i < m.rows(); ++i)
+ for(Index i = 0; i < m.rows(); ++i)
{
if (i)
s << fmt.rowSpacer;
s << fmt.rowPrefix;
if(width) s.width(width);
s << m.coeff(i, 0);
- for(int j = 1; j < m.cols(); ++j)
+ for(Index j = 1; j < m.cols(); ++j)
{
s << fmt.coeffSeparator;
if (width) s.width(width);
@@ -159,26 +232,29 @@ std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOForm
s << fmt.rowSeparator;
}
s << fmt.matSuffix;
+ if(explicit_precision) s.precision(old_precision);
return s;
}
-/** \relates MatrixBase
+} // end namespace internal
+
+/** \relates DenseBase
*
* Outputs the matrix, to the given stream.
*
- * If you wish to print the matrix with a format different than the default, use MatrixBase::format().
+ * If you wish to print the matrix with a format different than the default, use DenseBase::format().
*
* It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers.
* If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default parameters.
*
- * \sa MatrixBase::format()
+ * \sa DenseBase::format()
*/
template<typename Derived>
std::ostream & operator <<
(std::ostream & s,
- const MatrixBase<Derived> & m)
+ const DenseBase<Derived> & m)
{
- return ei_print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT);
+ return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT);
}
#endif // EIGEN_IO_H
diff --git a/extern/Eigen3/Eigen/src/Core/Map.h b/extern/Eigen3/Eigen/src/Core/Map.h
new file mode 100644
index 00000000000..dd0673609c5
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Map.h
@@ -0,0 +1,205 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MAP_H
+#define EIGEN_MAP_H
+
+/** \class Map
+ * \ingroup Core_Module
+ *
+ * \brief A matrix or vector expression mapping an existing array of data.
+ *
+ * \tparam PlainObjectType the equivalent matrix type of the mapped data
+ * \tparam MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned.
+ * The default is \c #Unaligned.
+ * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout
+ * of an ordinary, contiguous array. This can be overridden by specifying strides.
+ * The type passed here must be a specialization of the Stride template, see examples below.
+ *
+ * This class represents a matrix or vector expression mapping an existing array of data.
+ * It can be used to let Eigen interface, without any overhead, with non-Eigen data structures,
+ * such as plain C arrays or structures from other libraries. By default, it assumes that the
+ * data is laid out contiguously in memory. You can however override this by explicitly specifying
+ * inner and outer strides.
+ *
+ * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix:
+ * \include Map_simple.cpp
+ * Output: \verbinclude Map_simple.out
+ *
+ * If you need to map non-contiguous arrays, you can do so by specifying strides:
+ *
+ * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer
+ * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time
+ * fixed value.
+ * \include Map_inner_stride.cpp
+ * Output: \verbinclude Map_inner_stride.out
+ *
+ * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping
+ * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns.
+ * Here, we're specifying the outer stride as a runtime parameter. Note that here \c OuterStride<> is
+ * a short version of \c OuterStride<Dynamic> because the default template parameter of OuterStride
+ * is \c Dynamic.
+ * \include Map_outer_stride.cpp
+ * Output: \verbinclude Map_outer_stride.out
+ *
+ * For more details and for an example of specifying both an inner and an outer stride, see class Stride.
+ *
+ * \b Tip: to change the array of data mapped by a Map object, you can use the C++
+ * placement new syntax:
+ *
+ * Example: \include Map_placement_new.cpp
+ * Output: \verbinclude Map_placement_new.out
+ *
+ * This class is the return type of PlainObjectBase::Map() but can also be used directly.
+ *
+ * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
+ */
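+// For illustration (a sketch; the buffer is hypothetical):
+//   float data[6] = {1, 2, 3, 4, 5, 6};
+//   Map<Matrix<float,2,3> > m(data);              // fixed size, contiguous
+//   Map<VectorXf, 0, InnerStride<2> > v(data, 3); // 1, 3, 5
+//   Map<MatrixXf, 0, OuterStride<> > o(data, 2, 2, OuterStride<>(3));
+//   // o's columns are {1,2} and {4,5}: consecutive columns start 3 floats apart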
+
+namespace internal {
+template<typename PlainObjectType, int MapOptions, typename StrideType>
+struct traits<Map<PlainObjectType, MapOptions, StrideType> >
+ : public traits<PlainObjectType>
+{
+ typedef traits<PlainObjectType> TraitsBase;
+ typedef typename PlainObjectType::Index Index;
+ typedef typename PlainObjectType::Scalar Scalar;
+ enum {
+ InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
+ ? int(PlainObjectType::InnerStrideAtCompileTime)
+ : int(StrideType::InnerStrideAtCompileTime),
+ OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
+ ? int(PlainObjectType::OuterStrideAtCompileTime)
+ : int(StrideType::OuterStrideAtCompileTime),
+ HasNoInnerStride = InnerStrideAtCompileTime == 1,
+ HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
+ HasNoStride = HasNoInnerStride && HasNoOuterStride,
+ IsAligned = bool(EIGEN_ALIGN) && ((int(MapOptions)&Aligned)==Aligned),
+ IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
+ KeepsPacketAccess = bool(HasNoInnerStride)
+ && ( bool(IsDynamicSize)
+ || HasNoOuterStride
+ || ( OuterStrideAtCompileTime!=Dynamic
+ && ((static_cast<int>(sizeof(Scalar))*OuterStrideAtCompileTime)%16)==0 ) ),
+ Flags0 = TraitsBase::Flags,
+ Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit),
+ Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime))
+ ? int(Flags1) : int(Flags1 & ~LinearAccessBit),
+ Flags3 = is_lvalue<PlainObjectType>::value ? int(Flags2) : (int(Flags2) & ~LvalueBit),
+ Flags = KeepsPacketAccess ? int(Flags3) : (int(Flags3) & ~PacketAccessBit)
+ };
+private:
+ enum { Options }; // Expressions don't have Options
+};
+}
+
+template<typename PlainObjectType, int MapOptions, typename StrideType> class Map
+ : public MapBase<Map<PlainObjectType, MapOptions, StrideType> >
+{
+ public:
+
+ typedef MapBase<Map> Base;
+
+ EIGEN_DENSE_PUBLIC_INTERFACE(Map)
+
+ typedef typename Base::PointerType PointerType;
+#if EIGEN2_SUPPORT_STAGE <= STAGE30_FULL_EIGEN3_API
+ typedef const Scalar* PointerArgType;
+ inline PointerType cast_to_pointer_type(PointerArgType ptr) { return const_cast<PointerType>(ptr); }
+#else
+ typedef PointerType PointerArgType;
+ inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; }
+#endif
+
+ inline Index innerStride() const
+ {
+ return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
+ }
+
+ inline Index outerStride() const
+ {
+ return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
+ : IsVectorAtCompileTime ? this->size()
+ : int(Flags)&RowMajorBit ? this->cols()
+ : this->rows();
+ }
+
+ /** Constructor in the fixed-size case.
+ *
+ * \param data pointer to the array to map
+ * \param stride optional Stride object, passing the strides.
+ */
+ inline Map(PointerArgType data, const StrideType& stride = StrideType())
+ : Base(cast_to_pointer_type(data)), m_stride(stride)
+ {
+ PlainObjectType::Base::_check_template_params();
+ }
+
+ /** Constructor in the dynamic-size vector case.
+ *
+ * \param data pointer to the array to map
+ * \param size the size of the vector expression
+ * \param stride optional Stride object, passing the strides.
+ */
+ inline Map(PointerArgType data, Index size, const StrideType& stride = StrideType())
+ : Base(cast_to_pointer_type(data), size), m_stride(stride)
+ {
+ PlainObjectType::Base::_check_template_params();
+ }
+
+ /** Constructor in the dynamic-size matrix case.
+ *
+ * \param data pointer to the array to map
+ * \param rows the number of rows of the matrix expression
+ * \param cols the number of columns of the matrix expression
+ * \param stride optional Stride object, passing the strides.
+ */
+ inline Map(PointerArgType data, Index rows, Index cols, const StrideType& stride = StrideType())
+ : Base(cast_to_pointer_type(data), rows, cols), m_stride(stride)
+ {
+ PlainObjectType::Base::_check_template_params();
+ }
+
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
+
+ protected:
+ StrideType m_stride;
+};
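+
+// Usage sketch (the array and its values are illustrative, not part of this
+// file): Map wraps existing memory as an Eigen expression without copying.
+//
+//   float raw[6] = {1, 2, 3, 4, 5, 6};
+//   Map<Matrix<float,2,3> > m(raw);              // fixed-size, column-major view
+//   Map<MatrixXf> d(raw, 2, 3);                  // dynamic-size view of the same data
+//   Map<VectorXf, 0, InnerStride<2> > s(raw, 3); // raw[0], raw[2], raw[4]
+//   m(0,1) = 10.0f;                              // writes through to raw[2]
+//
+// InnerStride here is the convenience Stride typedef from Stride.h.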
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+inline Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>
+ ::Array(const Scalar *data)
+{
+ this->_set_noalias(Eigen::Map<const Array>(data));
+}
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+inline Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>
+ ::Matrix(const Scalar *data)
+{
+ this->_set_noalias(Eigen::Map<const Matrix>(data));
+}
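+
+// Usage sketch (values illustrative): unlike Map, these constructors copy the
+// pointed-to data into the matrix, reading it in the matrix's own storage
+// order (column-major by default):
+//
+//   float raw[4] = {1, 2, 3, 4};
+//   Matrix2f m(raw);   // m(0,0)=1, m(1,0)=2, m(0,1)=3, m(1,1)=4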
+
+#endif // EIGEN_MAP_H
diff --git a/extern/Eigen3/Eigen/src/Core/MapBase.h b/extern/Eigen3/Eigen/src/Core/MapBase.h
new file mode 100644
index 00000000000..c23bcbfdcca
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/MapBase.h
@@ -0,0 +1,255 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MAPBASE_H
+#define EIGEN_MAPBASE_H
+
+#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \
+ EIGEN_STATIC_ASSERT((int(internal::traits<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \
+ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)
+
+
+/** \class MapBase
+ * \ingroup Core_Module
+ *
+ * \brief Base class for Map and Block expressions with direct access
+ *
+ * \sa class Map, class Block
+ */
+template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
+ : public internal::dense_xpr_base<Derived>::type
+{
+ public:
+
+ typedef typename internal::dense_xpr_base<Derived>::type Base;
+ enum {
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ SizeAtCompileTime = Base::SizeAtCompileTime
+ };
+
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::conditional<
+ bool(internal::is_lvalue<Derived>::value),
+ Scalar *,
+ const Scalar *>::type
+ PointerType;
+
+ using Base::derived;
+// using Base::RowsAtCompileTime;
+// using Base::ColsAtCompileTime;
+// using Base::SizeAtCompileTime;
+ using Base::MaxRowsAtCompileTime;
+ using Base::MaxColsAtCompileTime;
+ using Base::MaxSizeAtCompileTime;
+ using Base::IsVectorAtCompileTime;
+ using Base::Flags;
+ using Base::IsRowMajor;
+
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::coeff;
+ using Base::coeffRef;
+ using Base::lazyAssign;
+ using Base::eval;
+
+ using Base::innerStride;
+ using Base::outerStride;
+ using Base::rowStride;
+ using Base::colStride;
+
+ // bug 217 - compile error on ICC 11.1
+ using Base::operator=;
+
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+
+ inline Index rows() const { return m_rows.value(); }
+ inline Index cols() const { return m_cols.value(); }
+
+ /** Returns a pointer to the first coefficient of the matrix or vector.
+ *
+ * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride().
+ *
+ * \sa innerStride(), outerStride()
+ */
+ inline const Scalar* data() const { return m_data; }
+
+ inline const Scalar& coeff(Index row, Index col) const
+ {
+ return m_data[col * colStride() + row * rowStride()];
+ }
+
+ inline const Scalar& coeff(Index index) const
+ {
+ EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+ return m_data[index * innerStride()];
+ }
+
+ inline const Scalar& coeffRef(Index row, Index col) const
+ {
+ return this->m_data[col * colStride() + row * rowStride()];
+ }
+
+ inline const Scalar& coeffRef(Index index) const
+ {
+ EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+ return this->m_data[index * innerStride()];
+ }
+
+ template<int LoadMode>
+ inline PacketScalar packet(Index row, Index col) const
+ {
+ return internal::ploadt<PacketScalar, LoadMode>
+ (m_data + (col * colStride() + row * rowStride()));
+ }
+
+ template<int LoadMode>
+ inline PacketScalar packet(Index index) const
+ {
+ EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+ return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
+ }
+
+ inline MapBase(PointerType data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
+ {
+ EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+ checkSanity();
+ }
+
+ inline MapBase(PointerType data, Index size)
+ : m_data(data),
+ m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)),
+ m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime))
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ eigen_assert(size >= 0);
+ eigen_assert(data == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
+ checkSanity();
+ }
+
+ inline MapBase(PointerType data, Index rows, Index cols)
+ : m_data(data), m_rows(rows), m_cols(cols)
+ {
+ eigen_assert( (data == 0)
+ || ( rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
+ && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));
+ checkSanity();
+ }
+
+ protected:
+
+ void checkSanity() const
+ {
+ EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(internal::traits<Derived>::Flags&PacketAccessBit,
+ internal::inner_stride_at_compile_time<Derived>::ret==1),
+ PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
+ eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % (sizeof(Scalar)*internal::packet_traits<Scalar>::size)) == 0)
+ && "data is not aligned");
+ }
+
+ PointerType m_data;
+ const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+ const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
+};
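+
+// Addressing sketch: coeff(row, col) reads
+//   m_data[col * colStride() + row * rowStride()],
+// where for a column-major map rowStride() == innerStride() and
+// colStride() == outerStride(). E.g. (illustrative) a 2x3 column-major map
+// with innerStride 1 and outerStride 2 finds coeff(1, 2) at
+// m_data[2*2 + 1*1] == m_data[5].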
+
+template<typename Derived> class MapBase<Derived, WriteAccessors>
+ : public MapBase<Derived, ReadOnlyAccessors>
+{
+ public:
+
+ typedef MapBase<Derived, ReadOnlyAccessors> Base;
+
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::PacketScalar PacketScalar;
+ typedef typename Base::Index Index;
+ typedef typename Base::PointerType PointerType;
+
+ using Base::derived;
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::coeff;
+ using Base::coeffRef;
+
+ using Base::innerStride;
+ using Base::outerStride;
+ using Base::rowStride;
+ using Base::colStride;
+
+ typedef typename internal::conditional<
+ internal::is_lvalue<Derived>::value,
+ Scalar,
+ const Scalar
+ >::type ScalarWithConstIfNotLvalue;
+
+ inline const Scalar* data() const { return this->m_data; }
+ inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error
+
+ inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
+ {
+ return this->m_data[col * colStride() + row * rowStride()];
+ }
+
+ inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
+ {
+ EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+ return this->m_data[index * innerStride()];
+ }
+
+ template<int StoreMode>
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
+ {
+ internal::pstoret<Scalar, PacketScalar, StoreMode>
+ (this->m_data + (col * colStride() + row * rowStride()), x);
+ }
+
+ template<int StoreMode>
+ inline void writePacket(Index index, const PacketScalar& x)
+ {
+ EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+ internal::pstoret<Scalar, PacketScalar, StoreMode>
+ (this->m_data + index * innerStride(), x);
+ }
+
+ explicit inline MapBase(PointerType data) : Base(data) {}
+ inline MapBase(PointerType data, Index size) : Base(data, size) {}
+ inline MapBase(PointerType data, Index rows, Index cols) : Base(data, rows, cols) {}
+
+ Derived& operator=(const MapBase& other)
+ {
+ Base::Base::operator=(other);
+ return derived();
+ }
+
+ using Base::Base::operator=;
+};
+
+
+#endif // EIGEN_MAPBASE_H
diff --git a/extern/Eigen3/Eigen/src/Core/MathFunctions.h b/extern/Eigen3/Eigen/src/Core/MathFunctions.h
new file mode 100644
index 00000000000..2b454db21e9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/MathFunctions.h
@@ -0,0 +1,843 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MATHFUNCTIONS_H
+#define EIGEN_MATHFUNCTIONS_H
+
+namespace internal {
+
+/** \internal \struct global_math_functions_filtering_base
+ *
+ * What it does:
+ * Defines a typedef 'type' as follows:
+ * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then
+ * global_math_functions_filtering_base<T>::type is a typedef for it.
+ * - otherwise, global_math_functions_filtering_base<T>::type is a typedef for T.
+ *
+ * How it's used:
+ * To allow defining the global math functions (like sin...) in certain cases, such as for Array expressions.
+ * When you write sin(array1+array2), the object array1+array2 has a complicated expression type; all you need to know
+ * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase<Derived>,
+ * and we must make sure to use sin_impl<ArrayBase<Derived> > and not sin_impl<Derived>, otherwise our partial specialization
+ * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it.
+ *
+ * How it's implemented:
+ * SFINAE in the style of enable_if. Highly susceptible to breaking compilers. With GCC it does work, but if you replace
+ * the typename dummy with an integer template parameter, it doesn't work anymore!
+ */
+
+template<typename T, typename dummy = void>
+struct global_math_functions_filtering_base
+{
+ typedef T type;
+};
+
+template<typename T> struct always_void { typedef void type; };
+
+template<typename T>
+struct global_math_functions_filtering_base
+ <T,
+ typename always_void<typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl>::type
+ >
+{
+ typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type;
+};
+
+#define EIGEN_MATHFUNC_IMPL(func, scalar) func##_impl<typename global_math_functions_filtering_base<scalar>::type>
+#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename func##_retval<typename global_math_functions_filtering_base<scalar>::type>::type
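+
+// Expansion sketch, assuming Scalar = std::complex<float> (which defines no
+// Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl member, so the filter
+// above is the identity):
+//   EIGEN_MATHFUNC_IMPL(real, std::complex<float>)
+//     --> real_impl<std::complex<float> >
+//   EIGEN_MATHFUNC_RETVAL(real, std::complex<float>)
+//     --> typename real_retval<std::complex<float> >::type, i.e. float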
+
+
+/****************************************************************************
+* Implementation of real *
+****************************************************************************/
+
+template<typename Scalar>
+struct real_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar run(const Scalar& x)
+ {
+ return x;
+ }
+};
+
+template<typename RealScalar>
+struct real_impl<std::complex<RealScalar> >
+{
+ static inline RealScalar run(const std::complex<RealScalar>& x)
+ {
+ using std::real;
+ return real(x);
+ }
+};
+
+template<typename Scalar>
+struct real_retval
+{
+ typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of imag *
+****************************************************************************/
+
+template<typename Scalar>
+struct imag_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar run(const Scalar&)
+ {
+ return RealScalar(0);
+ }
+};
+
+template<typename RealScalar>
+struct imag_impl<std::complex<RealScalar> >
+{
+ static inline RealScalar run(const std::complex<RealScalar>& x)
+ {
+ using std::imag;
+ return imag(x);
+ }
+};
+
+template<typename Scalar>
+struct imag_retval
+{
+ typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of real_ref *
+****************************************************************************/
+
+template<typename Scalar>
+struct real_ref_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar& run(Scalar& x)
+ {
+ return reinterpret_cast<RealScalar*>(&x)[0];
+ }
+ static inline const RealScalar& run(const Scalar& x)
+ {
+ return reinterpret_cast<const RealScalar*>(&x)[0];
+ }
+};
+
+template<typename Scalar>
+struct real_ref_retval
+{
+ typedef typename NumTraits<Scalar>::Real & type;
+};
+
+template<typename Scalar>
+inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
+{
+ return real_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of imag_ref *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex>
+struct imag_ref_default_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar& run(Scalar& x)
+ {
+ return reinterpret_cast<RealScalar*>(&x)[1];
+ }
+ static inline const RealScalar& run(const Scalar& x)
+ {
+ return reinterpret_cast<const RealScalar*>(&x)[1];
+ }
+};
+
+template<typename Scalar>
+struct imag_ref_default_impl<Scalar, false>
+{
+ static inline Scalar run(Scalar&)
+ {
+ return Scalar(0);
+ }
+ static inline const Scalar run(const Scalar&)
+ {
+ return Scalar(0);
+ }
+};
+
+template<typename Scalar>
+struct imag_ref_impl : imag_ref_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};
+
+template<typename Scalar>
+struct imag_ref_retval
+{
+ typedef typename NumTraits<Scalar>::Real & type;
+};
+
+template<typename Scalar>
+inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
+{
+ return imag_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of conj *
+****************************************************************************/
+
+template<typename Scalar>
+struct conj_impl
+{
+ static inline Scalar run(const Scalar& x)
+ {
+ return x;
+ }
+};
+
+template<typename RealScalar>
+struct conj_impl<std::complex<RealScalar> >
+{
+ static inline std::complex<RealScalar> run(const std::complex<RealScalar>& x)
+ {
+ using std::conj;
+ return conj(x);
+ }
+};
+
+template<typename Scalar>
+struct conj_retval
+{
+ typedef Scalar type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of abs *
+****************************************************************************/
+
+template<typename Scalar>
+struct abs_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar run(const Scalar& x)
+ {
+ using std::abs;
+ return abs(x);
+ }
+};
+
+template<typename Scalar>
+struct abs_retval
+{
+ typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) abs(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(abs, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of abs2 *
+****************************************************************************/
+
+template<typename Scalar>
+struct abs2_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar run(const Scalar& x)
+ {
+ return x*x;
+ }
+};
+
+template<typename RealScalar>
+struct abs2_impl<std::complex<RealScalar> >
+{
+ static inline RealScalar run(const std::complex<RealScalar>& x)
+ {
+ using std::norm;
+ return norm(x);
+ }
+};
+
+template<typename Scalar>
+struct abs2_retval
+{
+ typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of norm1 *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex>
+struct norm1_default_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar run(const Scalar& x)
+ {
+ return abs(real(x)) + abs(imag(x));
+ }
+};
+
+template<typename Scalar>
+struct norm1_default_impl<Scalar, false>
+{
+ static inline Scalar run(const Scalar& x)
+ {
+ return abs(x);
+ }
+};
+
+template<typename Scalar>
+struct norm1_impl : norm1_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};
+
+template<typename Scalar>
+struct norm1_retval
+{
+ typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of hypot *
+****************************************************************************/
+
+template<typename Scalar>
+struct hypot_impl
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static inline RealScalar run(const Scalar& x, const Scalar& y)
+ {
+ using std::max;
+ using std::min;
+ RealScalar _x = abs(x);
+ RealScalar _y = abs(y);
+ RealScalar p = (max)(_x, _y);
+ RealScalar q = (min)(_x, _y);
+ RealScalar qp = q/p;
+ return p * sqrt(RealScalar(1) + qp*qp);
+ }
+};
+
+template<typename Scalar>
+struct hypot_retval
+{
+ typedef typename NumTraits<Scalar>::Real type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
+{
+ return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
+}
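+
+// Worked example of the scaling above (values illustrative): for
+// x = 3e200, y = 4e200 in double, x*x + y*y would overflow, but
+//   p = 4e200, qp = 0.75, p * sqrt(1 + 0.5625) = 5e200,
+// the exact hypotenuse, computed without ever forming x*x.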
+
+/****************************************************************************
+* Implementation of cast *
+****************************************************************************/
+
+template<typename OldType, typename NewType>
+struct cast_impl
+{
+ static inline NewType run(const OldType& x)
+ {
+ return static_cast<NewType>(x);
+ }
+};
+
+// here, for once, we're plainly returning NewType: we don't want cast to do weird things.
+
+template<typename OldType, typename NewType>
+inline NewType cast(const OldType& x)
+{
+ return cast_impl<OldType, NewType>::run(x);
+}
+
+/****************************************************************************
+* Implementation of sqrt *
+****************************************************************************/
+
+template<typename Scalar, bool IsInteger>
+struct sqrt_default_impl
+{
+ static inline Scalar run(const Scalar& x)
+ {
+ using std::sqrt;
+ return sqrt(x);
+ }
+};
+
+template<typename Scalar>
+struct sqrt_default_impl<Scalar, true>
+{
+ static inline Scalar run(const Scalar&)
+ {
+#ifdef EIGEN2_SUPPORT
+ eigen_assert(!NumTraits<Scalar>::IsInteger);
+#else
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
+#endif
+ return Scalar(0);
+ }
+};
+
+template<typename Scalar>
+struct sqrt_impl : sqrt_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct sqrt_retval
+{
+ typedef Scalar type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x)
+{
+ return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x);
+}
+
+/****************************************************************************
+* Implementation of standard unary real functions (exp, log, sin, cos, ...) *
+****************************************************************************/
+
+// This macro instantiates all the template machinery common to the standard unary real functions.
+#define EIGEN_MATHFUNC_STANDARD_REAL_UNARY(NAME) \
+ template<typename Scalar, bool IsInteger> struct NAME##_default_impl { \
+ static inline Scalar run(const Scalar& x) { using std::NAME; return NAME(x); } \
+ }; \
+ template<typename Scalar> struct NAME##_default_impl<Scalar, true> { \
+ static inline Scalar run(const Scalar&) { \
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) \
+ return Scalar(0); \
+ } \
+ }; \
+ template<typename Scalar> struct NAME##_impl \
+ : NAME##_default_impl<Scalar, NumTraits<Scalar>::IsInteger> \
+ {}; \
+ template<typename Scalar> struct NAME##_retval { typedef Scalar type; }; \
+ template<typename Scalar> \
+ inline EIGEN_MATHFUNC_RETVAL(NAME, Scalar) NAME(const Scalar& x) { \
+ return EIGEN_MATHFUNC_IMPL(NAME, Scalar)::run(x); \
+ }
+
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(log)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(sin)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(cos)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(tan)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(asin)
+EIGEN_MATHFUNC_STANDARD_REAL_UNARY(acos)
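+
+// Expansion sketch: each invocation above, e.g.
+// EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp), generates exp_default_impl
+// (forwarding to std::exp, with the integer specialization turning exp on
+// integer scalars into a static assertion), exp_impl, exp_retval, and the
+// internal::exp<Scalar>(x) entry point.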
+
+/****************************************************************************
+* Implementation of atan2 *
+****************************************************************************/
+
+template<typename Scalar, bool IsInteger>
+struct atan2_default_impl
+{
+ typedef Scalar retval;
+ static inline Scalar run(const Scalar& x, const Scalar& y)
+ {
+ using std::atan2;
+ return atan2(x, y);
+ }
+};
+
+template<typename Scalar>
+struct atan2_default_impl<Scalar, true>
+{
+ static inline Scalar run(const Scalar&, const Scalar&)
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
+ return Scalar(0);
+ }
+};
+
+template<typename Scalar>
+struct atan2_impl : atan2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct atan2_retval
+{
+ typedef Scalar type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& y)
+{
+ return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y);
+}
+
+/****************************************************************************
+* Implementation of pow *
+****************************************************************************/
+
+template<typename Scalar, bool IsInteger>
+struct pow_default_impl
+{
+ typedef Scalar retval;
+ static inline Scalar run(const Scalar& x, const Scalar& y)
+ {
+ using std::pow;
+ return pow(x, y);
+ }
+};
+
+template<typename Scalar>
+struct pow_default_impl<Scalar, true>
+{
+ static inline Scalar run(Scalar x, Scalar y)
+ {
+ Scalar res = 1;
+ eigen_assert(!NumTraits<Scalar>::IsSigned || y >= 0);
+ if(y & 1) res *= x;
+ y >>= 1;
+ while(y)
+ {
+ x *= x;
+ if(y&1) res *= x;
+ y >>= 1;
+ }
+ return res;
+ }
+};
+
+template<typename Scalar>
+struct pow_impl : pow_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct pow_retval
+{
+ typedef Scalar type;
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
+{
+ return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
+}
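+
+// Trace of the integer path above (exponentiation by squaring), for
+// pow(3, 5), i.e. x = 3, y = 5 (binary 101):
+//   y odd  -> res = 3;              y = 2
+//   loop:  x = 9;  y even;          y = 1
+//   loop:  x = 81; y odd, res = 243; y = 0
+// returns 243 == 3^5, using O(log y) multiplications instead of y-1.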
+
+/****************************************************************************
+* Implementation of random *
+****************************************************************************/
+
+template<typename Scalar,
+ bool IsComplex,
+ bool IsInteger>
+struct random_default_impl {};
+
+template<typename Scalar>
+struct random_impl : random_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct random_retval
+{
+ typedef Scalar type;
+};
+
+template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y);
+template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random();
+
+template<typename Scalar>
+struct random_default_impl<Scalar, false, false>
+{
+ static inline Scalar run(const Scalar& x, const Scalar& y)
+ {
+ return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX);
+ }
+ static inline Scalar run()
+ {
+ return run(Scalar(NumTraits<Scalar>::IsSigned ? -1 : 0), Scalar(1));
+ }
+};
+
+enum {
+ floor_log2_terminate,
+ floor_log2_move_up,
+ floor_log2_move_down,
+ floor_log2_bogus
+};
+
+template<unsigned int n, int lower, int upper> struct floor_log2_selector
+{
+ enum { middle = (lower + upper) / 2,
+ value = (upper <= lower + 1) ? int(floor_log2_terminate)
+ : (n < (1 << middle)) ? int(floor_log2_move_down)
+ : (n==0) ? int(floor_log2_bogus)
+ : int(floor_log2_move_up)
+ };
+};
+
+template<unsigned int n,
+ int lower = 0,
+ int upper = sizeof(unsigned int) * CHAR_BIT - 1,
+ int selector = floor_log2_selector<n, lower, upper>::value>
+struct floor_log2 {};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_move_down>
+{
+ enum { value = floor_log2<n, lower, floor_log2_selector<n, lower, upper>::middle>::value };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_move_up>
+{
+ enum { value = floor_log2<n, floor_log2_selector<n, lower, upper>::middle, upper>::value };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_terminate>
+{
+ enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_bogus>
+{
+ // no value, error at compile time
+};
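+
+// Compile-time sketch: floor_log2<10>::value binary-searches [0, 31]
+// (for 32-bit unsigned int) for the position of the highest set bit:
+//   [0,31] -> [0,15] -> [0,7] -> [3,7] -> [3,5] -> [3,4],
+// then the terminate case yields 3, since 10 < (1 << 4). It is used below to
+// find how many random bits std::rand() actually provides.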
+
+template<typename Scalar>
+struct random_default_impl<Scalar, false, true>
+{
+ typedef typename NumTraits<Scalar>::NonInteger NonInteger;
+
+ static inline Scalar run(const Scalar& x, const Scalar& y)
+ {
+ return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1)));
+ }
+
+ static inline Scalar run()
+ {
+#ifdef EIGEN_MAKING_DOCS
+ return run(Scalar(NumTraits<Scalar>::IsSigned ? -10 : 0), Scalar(10));
+#else
+ enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value,
+ scalar_bits = sizeof(Scalar) * CHAR_BIT,
+ shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits))
+ };
+ Scalar x = Scalar(std::rand() >> shift);
+ Scalar offset = NumTraits<Scalar>::IsSigned ? Scalar(1 << (rand_bits-1)) : Scalar(0);
+ return x - offset;
+#endif
+ }
+};
+
+template<typename Scalar>
+struct random_default_impl<Scalar, true, false>
+{
+ static inline Scalar run(const Scalar& x, const Scalar& y)
+ {
+ return Scalar(random(real(x), real(y)),
+ random(imag(x), imag(y)));
+ }
+ static inline Scalar run()
+ {
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ return Scalar(random<RealScalar>(), random<RealScalar>());
+ }
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y)
+{
+ return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
+{
+ return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
+}
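+
+// Usage sketch (illustrative): internal::random<int>(0, 9) draws uniformly
+// from [0, 9] via the integer specialization; internal::random<double>()
+// draws from [-1, 1]; for std::complex, the real and imaginary parts are
+// drawn independently.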
+
+/****************************************************************************
+* Implementation of fuzzy comparisons *
+****************************************************************************/
+
+template<typename Scalar,
+ bool IsComplex,
+ bool IsInteger>
+struct scalar_fuzzy_default_impl {};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, false, false>
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ template<typename OtherScalar>
+ static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
+ {
+ return abs(x) <= abs(y) * prec;
+ }
+ static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
+ {
+ using std::min;
+ return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
+ }
+ static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
+ {
+ return x <= y || isApprox(x, y, prec);
+ }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, false, true>
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ template<typename OtherScalar>
+ static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&)
+ {
+ return x == Scalar(0);
+ }
+ static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&)
+ {
+ return x == y;
+ }
+ static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&)
+ {
+ return x <= y;
+ }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, true, false>
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ template<typename OtherScalar>
+ static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
+ {
+ return abs2(x) <= abs2(y) * prec * prec;
+ }
+ static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
+ {
+ using std::min;
+ return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec;
+ }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar, typename OtherScalar>
+inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
+ typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+ return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool isApprox(const Scalar& x, const Scalar& y,
+ typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+ return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y,
+ typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+ return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);
+}
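+
+// Semantics sketch (values illustrative; double's dummy_precision() is 1e-12
+// in NumTraits):
+//   isApprox(1.0, 1.0 + 1e-13)      // true:  |x-y| <= min(|x|,|y|) * prec
+//   isMuchSmallerThan(1e-13, 1.0)   // true:  |x| <= |y| * prec
+// For integer scalars these degenerate to exact == and <=, as there is no
+// meaningful fuzz for exact types.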
+
+/******************************************
+*** The special case of the bool type ***
+******************************************/
+
+template<> struct random_impl<bool>
+{
+ static inline bool run()
+ {
+ return random<int>(0,1)==0 ? false : true;
+ }
+};
+
+template<> struct scalar_fuzzy_impl<bool>
+{
+ typedef bool RealScalar;
+
+ template<typename OtherScalar>
+ static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&)
+ {
+ return !x;
+ }
+
+ static inline bool isApprox(bool x, bool y, bool)
+ {
+ return x == y;
+ }
+
+ static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&)
+ {
+ return (!x) || y;
+ }
+
+};
+
+} // end namespace internal
+
+#endif // EIGEN_MATHFUNCTIONS_H
diff --git a/extern/Eigen3/Eigen/src/Core/Matrix.h b/extern/Eigen3/Eigen/src/Core/Matrix.h
new file mode 100644
index 00000000000..44de22cb4d5
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Matrix.h
@@ -0,0 +1,439 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MATRIX_H
+#define EIGEN_MATRIX_H
+
+/** \class Matrix
+ * \ingroup Core_Module
+ *
+ * \brief The matrix class, also used for vectors and row-vectors
+ *
+ * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen.
+ * Vectors are matrices with one column, and row-vectors are matrices with one row.
+ *
+ * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note").
+ *
+ * The first three template parameters are required:
+ * \tparam _Scalar \anchor matrix_tparam_scalar Numeric type, e.g. float, double, int or std::complex<float>.
+ * User-defined scalar types are supported as well (see \ref user_defined_scalars "here").
+ * \tparam _Rows Number of rows, or \b Dynamic
+ * \tparam _Cols Number of columns, or \b Dynamic
+ *
+ * The remaining template parameters are optional -- in most cases you don't have to worry about them.
+ * \tparam _Options \anchor matrix_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either
+ * \b #AutoAlign or \b #DontAlign.
+ * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required
+ * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size.
+ * \tparam _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note").
+ * \tparam _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note").
+ *
+ * Eigen provides a number of typedefs covering the usual cases. Here are some examples:
+ *
+ * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix<double, 2, 2>)
+ * \li \c Vector4f is a vector of 4 floats (\c Matrix<float, 4, 1>)
+ * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix<int, 1, 3>)
+ *
+ * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix<float, Dynamic, Dynamic>)
+ * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix<float, Dynamic, 1>)
+ *
+ * \li \c Matrix2Xf is a partially dynamic-size matrix of floats, with 2 fixed rows and a dynamic number of columns (\c Matrix<float, 2, Dynamic>)
+ * \li \c MatrixX3d is a partially dynamic-size matrix of doubles, with a dynamic number of rows and 3 fixed columns (\c Matrix<double, Dynamic, 3>)
+ *
+ * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs.
+ *
+ * You can access elements of vectors and matrices using normal subscripting:
+ *
+ * \code
+ * Eigen::VectorXd v(10);
+ * v[0] = 0.1;
+ * v[1] = 0.2;
+ * v(0) = 0.3;
+ * v(1) = 0.4;
+ *
+ * Eigen::MatrixXi m(10, 10);
+ * m(0, 1) = 1;
+ * m(0, 2) = 2;
+ * m(0, 3) = 3;
+ * \endcode
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN.
+ *
+ * <i><b>Some notes:</b></i>
+ *
+ * <dl>
+ * <dt><b>\anchor dense Dense versus sparse:</b></dt>
+ * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module.
+ *
+ * Dense matrices and vectors are plain arrays of coefficients. All the coefficients are stored in an ordinary contiguous array.
+ * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.</dd>
+ *
+ * <dt><b>\anchor fixedsize Fixed-size versus dynamic-size:</b></dt>
+ * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates the array
+ * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up
+ * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time.
+ *
+ * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime
+ * variables, and the array of coefficients is allocated dynamically on the heap.
+ *
+ * Note that \em dense matrices, be they Fixed-size or Dynamic-size, <em>do not</em> expand dynamically in the sense of a std::map.
+ * If you want this behavior, see the Sparse module.</dd>
+ *
+ * <dt><b>\anchor maxrows _MaxRows and _MaxCols:</b></dt>
+ * <dd>In most cases, one just leaves these parameters to the default values.
+ * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases
+ * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they cannot
+ * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols
+ * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic.</dd>
+ * </dl>
+ *
+ * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy,
+ * \ref TopicStorageOrders
+ */
+
+namespace internal {
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+ typedef _Scalar Scalar;
+ typedef Dense StorageKind;
+ typedef DenseIndex Index;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = _Rows,
+ ColsAtCompileTime = _Cols,
+ MaxRowsAtCompileTime = _MaxRows,
+ MaxColsAtCompileTime = _MaxCols,
+ Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ Options = _Options,
+ InnerStrideAtCompileTime = 1,
+ OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime
+ };
+};
+}
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+class Matrix
+ : public PlainObjectBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+ public:
+
+ /** \brief Base class typedef.
+ * \sa PlainObjectBase
+ */
+ typedef PlainObjectBase<Matrix> Base;
+
+ enum { Options = _Options };
+
+ EIGEN_DENSE_PUBLIC_INTERFACE(Matrix)
+
+ typedef typename Base::PlainObject PlainObject;
+
+ enum { NeedsToAlign = (!(Options&DontAlign))
+ && SizeAtCompileTime!=Dynamic && ((static_cast<int>(sizeof(Scalar))*SizeAtCompileTime)%16)==0 };
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+
+ using Base::base;
+ using Base::coeffRef;
+
+ /**
+ * \brief Assigns matrices to each other.
+ *
+ * \note This is a special case of the templated operator=. Its purpose is
+ * to prevent a default operator= from hiding the templated operator=.
+ *
+ * \callgraph
+ */
+ EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other)
+ {
+ return Base::_set(other);
+ }
+
+ /** \internal
+ * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
+ *
+ * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+ * it will be initialized.
+ *
+ * Note that copying a row-vector into a vector (and conversely) is allowed.
+ * The resizing, if any, is then done in the appropriate way so that row-vectors
+ * remain row-vectors and vectors remain vectors.
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Matrix& operator=(const MatrixBase<OtherDerived>& other)
+ {
+ return Base::_set(other);
+ }
+
+ /* Here, doxygen failed to copy the brief information when using \copydoc */
+
+ /**
+ * \brief Copies the generic expression \a other into *this.
+ * \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase<OtherDerived> &other)
+ {
+ return Base::operator=(other);
+ }
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue<OtherDerived>& func)
+ {
+ return Base::operator=(func);
+ }
+
+ /** \brief Default constructor.
+ *
+ * For fixed-size matrices, does nothing.
+ *
+ * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix
+ * is called a null matrix. This constructor is the unique way to create null matrices: resizing
+ * a matrix to 0 is not supported.
+ *
+ * \sa resize(Index,Index)
+ */
+ EIGEN_STRONG_INLINE explicit Matrix() : Base()
+ {
+ Base::_check_template_params();
+ EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+
+ // FIXME is it still needed?
+ Matrix(internal::constructor_without_unaligned_array_assert)
+ : Base(internal::constructor_without_unaligned_array_assert())
+ { Base::_check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED }
+
+ /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
+ *
+ * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
+ * it is redundant to pass the dimension here, so it makes more sense to use the default
+ * constructor Matrix() instead.
+ */
+ EIGEN_STRONG_INLINE explicit Matrix(Index dim)
+ : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
+ {
+ Base::_check_template_params();
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix)
+ eigen_assert(dim >= 0);
+ eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
+ EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename T0, typename T1>
+ EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y)
+ {
+ Base::_check_template_params();
+ Base::template _init2<T0,T1>(x, y);
+ }
+ #else
+ /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns.
+ *
+ * This is useful for dynamic-size matrices. For fixed-size matrices,
+ * it is redundant to pass these parameters, so one should use the default constructor
+ * Matrix() instead. */
+ Matrix(Index rows, Index cols);
+ /** \brief Constructs an initialized 2D vector with given coefficients */
+ Matrix(const Scalar& x, const Scalar& y);
+ #endif
+
+ /** \brief Constructs an initialized 3D vector with given coefficients */
+ EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z)
+ {
+ Base::_check_template_params();
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3)
+ m_storage.data()[0] = x;
+ m_storage.data()[1] = y;
+ m_storage.data()[2] = z;
+ }
+ /** \brief Constructs an initialized 4D vector with given coefficients */
+ EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
+ {
+ Base::_check_template_params();
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4)
+ m_storage.data()[0] = x;
+ m_storage.data()[1] = y;
+ m_storage.data()[2] = z;
+ m_storage.data()[3] = w;
+ }
+
+ explicit Matrix(const Scalar *data);
+
+ /** \brief Constructor copying the value of the expression \a other */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Matrix(const MatrixBase<OtherDerived>& other)
+ : Base(other.rows() * other.cols(), other.rows(), other.cols())
+ {
+ // This test resides here, to bring the error messages closer to the user. Normally, these checks
+ // are performed deeply within the library, thus causing long and scary error traces.
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ Base::_check_template_params();
+ Base::_set_noalias(other);
+ }
+ /** \brief Copy constructor */
+ EIGEN_STRONG_INLINE Matrix(const Matrix& other)
+ : Base(other.rows() * other.cols(), other.rows(), other.cols())
+ {
+ Base::_check_template_params();
+ Base::_set_noalias(other);
+ }
+ /** \brief Copy constructor with in-place evaluation */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Matrix(const ReturnByValue<OtherDerived>& other)
+ {
+ Base::_check_template_params();
+ Base::resize(other.rows(), other.cols());
+ other.evalTo(*this);
+ }
+
+ /** \brief Copy constructor for generic expressions.
+ * \sa MatrixBase::operator=(const EigenBase<OtherDerived>&)
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Matrix(const EigenBase<OtherDerived> &other)
+ : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
+ {
+ Base::_check_template_params();
+ Base::resize(other.rows(), other.cols());
+ // FIXME/CHECK: wouldn't *this = other.derived() be more efficient? It would
+ // allow going for pure _set() implementations, right?
+ *this = other;
+ }
+
+ /** \internal
+ * \brief Override MatrixBase::swap() since for dynamic-sized matrices
+ * of same type it is enough to swap the data pointers.
+ */
+ template<typename OtherDerived>
+ void swap(MatrixBase<OtherDerived> const & other)
+ { this->_swap(other.derived()); }
+
+ inline Index innerStride() const { return 1; }
+ inline Index outerStride() const { return this->innerSize(); }
+
+ /////////// Geometry module ///////////
+
+ template<typename OtherDerived>
+ explicit Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r);
+ template<typename OtherDerived>
+ Matrix& operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r);
+
+ #ifdef EIGEN2_SUPPORT
+ template<typename OtherDerived>
+ explicit Matrix(const eigen2_RotationBase<OtherDerived,ColsAtCompileTime>& r);
+ template<typename OtherDerived>
+ Matrix& operator=(const eigen2_RotationBase<OtherDerived,ColsAtCompileTime>& r);
+ #endif
+
+ // allow to extend Matrix outside Eigen
+ #ifdef EIGEN_MATRIX_PLUGIN
+ #include EIGEN_MATRIX_PLUGIN
+ #endif
+
+ protected:
+ template <typename Derived, typename OtherDerived, bool IsVector>
+ friend struct internal::conservative_resize_like_impl;
+
+ using Base::m_storage;
+};
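+
+// Usage sketch (names and values illustrative), exercising the constructors
+// documented above:
+//   Vector3f v(1.0f, 2.0f, 3.0f);   // the initialized 3D-vector constructor
+//   MatrixXd m(4, 7);               // uninitialized dynamic-size 4x7 matrix
+//   Matrix3d r = Matrix3d::Identity();
+//   MatrixXd c = m;                 // copy constructor, resizes to 4x7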
+
+/** \defgroup matrixtypedefs Global matrix typedefs
+ *
+ * \ingroup Core_Module
+ *
+ * Eigen defines several typedef shortcuts for most common matrix and vector types.
+ *
+ * The general patterns are the following:
+ *
+ * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size,
+ * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
+ * for complex double.
+ *
+ * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats.
+ *
+ * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is
+ * a fixed-size vector of 4 complex floats.
+ *
+ * \sa class Matrix
+ */
+
+#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \
+/** \ingroup matrixtypedefs */ \
+typedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix; \
+/** \ingroup matrixtypedefs */ \
+typedef Matrix<Type, Size, 1> Vector##SizeSuffix##TypeSuffix; \
+/** \ingroup matrixtypedefs */ \
+typedef Matrix<Type, 1, Size> RowVector##SizeSuffix##TypeSuffix;
+
+#define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \
+/** \ingroup matrixtypedefs */ \
+typedef Matrix<Type, Size, Dynamic> Matrix##Size##X##TypeSuffix; \
+/** \ingroup matrixtypedefs */ \
+typedef Matrix<Type, Dynamic, Size> Matrix##X##Size##TypeSuffix;
+
+#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
+EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
+EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
+EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
+
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<float>, cf)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
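+
+// Expansion sketch: EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) above yields,
+// among others:
+//   typedef Matrix<float, 2, 2>             Matrix2f;
+//   typedef Matrix<float, 2, 1>             Vector2f;
+//   typedef Matrix<float, 1, 2>             RowVector2f;
+//   typedef Matrix<float, Dynamic, Dynamic> MatrixXf;
+//   typedef Matrix<float, 2, Dynamic>       Matrix2Xf;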
+
+#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
+#undef EIGEN_MAKE_TYPEDEFS
+
+#undef EIGEN_MAKE_TYPEDEFS_LARGE
+
+#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
+using Eigen::Matrix##SizeSuffix##TypeSuffix; \
+using Eigen::Vector##SizeSuffix##TypeSuffix; \
+using Eigen::RowVector##SizeSuffix##TypeSuffix;
+
+#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(TypeSuffix) \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \
+
+#define EIGEN_USING_MATRIX_TYPEDEFS \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(i) \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(f) \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(d) \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cf) \
+EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cd)
+
+#endif // EIGEN_MATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Core/MatrixBase.h b/extern/Eigen3/Eigen/src/Core/MatrixBase.h
new file mode 100644
index 00000000000..db156f6e9d0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/MatrixBase.h
@@ -0,0 +1,520 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MATRIXBASE_H
+#define EIGEN_MATRIXBASE_H
+
+/** \class MatrixBase
+ * \ingroup Core_Module
+ *
+ * \brief Base class for all dense matrices, vectors, and expressions
+ *
+ * This class is the base that is inherited by all matrix, vector, and related expression
+ * types. Most of the Eigen API is contained in this class and its base classes. Other important
+ * classes for the Eigen API are Matrix, and VectorwiseOp.
+ *
+ * Note that some methods are defined in other modules such as the \ref LU_Module LU module
+ * for all functions related to matrix inversions.
+ *
+ * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc.
+ *
+ * When writing a function taking Eigen objects as argument, if you want your function
+ * to take as argument any matrix, vector, or expression, just let it take a
+ * MatrixBase argument. As an example, here is a function printFirstRow which, given
+ * a matrix, vector, or expression \a x, prints the first row of \a x.
+ *
+ * \code
+ template<typename Derived>
+ void printFirstRow(const Eigen::MatrixBase<Derived>& x)
+ {
+ cout << x.row(0) << endl;
+ }
+ * \endcode
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN.
+ *
+ * \sa \ref TopicClassHierarchy
+ */
+template<typename Derived> class MatrixBase
+ : public DenseBase<Derived>
+{
+ public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ typedef MatrixBase StorageBaseType;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ typedef DenseBase<Derived> Base;
+ using Base::RowsAtCompileTime;
+ using Base::ColsAtCompileTime;
+ using Base::SizeAtCompileTime;
+ using Base::MaxRowsAtCompileTime;
+ using Base::MaxColsAtCompileTime;
+ using Base::MaxSizeAtCompileTime;
+ using Base::IsVectorAtCompileTime;
+ using Base::Flags;
+ using Base::CoeffReadCost;
+
+ using Base::derived;
+ using Base::const_cast_derived;
+ using Base::rows;
+ using Base::cols;
+ using Base::size;
+ using Base::coeff;
+ using Base::coeffRef;
+ using Base::lazyAssign;
+ using Base::eval;
+ using Base::operator+=;
+ using Base::operator-=;
+ using Base::operator*=;
+ using Base::operator/=;
+
+ typedef typename Base::CoeffReturnType CoeffReturnType;
+ typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;
+ typedef typename Base::RowXpr RowXpr;
+ typedef typename Base::ColXpr ColXpr;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** type of the equivalent square matrix */
+ typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+ EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ /** \returns the size of the main diagonal, which is min(rows(),cols()).
+ * \sa rows(), cols(), SizeAtCompileTime. */
+ inline Index diagonalSize() const { return (std::min)(rows(),cols()); }
+
+ /** \brief The plain matrix type corresponding to this expression.
+ *
+ * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
+ * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
+ * that the return type of eval() is either PlainObject or const PlainObject&.
+ */
+ typedef Matrix<typename internal::traits<Derived>::Scalar,
+ internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime,
+ AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
+ internal::traits<Derived>::MaxRowsAtCompileTime,
+ internal::traits<Derived>::MaxColsAtCompileTime
+ > PlainObject;
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** \internal Represents a matrix with all coefficients equal to one another*/
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+ /** \internal the return type of MatrixBase::adjoint() */
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
+ ConstTransposeReturnType
+ >::type AdjointReturnType;
+ /** \internal Return type of eigenvalues() */
+ typedef Matrix<std::complex<RealScalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType;
+ /** \internal the return type of identity */
+ typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>,Derived> IdentityReturnType;
+ /** \internal the return type of unit vectors */
+ typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>,
+ internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime> BasisReturnType;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase
+# include "../plugins/CommonCwiseUnaryOps.h"
+# include "../plugins/CommonCwiseBinaryOps.h"
+# include "../plugins/MatrixCwiseUnaryOps.h"
+# include "../plugins/MatrixCwiseBinaryOps.h"
+# ifdef EIGEN_MATRIXBASE_PLUGIN
+# include EIGEN_MATRIXBASE_PLUGIN
+# endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+ /** Special case of the template operator=, in order to prevent the compiler
+ * from generating a default operator= (issue hit with g++ 4.1)
+ */
+ Derived& operator=(const MatrixBase& other);
+
+  // We cannot inherit here via Base::operator= since doing so causes
+  // trouble with MSVC.
+
+ template <typename OtherDerived>
+ Derived& operator=(const DenseBase<OtherDerived>& other);
+
+ template <typename OtherDerived>
+ Derived& operator=(const EigenBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ Derived& operator=(const ReturnByValue<OtherDerived>& other);
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ template<typename OtherDerived>
+ Derived& operator+=(const MatrixBase<OtherDerived>& other);
+ template<typename OtherDerived>
+ Derived& operator-=(const MatrixBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ const typename ProductReturnType<Derived,OtherDerived>::Type
+ operator*(const MatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ const typename LazyProductReturnType<Derived,OtherDerived>::Type
+ lazyProduct(const MatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ Derived& operator*=(const EigenBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ void applyOnTheLeft(const EigenBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ void applyOnTheRight(const EigenBase<OtherDerived>& other);
+
+ template<typename DiagonalDerived>
+ const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
+ operator*(const DiagonalBase<DiagonalDerived> &diagonal) const;
+
+ template<typename OtherDerived>
+ typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+ dot(const MatrixBase<OtherDerived>& other) const;
+
+ #ifdef EIGEN2_SUPPORT
+ template<typename OtherDerived>
+ Scalar eigen2_dot(const MatrixBase<OtherDerived>& other) const;
+ #endif
+
+ RealScalar squaredNorm() const;
+ RealScalar norm() const;
+ RealScalar stableNorm() const;
+ RealScalar blueNorm() const;
+ RealScalar hypotNorm() const;
+ const PlainObject normalized() const;
+ void normalize();
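+
+  // Illustrative sketch (an assumption, not part of the original header):
+  //   Eigen::Vector3d v(1.0, 2.0, 2.0);
+  //   double n2 = v.squaredNorm();        // 9
+  //   double n  = v.norm();               // 3
+  //   Eigen::Vector3d u = v.normalized(); // v / 3, leaving v unchanged
+  //   v.normalize();                      // scales v in place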
+
+ const AdjointReturnType adjoint() const;
+ void adjointInPlace();
+
+ typedef Diagonal<Derived> DiagonalReturnType;
+ DiagonalReturnType diagonal();
+ typedef const Diagonal<const Derived> ConstDiagonalReturnType;
+ const ConstDiagonalReturnType diagonal() const;
+
+ template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
+ template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; };
+
+ template<int Index> typename DiagonalIndexReturnType<Index>::Type diagonal();
+ template<int Index> typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;
+
+ // Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
+ // On the other hand they confuse MSVC8...
+ #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
+ typename MatrixBase::template DiagonalIndexReturnType<Dynamic>::Type diagonal(Index index);
+ typename MatrixBase::template ConstDiagonalIndexReturnType<Dynamic>::Type diagonal(Index index) const;
+ #else
+ typename DiagonalIndexReturnType<Dynamic>::Type diagonal(Index index);
+ typename ConstDiagonalIndexReturnType<Dynamic>::Type diagonal(Index index) const;
+ #endif
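+
+  // Illustrative usage (an assumption, not part of the original header):
+  //   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+  //   m.diagonal();    // main diagonal
+  //   m.diagonal<1>(); // first super-diagonal, compile-time index
+  //   m.diagonal(-1);  // first sub-diagonal, runtime index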
+
+ #ifdef EIGEN2_SUPPORT
+ template<unsigned int Mode> typename internal::eigen2_part_return_type<Derived, Mode>::type part();
+ template<unsigned int Mode> const typename internal::eigen2_part_return_type<Derived, Mode>::type part() const;
+
+  // Hack: make Eigen2's matrix.part<Diagonal>() work in Eigen3. Problem: Diagonal is now a class template instead
+  // of an integer constant. Solution: overload the part() method template with respect to its template parameter list.
+ template<template<typename T, int n> class U>
+ const DiagonalWrapper<ConstDiagonalReturnType> part() const
+ { return diagonal().asDiagonal(); }
+ #endif // EIGEN2_SUPPORT
+
+ template<unsigned int Mode> struct TriangularViewReturnType { typedef TriangularView<Derived, Mode> Type; };
+ template<unsigned int Mode> struct ConstTriangularViewReturnType { typedef const TriangularView<const Derived, Mode> Type; };
+
+ template<unsigned int Mode> typename TriangularViewReturnType<Mode>::Type triangularView();
+ template<unsigned int Mode> typename ConstTriangularViewReturnType<Mode>::Type triangularView() const;
+
+ template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SelfAdjointView<Derived, UpLo> Type; };
+ template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView<const Derived, UpLo> Type; };
+
+ template<unsigned int UpLo> typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
+ template<unsigned int UpLo> typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
+
+ const SparseView<Derived> sparseView(const Scalar& m_reference = Scalar(0),
+ typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
+ static const IdentityReturnType Identity();
+ static const IdentityReturnType Identity(Index rows, Index cols);
+ static const BasisReturnType Unit(Index size, Index i);
+ static const BasisReturnType Unit(Index i);
+ static const BasisReturnType UnitX();
+ static const BasisReturnType UnitY();
+ static const BasisReturnType UnitZ();
+ static const BasisReturnType UnitW();
+
+ const DiagonalWrapper<const Derived> asDiagonal() const;
+ const PermutationWrapper<const Derived> asPermutation() const;
+
+ Derived& setIdentity();
+ Derived& setIdentity(Index rows, Index cols);
+
+ bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+ bool isUpperTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isLowerTriangular(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+ template<typename OtherDerived>
+ bool isOrthogonal(const MatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+ bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+  /** \returns true if all coefficients of \c *this and \a other are exactly equal.
+    * \warning When using floating point scalar values you should probably use a
+    * fuzzy comparison such as isApprox() instead.
+    * \sa isApprox(), operator!= */
+ template<typename OtherDerived>
+ inline bool operator==(const MatrixBase<OtherDerived>& other) const
+ { return cwiseEqual(other).all(); }
+
+  /** \returns true if at least one pair of coefficients of \c *this and \a other is not exactly equal.
+    * \warning When using floating point scalar values you should probably use a
+    * fuzzy comparison such as isApprox() instead.
+    * \sa isApprox(), operator== */
+ template<typename OtherDerived>
+ inline bool operator!=(const MatrixBase<OtherDerived>& other) const
+ { return cwiseNotEqual(other).any(); }
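+
+  // Illustrative sketch (an assumption, not part of the original header): with
+  // floating point scalars, prefer isApprox() over the exact operators above:
+  //   Eigen::Matrix2d a = Eigen::Matrix2d::Random();
+  //   Eigen::Matrix2d b = a + Eigen::Matrix2d::Constant(1e-14);
+  //   bool exact = (a == b);      // false: comparison is exact, coefficient-wise
+  //   bool fuzzy = a.isApprox(b); // true: the difference is far below dummy_precision()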
+
+ NoAlias<Derived,Eigen::MatrixBase > noalias();
+
+ inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
+ inline ForceAlignedAccess<Derived> forceAlignedAccess();
+ template<bool Enable> inline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type forceAlignedAccessIf() const;
+ template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
+
+ Scalar trace() const;
+
+/////////// Array module ///////////
+
+ template<int p> RealScalar lpNorm() const;
+
+ MatrixBase<Derived>& matrix() { return *this; }
+ const MatrixBase<Derived>& matrix() const { return *this; }
+
+ /** \returns an \link ArrayBase Array \endlink expression of this matrix
+ * \sa ArrayBase::matrix() */
+ ArrayWrapper<Derived> array() { return derived(); }
+ const ArrayWrapper<Derived> array() const { return derived(); }
+
+/////////// LU module ///////////
+
+ const FullPivLU<PlainObject> fullPivLu() const;
+ const PartialPivLU<PlainObject> partialPivLu() const;
+
+ #if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+ const LU<PlainObject> lu() const;
+ #endif
+
+ #ifdef EIGEN2_SUPPORT
+ const LU<PlainObject> eigen2_lu() const;
+ #endif
+
+ #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+ const PartialPivLU<PlainObject> lu() const;
+ #endif
+
+ #ifdef EIGEN2_SUPPORT
+ template<typename ResultType>
+ void computeInverse(MatrixBase<ResultType> *result) const {
+ *result = this->inverse();
+ }
+ #endif
+
+ const internal::inverse_impl<Derived> inverse() const;
+ template<typename ResultType>
+ void computeInverseAndDetWithCheck(
+ ResultType& inverse,
+ typename ResultType::Scalar& determinant,
+ bool& invertible,
+ const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
+ ) const;
+ template<typename ResultType>
+ void computeInverseWithCheck(
+ ResultType& inverse,
+ bool& invertible,
+ const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
+ ) const;
+ Scalar determinant() const;
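+
+  // Usage sketch (illustrative, not from the original header):
+  //   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+  //   Eigen::Matrix3d inv;
+  //   double det;
+  //   bool invertible;
+  //   m.computeInverseAndDetWithCheck(inv, det, invertible);
+  //   if(invertible) { /* inv holds m.inverse(), det holds m.determinant() */ }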
+
+/////////// Cholesky module ///////////
+
+ const LLT<PlainObject> llt() const;
+ const LDLT<PlainObject> ldlt() const;
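+
+  // Usage sketch (illustrative): solving A x = b when A is symmetric positive
+  // definite; ldlt() is the more robust choice for semidefinite problems:
+  //   Eigen::Matrix3d A;  Eigen::Vector3d b;  // assumed initialized, A SPD
+  //   Eigen::Vector3d x = A.llt().solve(b);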
+
+/////////// QR module ///////////
+
+ const HouseholderQR<PlainObject> householderQr() const;
+ const ColPivHouseholderQR<PlainObject> colPivHouseholderQr() const;
+ const FullPivHouseholderQR<PlainObject> fullPivHouseholderQr() const;
+
+ #ifdef EIGEN2_SUPPORT
+ const QR<PlainObject> qr() const;
+ #endif
+
+ EigenvaluesReturnType eigenvalues() const;
+ RealScalar operatorNorm() const;
+
+/////////// SVD module ///////////
+
+ JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const;
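+
+  // Usage sketch (illustrative, not from the original header):
+  //   Eigen::MatrixXd m = Eigen::MatrixXd::Random(4,3);
+  //   Eigen::JacobiSVD<Eigen::MatrixXd> svd =
+  //       m.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV);
+  //   // svd.singularValues(), svd.matrixU(), svd.matrixV() are then available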
+
+ #ifdef EIGEN2_SUPPORT
+ SVD<PlainObject> svd() const;
+ #endif
+
+/////////// Geometry module ///////////
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /// \internal helper struct to form the return type of the cross product
+ template<typename OtherDerived> struct cross_product_return_type {
+ typedef typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar;
+ typedef Matrix<Scalar,MatrixBase::RowsAtCompileTime,MatrixBase::ColsAtCompileTime> type;
+ };
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+ template<typename OtherDerived>
+ typename cross_product_return_type<OtherDerived>::type
+ cross(const MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived>
+ PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
+ PlainObject unitOrthogonal(void) const;
+ Matrix<Scalar,3,1> eulerAngles(Index a0, Index a1, Index a2) const;
+
+ #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+ ScalarMultipleReturnType operator*(const UniformScaling<Scalar>& s) const;
+ // put this as separate enum value to work around possible GCC 4.3 bug (?)
+ enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1?Vertical:Horizontal };
+ typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType;
+ HomogeneousReturnType homogeneous() const;
+ #endif
+
+ enum {
+ SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1
+ };
+ typedef Block<const Derived,
+ internal::traits<Derived>::ColsAtCompileTime==1 ? SizeMinusOne : 1,
+ internal::traits<Derived>::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne;
+ typedef CwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>,
+ const ConstStartMinusOne > HNormalizedReturnType;
+
+ const HNormalizedReturnType hnormalized() const;
+
+////////// Householder module ///////////
+
+ void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
+ template<typename EssentialPart>
+ void makeHouseholder(EssentialPart& essential,
+ Scalar& tau, RealScalar& beta) const;
+ template<typename EssentialPart>
+ void applyHouseholderOnTheLeft(const EssentialPart& essential,
+ const Scalar& tau,
+ Scalar* workspace);
+ template<typename EssentialPart>
+ void applyHouseholderOnTheRight(const EssentialPart& essential,
+ const Scalar& tau,
+ Scalar* workspace);
+
+///////// Jacobi module /////////
+
+ template<typename OtherScalar>
+ void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j);
+ template<typename OtherScalar>
+ void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
+
+///////// MatrixFunctions module /////////
+
+ typedef typename internal::stem_function<Scalar>::type StemFunction;
+ const MatrixExponentialReturnValue<Derived> exp() const;
+ const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
+ const MatrixFunctionReturnValue<Derived> cosh() const;
+ const MatrixFunctionReturnValue<Derived> sinh() const;
+ const MatrixFunctionReturnValue<Derived> cos() const;
+ const MatrixFunctionReturnValue<Derived> sin() const;
+
+#ifdef EIGEN2_SUPPORT
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ Derived& operator+=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+ EvalBeforeAssigningBit>& other);
+
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ Derived& operator-=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+ EvalBeforeAssigningBit>& other);
+
+ /** \deprecated because .lazy() is deprecated
+ * Overloaded for cache friendly product evaluation */
+ template<typename OtherDerived>
+ Derived& lazyAssign(const Flagged<OtherDerived, 0, EvalBeforeAssigningBit>& other)
+ { return lazyAssign(other._expression()); }
+
+ template<unsigned int Added>
+ const Flagged<Derived, Added, 0> marked() const;
+ const Flagged<Derived, 0, EvalBeforeAssigningBit> lazy() const;
+
+ inline const Cwise<Derived> cwise() const;
+ inline Cwise<Derived> cwise();
+
+ VectorBlock<Derived> start(Index size);
+ const VectorBlock<const Derived> start(Index size) const;
+ VectorBlock<Derived> end(Index size);
+ const VectorBlock<const Derived> end(Index size) const;
+ template<int Size> VectorBlock<Derived,Size> start();
+ template<int Size> const VectorBlock<const Derived,Size> start() const;
+ template<int Size> VectorBlock<Derived,Size> end();
+ template<int Size> const VectorBlock<const Derived,Size> end() const;
+
+ Minor<Derived> minor(Index row, Index col);
+ const Minor<Derived> minor(Index row, Index col) const;
+#endif
+
+ protected:
+ MatrixBase() : Base() {}
+
+ private:
+ explicit MatrixBase(int);
+ MatrixBase(int,int);
+ template<typename OtherDerived> explicit MatrixBase(const MatrixBase<OtherDerived>&);
+ protected:
+ // mixing arrays and matrices is not legal
+ template<typename OtherDerived> Derived& operator+=(const ArrayBase<OtherDerived>& )
+ {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);}
+ // mixing arrays and matrices is not legal
+ template<typename OtherDerived> Derived& operator-=(const ArrayBase<OtherDerived>& )
+ {EIGEN_STATIC_ASSERT(sizeof(typename OtherDerived::Scalar)==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES);}
+};
+
+#endif // EIGEN_MATRIXBASE_H
diff --git a/extern/Eigen2/Eigen/src/Core/NestByValue.h b/extern/Eigen3/Eigen/src/Core/NestByValue.h
index 2a14ab1f156..a6104d2a426 100644
--- a/extern/Eigen2/Eigen/src/Core/NestByValue.h
+++ b/extern/Eigen3/Eigen/src/Core/NestByValue.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -27,6 +27,7 @@
#define EIGEN_NESTBYVALUE_H
/** \class NestByValue
+ * \ingroup Core_Module
*
* \brief Expression which must be nested by value
*
@@ -37,79 +38,83 @@
*
* \sa MatrixBase::nestByValue()
*/
+
+namespace internal {
template<typename ExpressionType>
-struct ei_traits<NestByValue<ExpressionType> > : public ei_traits<ExpressionType>
+struct traits<NestByValue<ExpressionType> > : public traits<ExpressionType>
{};
+}
template<typename ExpressionType> class NestByValue
- : public MatrixBase<NestByValue<ExpressionType> >
+ : public internal::dense_xpr_base< NestByValue<ExpressionType> >::type
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(NestByValue)
+ typedef typename internal::dense_xpr_base<NestByValue>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue)
inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
- inline int rows() const { return m_expression.rows(); }
- inline int cols() const { return m_expression.cols(); }
- inline int stride() const { return m_expression.stride(); }
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
- inline const Scalar coeff(int row, int col) const
+ inline const CoeffReturnType coeff(Index row, Index col) const
{
return m_expression.coeff(row, col);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_expression.const_cast_derived().coeffRef(row, col);
}
- inline const Scalar coeff(int index) const
+ inline const CoeffReturnType coeff(Index index) const
{
return m_expression.coeff(index);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_expression.const_cast_derived().coeffRef(index);
}
template<int LoadMode>
- inline const PacketScalar packet(int row, int col) const
+ inline const PacketScalar packet(Index row, Index col) const
{
return m_expression.template packet<LoadMode>(row, col);
}
template<int LoadMode>
- inline void writePacket(int row, int col, const PacketScalar& x)
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
}
template<int LoadMode>
- inline const PacketScalar packet(int index) const
+ inline const PacketScalar packet(Index index) const
{
return m_expression.template packet<LoadMode>(index);
}
template<int LoadMode>
- inline void writePacket(int index, const PacketScalar& x)
+ inline void writePacket(Index index, const PacketScalar& x)
{
m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
}
+ operator const ExpressionType&() const { return m_expression; }
+
protected:
const ExpressionType m_expression;
-
- private:
- NestByValue& operator=(const NestByValue&);
};
/** \returns an expression of the temporary version of *this.
*/
template<typename Derived>
inline const NestByValue<Derived>
-MatrixBase<Derived>::nestByValue() const
+DenseBase<Derived>::nestByValue() const
{
return NestByValue<Derived>(derived());
}
diff --git a/extern/Eigen3/Eigen/src/Core/NoAlias.h b/extern/Eigen3/Eigen/src/Core/NoAlias.h
new file mode 100644
index 00000000000..da64affcf9a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/NoAlias.h
@@ -0,0 +1,136 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_NOALIAS_H
+#define EIGEN_NOALIAS_H
+
+/** \class NoAlias
+ * \ingroup Core_Module
+ *
+ * \brief Pseudo expression providing an operator = assuming no aliasing
+ *
+ * \param ExpressionType the type of the object on which to do the lazy assignment
+ *
+ * This class represents an expression with special assignment operators
+ * assuming no aliasing between the target expression and the source expression.
+ * More precisely, it allows one to bypass the EvalBeforeAssignBit flag of the source expression.
+ * It is the return type of MatrixBase::noalias()
+ * and most of the time this is the only way it is used.
+ *
+ * \sa MatrixBase::noalias()
+ */
+template<typename ExpressionType, template <typename> class StorageBase>
+class NoAlias
+{
+ typedef typename ExpressionType::Scalar Scalar;
+ public:
+ NoAlias(ExpressionType& expression) : m_expression(expression) {}
+
+ /** Behaves like MatrixBase::lazyAssign(other)
+ * \sa MatrixBase::lazyAssign() */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other)
+ { return internal::assign_selector<ExpressionType,OtherDerived,false>::run(m_expression,other.derived()); }
+
+ /** \sa MatrixBase::operator+= */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other)
+ {
+ typedef SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, ExpressionType, OtherDerived> SelfAdder;
+ SelfAdder tmp(m_expression);
+ typedef typename internal::nested<OtherDerived>::type OtherDerivedNested;
+ typedef typename internal::remove_all<OtherDerivedNested>::type _OtherDerivedNested;
+ internal::assign_selector<SelfAdder,_OtherDerivedNested,false>::run(tmp,OtherDerivedNested(other.derived()));
+ return m_expression;
+ }
+
+ /** \sa MatrixBase::operator-= */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other)
+ {
+ typedef SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, ExpressionType, OtherDerived> SelfAdder;
+ SelfAdder tmp(m_expression);
+ typedef typename internal::nested<OtherDerived>::type OtherDerivedNested;
+ typedef typename internal::remove_all<OtherDerivedNested>::type _OtherDerivedNested;
+ internal::assign_selector<SelfAdder,_OtherDerivedNested,false>::run(tmp,OtherDerivedNested(other.derived()));
+ return m_expression;
+ }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE ExpressionType& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+ { other.derived().addTo(m_expression); return m_expression; }
+
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE ExpressionType& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+ { other.derived().subTo(m_expression); return m_expression; }
+
+ template<typename Lhs, typename Rhs, int NestingFlags>
+ EIGEN_STRONG_INLINE ExpressionType& operator+=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
+ { return m_expression.derived() += CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+
+ template<typename Lhs, typename Rhs, int NestingFlags>
+ EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
+ { return m_expression.derived() -= CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+#endif
+
+ protected:
+ ExpressionType& m_expression;
+};
+
+/** \returns a pseudo expression of \c *this with an operator= assuming
+ * no aliasing between \c *this and the source expression.
+ *
+ * More precisely, noalias() allows one to bypass the EvalBeforeAssignBit flag.
+ * Currently, even though several expressions may alias, only product
+ * expressions have this flag. Therefore, noalias() is only useful when
+ * the source expression contains a matrix product.
+ *
+ * Here are some examples where noalias is useful:
+ * \code
+ * D.noalias() = A * B;
+ * D.noalias() += A.transpose() * B;
+ * D.noalias() -= 2 * A * B.adjoint();
+ * \endcode
+ *
+ * On the other hand the following example will lead to a \b wrong result:
+ * \code
+ * A.noalias() = A * B;
+ * \endcode
+ * because the result matrix A is also an operand of the matrix product. Therefore,
+ * there is no alternative to evaluating A * B in a temporary, which is the default
+ * behavior when you write:
+ * \code
+ * A = A * B;
+ * \endcode
+ *
+ * \sa class NoAlias
+ */
+template<typename Derived>
+NoAlias<Derived,MatrixBase> MatrixBase<Derived>::noalias()
+{
+ return derived();
+}
+
+#endif // EIGEN_NOALIAS_H
diff --git a/extern/Eigen3/Eigen/src/Core/NumTraits.h b/extern/Eigen3/Eigen/src/Core/NumTraits.h
new file mode 100644
index 00000000000..73ef05dfe7a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/NumTraits.h
@@ -0,0 +1,160 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_NUMTRAITS_H
+#define EIGEN_NUMTRAITS_H
+
+/** \class NumTraits
+ * \ingroup Core_Module
+ *
+ * \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
+ *
+ * \param T the numeric type at hand
+ *
+ * This class stores enums, typedefs and static methods giving information about a numeric type.
+ *
+ * The provided data consists of:
+ * \li A typedef \a Real, giving the "real part" type of \a T. If \a T is already real,
+ * then \a Real is just a typedef to \a T. If \a T is \c std::complex<U> then \a Real
+ * is a typedef to \a U.
+ * \li A typedef \a NonInteger, giving the type that should be used for operations producing non-integral values,
+ * such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives
+ * \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to
+ * take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is
+ * only intended as a helper for code that needs to explicitly promote types.
+ * \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what
+ * this means, just use \a T here.
+ * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex
+ * type, and to 0 otherwise.
+ * \li An enum value \a IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int,
+ * and to \c 0 otherwise.
+ * \li Enum values ReadCost, AddCost and MulCost representing a rough estimate of the number of CPU cycles needed
+ * by move / add / mul instructions respectively, assuming the data is already stored in CPU registers.
+ * These estimates are deliberately rough; no architecture-specific tuning is intended.
+ * \li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned.
+ * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must
+ * be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise.
+ * \li An epsilon() function which, unlike std::numeric_limits::epsilon(), returns a \a Real instead of a \a T.
+ * \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default
+ * value by the fuzzy comparison operators.
+ * \li highest() and lowest() functions returning the highest and lowest possible values respectively.
+ */
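+
+// A usage sketch (illustrative, not part of the original header):
+//   NumTraits<std::complex<float> >::Real              // -> float
+//   NumTraits<int>::IsInteger                          // == 1
+//   NumTraits<int>::NonInteger                         // -> double on typical platforms
+//   double eps = NumTraits<double>::epsilon();         // numeric_limits<double>::epsilon()
+//   double dp  = NumTraits<double>::dummy_precision(); // 1e-12, used by fuzzy comparisons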
+
+template<typename T> struct GenericNumTraits
+{
+ enum {
+ IsInteger = std::numeric_limits<T>::is_integer,
+ IsSigned = std::numeric_limits<T>::is_signed,
+ IsComplex = 0,
+ RequireInitialization = internal::is_arithmetic<T>::value ? 0 : 1,
+ ReadCost = 1,
+ AddCost = 1,
+ MulCost = 1
+ };
+
+ typedef T Real;
+ typedef typename internal::conditional<
+ IsInteger,
+ typename internal::conditional<sizeof(T)<=2, float, double>::type,
+ T
+ >::type NonInteger;
+ typedef T Nested;
+
+ inline static Real epsilon() { return std::numeric_limits<T>::epsilon(); }
+ inline static Real dummy_precision()
+ {
+ // make sure to override this for floating-point types
+ return Real(0);
+ }
+ inline static T highest() { return (std::numeric_limits<T>::max)(); }
+ inline static T lowest() { return IsInteger ? (std::numeric_limits<T>::min)() : (-(std::numeric_limits<T>::max)()); }
+
+#ifdef EIGEN2_SUPPORT
+ enum {
+ HasFloatingPoint = !IsInteger
+ };
+ typedef NonInteger FloatingPoint;
+#endif
+};
+
+template<typename T> struct NumTraits : GenericNumTraits<T>
+{};
+
+template<> struct NumTraits<float>
+ : GenericNumTraits<float>
+{
+ inline static float dummy_precision() { return 1e-5f; }
+};
+
+template<> struct NumTraits<double> : GenericNumTraits<double>
+{
+ inline static double dummy_precision() { return 1e-12; }
+};
+
+template<> struct NumTraits<long double>
+ : GenericNumTraits<long double>
+{
+ static inline long double dummy_precision() { return 1e-15l; }
+};
+
+template<typename _Real> struct NumTraits<std::complex<_Real> >
+ : GenericNumTraits<std::complex<_Real> >
+{
+ typedef _Real Real;
+ enum {
+ IsComplex = 1,
+ RequireInitialization = NumTraits<_Real>::RequireInitialization,
+ ReadCost = 2 * NumTraits<_Real>::ReadCost,
+ AddCost = 2 * NumTraits<Real>::AddCost,
+ MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost
+ };
+
+ inline static Real epsilon() { return NumTraits<Real>::epsilon(); }
+ inline static Real dummy_precision() { return NumTraits<Real>::dummy_precision(); }
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
+{
+ typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> ArrayType;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef Array<RealScalar, Rows, Cols, Options, MaxRows, MaxCols> Real;
+ typedef typename NumTraits<Scalar>::NonInteger NonIntegerScalar;
+ typedef Array<NonIntegerScalar, Rows, Cols, Options, MaxRows, MaxCols> NonInteger;
+ typedef ArrayType & Nested;
+
+ enum {
+ IsComplex = NumTraits<Scalar>::IsComplex,
+ IsInteger = NumTraits<Scalar>::IsInteger,
+ IsSigned = NumTraits<Scalar>::IsSigned,
+ RequireInitialization = 1,
+ ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::ReadCost,
+ AddCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::AddCost,
+ MulCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::MulCost
+ };
+};
+
+
+
+#endif // EIGEN_NUMTRAITS_H
diff --git a/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h b/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h
new file mode 100644
index 00000000000..a064e053e51
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h
@@ -0,0 +1,696 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PERMUTATIONMATRIX_H
+#define EIGEN_PERMUTATIONMATRIX_H
+
+template<int RowCol,typename IndicesType,typename MatrixType, typename StorageKind> class PermutedImpl;
+
+/** \class PermutationBase
+ * \ingroup Core_Module
+ *
+ * \brief Base class for permutations
+ *
+ * \param Derived the derived class
+ *
+ * This class is the base class for all expressions representing a permutation matrix,
+ * internally stored as a vector of integers.
+ * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix
+ * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have:
+ * \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f]
+ * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have:
+ * \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f]
+ *
+ * Permutation matrices are square and invertible.
+ *
+ * Notice that in addition to the member functions and operators listed here, there also are non-member
+ * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase)
+ * on either side.
+ *
+ * \sa class PermutationMatrix, class PermutationWrapper
+ */
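+
+// Usage sketch (illustrative, not part of the original header):
+//   Eigen::PermutationMatrix<3> P;
+//   P.setIdentity();
+//   P.applyTranspositionOnTheRight(0, 2);          // swap indices 0 and 2
+//   Eigen::Matrix3d A = Eigen::Matrix3d::Random();
+//   Eigen::Matrix3d PA = P * A;                    // permutes the rows of A
+//   Eigen::Matrix3d AP = A * P;                    // permutes the columns of A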
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed=false>
+struct permut_matrix_product_retval;
+enum PermPermProduct_t {PermPermProduct};
+
+} // end namespace internal
+
+template<typename Derived>
+class PermutationBase : public EigenBase<Derived>
+{
+ typedef internal::traits<Derived> Traits;
+ typedef EigenBase<Derived> Base;
+ public:
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ typedef typename Traits::IndicesType IndicesType;
+ enum {
+ Flags = Traits::Flags,
+ CoeffReadCost = Traits::CoeffReadCost,
+ RowsAtCompileTime = Traits::RowsAtCompileTime,
+ ColsAtCompileTime = Traits::ColsAtCompileTime,
+ MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
+ };
+ typedef typename Traits::Scalar Scalar;
+ typedef typename Traits::Index Index;
+ typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
+ DenseMatrixType;
+ typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,Index>
+ PlainPermutationType;
+ using Base::derived;
+ #endif
+
+ /** Copies the other permutation into *this */
+ template<typename OtherDerived>
+ Derived& operator=(const PermutationBase<OtherDerived>& other)
+ {
+ indices() = other.indices();
+ return derived();
+ }
+
+ /** Assignment from the Transpositions \a tr */
+ template<typename OtherDerived>
+ Derived& operator=(const TranspositionsBase<OtherDerived>& tr)
+ {
+ setIdentity(tr.size());
+ for(Index k=size()-1; k>=0; --k)
+ applyTranspositionOnTheRight(k,tr.coeff(k));
+ return derived();
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ Derived& operator=(const PermutationBase& other)
+ {
+ indices() = other.indices();
+ return derived();
+ }
+ #endif
+
+ /** \returns the number of rows */
+ inline Index rows() const { return indices().size(); }
+
+ /** \returns the number of columns */
+ inline Index cols() const { return indices().size(); }
+
+ /** \returns the size of a side of the respective square matrix, i.e., the number of indices */
+ inline Index size() const { return indices().size(); }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& other) const
+ {
+ other.setZero();
+ for (int i=0; i<rows();++i)
+ other.coeffRef(indices().coeff(i),i) = typename DenseDerived::Scalar(1);
+ }
+ #endif
+
+ /** \returns a Matrix object initialized from this permutation matrix. Notice that it
+ * is inefficient to return this Matrix object by value. For efficiency, favor using
+ * the Matrix constructor taking EigenBase objects.
+ */
+ DenseMatrixType toDenseMatrix() const
+ {
+ return derived();
+ }
+
+ /** const version of indices(). */
+ const IndicesType& indices() const { return derived().indices(); }
+ /** \returns a reference to the stored array representing the permutation. */
+ IndicesType& indices() { return derived().indices(); }
+
+ /** Resizes to given size.
+ */
+ inline void resize(Index size)
+ {
+ indices().resize(size);
+ }
+
+ /** Sets *this to be the identity permutation matrix */
+ void setIdentity()
+ {
+ for(Index i = 0; i < size(); ++i)
+ indices().coeffRef(i) = i;
+ }
+
+ /** Sets *this to be the identity permutation matrix of given size.
+ */
+ void setIdentity(Index size)
+ {
+ resize(size);
+ setIdentity();
+ }
+
+ /** Multiplies *this by the transposition \f$(ij)\f$ on the left.
+ *
+ * \returns a reference to *this.
+ *
+ * \warning This is much slower than applyTranspositionOnTheRight(int,int):
+ * this has linear complexity and requires a lot of branching.
+ *
+ * \sa applyTranspositionOnTheRight(int,int)
+ */
+ Derived& applyTranspositionOnTheLeft(Index i, Index j)
+ {
+ eigen_assert(i>=0 && j>=0 && i<size() && j<size());
+ for(Index k = 0; k < size(); ++k)
+ {
+ if(indices().coeff(k) == i) indices().coeffRef(k) = j;
+ else if(indices().coeff(k) == j) indices().coeffRef(k) = i;
+ }
+ return derived();
+ }
+
+ /** Multiplies *this by the transposition \f$(ij)\f$ on the right.
+ *
+ * \returns a reference to *this.
+ *
+    * This is a fast operation; it only consists of swapping two indices.
+ *
+ * \sa applyTranspositionOnTheLeft(int,int)
+ */
+ Derived& applyTranspositionOnTheRight(Index i, Index j)
+ {
+ eigen_assert(i>=0 && j>=0 && i<size() && j<size());
+ std::swap(indices().coeffRef(i), indices().coeffRef(j));
+ return derived();
+ }
+
+ /** \returns the inverse permutation matrix.
+ *
+ * \note \note_try_to_help_rvo
+ */
+ inline Transpose<PermutationBase> inverse() const
+ { return derived(); }
+  /** \returns the transpose permutation matrix.
+ *
+ * \note \note_try_to_help_rvo
+ */
+ inline Transpose<PermutationBase> transpose() const
+ { return derived(); }
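+
+  // Illustrative sketch (an assumption): inverse() returns a lightweight
+  // expression; multiply it directly, or assign it to a PermutationMatrix:
+  //   Eigen::PermutationMatrix<Eigen::Dynamic> P(3);
+  //   P.setIdentity();
+  //   Eigen::Matrix3d A = Eigen::Matrix3d::Random();
+  //   Eigen::Matrix3d PA = P * A;
+  //   Eigen::Matrix3d back = P.inverse() * PA;  // recovers A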
+
+ /**** multiplication helpers to hopefully get RVO ****/
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ protected:
+ template<typename OtherDerived>
+ void assignTranspose(const PermutationBase<OtherDerived>& other)
+ {
+ for (int i=0; i<rows();++i) indices().coeffRef(other.indices().coeff(i)) = i;
+ }
+ template<typename Lhs,typename Rhs>
+ void assignProduct(const Lhs& lhs, const Rhs& rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows());
+ for (int i=0; i<rows();++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i));
+ }
+#endif
+
+ public:
+
+ /** \returns the product permutation matrix.
+ *
+ * \note \note_try_to_help_rvo
+ */
+ template<typename Other>
+ inline PlainPermutationType operator*(const PermutationBase<Other>& other) const
+ { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); }
+
+ /** \returns the product of a permutation with another inverse permutation.
+ *
+ * \note \note_try_to_help_rvo
+ */
+ template<typename Other>
+ inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other) const
+ { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); }
+
+ /** \returns the product of an inverse permutation with another permutation.
+ *
+ * \note \note_try_to_help_rvo
+ */
+ template<typename Other> friend
+ inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
+ { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }
+
+ protected:
+
+};
+
+/** \class PermutationMatrix
+ * \ingroup Core_Module
+ *
+ * \brief Permutation matrix
+ *
+ * \param SizeAtCompileTime the number of rows/cols, or Dynamic
+ * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
+ * \param IndexType the integer type of the indices
+ *
+ * This class represents a permutation matrix, internally stored as a vector of integers.
+ *
+ * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix
+ */
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
+ : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+ typedef IndexType Index;
+ typedef Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
+{
+ typedef PermutationBase<PermutationMatrix> Base;
+ typedef internal::traits<PermutationMatrix> Traits;
+ public:
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ typedef typename Traits::IndicesType IndicesType;
+ #endif
+
+ inline PermutationMatrix()
+ {}
+
+ /** Constructs an uninitialized permutation matrix of given size.
+ */
+ inline PermutationMatrix(int size) : m_indices(size)
+ {}
+
+ /** Copy constructor. */
+ template<typename OtherDerived>
+ inline PermutationMatrix(const PermutationBase<OtherDerived>& other)
+ : m_indices(other.indices()) {}
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** Standard copy constructor. Defined only to prevent a default copy constructor
+ * from hiding the other templated constructor */
+ inline PermutationMatrix(const PermutationMatrix& other) : m_indices(other.indices()) {}
+ #endif
+
+  /** Generic constructor from an expression of the indices. The indices
+    * array has the meaning that the permutation sends each integer i to indices[i].
+    *
+    * \warning It is your responsibility to check that the indices array that you pass actually
+ * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the
+ * array's size.
+ */
+ template<typename Other>
+ explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices)
+ {}
+
+ /** Convert the Transpositions \a tr to a permutation matrix */
+ template<typename Other>
+ explicit PermutationMatrix(const TranspositionsBase<Other>& tr)
+ : m_indices(tr.size())
+ {
+ *this = tr;
+ }
+
+ /** Copies the other permutation into *this */
+ template<typename Other>
+ PermutationMatrix& operator=(const PermutationBase<Other>& other)
+ {
+ m_indices = other.indices();
+ return *this;
+ }
+
+ /** Assignment from the Transpositions \a tr */
+ template<typename Other>
+ PermutationMatrix& operator=(const TranspositionsBase<Other>& tr)
+ {
+ return Base::operator=(tr.derived());
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ PermutationMatrix& operator=(const PermutationMatrix& other)
+ {
+ m_indices = other.m_indices;
+ return *this;
+ }
+ #endif
+
+ /** const version of indices(). */
+ const IndicesType& indices() const { return m_indices; }
+ /** \returns a reference to the stored array representing the permutation. */
+ IndicesType& indices() { return m_indices; }
+
+
+ /**** multiplication helpers to hopefully get RVO ****/
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Other>
+ PermutationMatrix(const Transpose<PermutationBase<Other> >& other)
+ : m_indices(other.nestedPermutation().size())
+ {
+ for (int i=0; i<m_indices.size();++i) m_indices.coeffRef(other.nestedPermutation().indices().coeff(i)) = i;
+ }
+ template<typename Lhs,typename Rhs>
+ PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs)
+ : m_indices(lhs.indices().size())
+ {
+ Base::assignProduct(lhs,rhs);
+ }
+#endif
+
+ protected:
+
+ IndicesType m_indices;
+};
+
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
+ : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+ typedef IndexType Index;
+ typedef Map<const Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess>
+ : public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
+{
+ typedef PermutationBase<Map> Base;
+ typedef internal::traits<Map> Traits;
+ public:
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ typedef typename Traits::IndicesType IndicesType;
+ typedef typename IndicesType::Scalar Index;
+ #endif
+
+ inline Map(const Index* indices)
+ : m_indices(indices)
+ {}
+
+ inline Map(const Index* indices, Index size)
+ : m_indices(indices,size)
+ {}
+
+ /** Copies the other permutation into *this */
+ template<typename Other>
+ Map& operator=(const PermutationBase<Other>& other)
+ { return Base::operator=(other.derived()); }
+
+ /** Assignment from the Transpositions \a tr */
+ template<typename Other>
+ Map& operator=(const TranspositionsBase<Other>& tr)
+ { return Base::operator=(tr.derived()); }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ Map& operator=(const Map& other)
+ {
+ m_indices = other.m_indices;
+ return *this;
+ }
+ #endif
+
+ /** const version of indices(). */
+ const IndicesType& indices() const { return m_indices; }
+ /** \returns a reference to the stored array representing the permutation. */
+ IndicesType& indices() { return m_indices; }
+
+ protected:
+
+ IndicesType m_indices;
+};
+
+/** \class PermutationWrapper
+ * \ingroup Core_Module
+ *
+ * \brief Class to view a vector of integers as a permutation matrix
+ *
+ * \param _IndicesType the type of the vector of integer (can be any compatible expression)
+ *
+ * This class allows one to view any vector expression of integers as a permutation matrix.
+ *
+ * \sa class PermutationBase, class PermutationMatrix
+ */
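+
+// Usage sketch (illustrative, not part of the original header): viewing an
+// integer vector as a permutation via MatrixBase::asPermutation():
+//   Eigen::VectorXi idx(3);
+//   idx << 2, 0, 1;
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
+//   Eigen::MatrixXd B = idx.asPermutation() * A;  // rows permuted per idx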
+
+struct PermutationStorage {};
+
+template<typename _IndicesType> class TranspositionsWrapper;
+namespace internal {
+template<typename _IndicesType>
+struct traits<PermutationWrapper<_IndicesType> >
+{
+ typedef PermutationStorage StorageKind;
+ typedef typename _IndicesType::Scalar Scalar;
+ typedef typename _IndicesType::Scalar Index;
+ typedef _IndicesType IndicesType;
+ enum {
+ RowsAtCompileTime = _IndicesType::SizeAtCompileTime,
+ ColsAtCompileTime = _IndicesType::SizeAtCompileTime,
+ MaxRowsAtCompileTime = IndicesType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = IndicesType::MaxColsAtCompileTime,
+ Flags = 0,
+ CoeffReadCost = _IndicesType::CoeffReadCost
+ };
+};
+}
+
+template<typename _IndicesType>
+class PermutationWrapper : public PermutationBase<PermutationWrapper<_IndicesType> >
+{
+ typedef PermutationBase<PermutationWrapper> Base;
+ typedef internal::traits<PermutationWrapper> Traits;
+ public:
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ typedef typename Traits::IndicesType IndicesType;
+ #endif
+
+ inline PermutationWrapper(const IndicesType& indices)
+ : m_indices(indices)
+ {}
+
+ /** const version of indices(). */
+ const typename internal::remove_all<typename IndicesType::Nested>::type&
+ indices() const { return m_indices; }
+
+ protected:
+
+ const typename IndicesType::Nested m_indices;
+};
+
+/** \returns the matrix with the permutation applied to the columns.
+ */
+template<typename Derived, typename PermutationDerived>
+inline const internal::permut_matrix_product_retval<PermutationDerived, Derived, OnTheRight>
+operator*(const MatrixBase<Derived>& matrix,
+ const PermutationBase<PermutationDerived> &permutation)
+{
+ return internal::permut_matrix_product_retval
+ <PermutationDerived, Derived, OnTheRight>
+ (permutation.derived(), matrix.derived());
+}
+
+/** \returns the matrix with the permutation applied to the rows.
+ */
+template<typename Derived, typename PermutationDerived>
+inline const internal::permut_matrix_product_retval
+ <PermutationDerived, Derived, OnTheLeft>
+operator*(const PermutationBase<PermutationDerived> &permutation,
+ const MatrixBase<Derived>& matrix)
+{
+ return internal::permut_matrix_product_retval
+ <PermutationDerived, Derived, OnTheLeft>
+ (permutation.derived(), matrix.derived());
+}
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct traits<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+ typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct permut_matrix_product_retval
+ : public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+ typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+
+ permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
+ : m_permutation(perm), m_matrix(matrix)
+ {}
+
+ inline int rows() const { return m_matrix.rows(); }
+ inline int cols() const { return m_matrix.cols(); }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ const int n = Side==OnTheLeft ? rows() : cols();
+
+ if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
+ {
+      // apply the permutation in place
+ Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
+ mask.fill(false);
+ int r = 0;
+ while(r < m_permutation.size())
+ {
+ // search for the next seed
+ while(r<m_permutation.size() && mask[r]) r++;
+ if(r>=m_permutation.size())
+ break;
+ // we got one, let's follow it until we are back to the seed
+ int k0 = r++;
+ int kPrev = k0;
+ mask.coeffRef(k0) = true;
+ for(int k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
+ {
+ Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)
+ .swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
+ (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev));
+
+ mask.coeffRef(k) = true;
+ kPrev = k;
+ }
+ }
+ }
+ else
+ {
+ for(int i = 0; i < n; ++i)
+ {
+ Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
+ (dst, ((Side==OnTheLeft) ^ Transposed) ? m_permutation.indices().coeff(i) : i)
+
+ =
+
+ Block<const MatrixTypeNestedCleaned,Side==OnTheLeft ? 1 : MatrixType::RowsAtCompileTime,Side==OnTheRight ? 1 : MatrixType::ColsAtCompileTime>
+ (m_matrix, ((Side==OnTheRight) ^ Transposed) ? m_permutation.indices().coeff(i) : i);
+ }
+ }
+ }
+
+ protected:
+ const PermutationType& m_permutation;
+ const typename MatrixType::Nested m_matrix;
+};
+
+/* Template partial specialization for transposed/inverse permutations */
+
+template<typename Derived>
+struct traits<Transpose<PermutationBase<Derived> > >
+ : traits<Derived>
+{};
+
+} // end namespace internal
+
+template<typename Derived>
+class Transpose<PermutationBase<Derived> >
+ : public EigenBase<Transpose<PermutationBase<Derived> > >
+{
+ typedef Derived PermutationType;
+ typedef typename PermutationType::IndicesType IndicesType;
+ typedef typename PermutationType::PlainPermutationType PlainPermutationType;
+ public:
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ typedef internal::traits<PermutationType> Traits;
+ typedef typename Derived::DenseMatrixType DenseMatrixType;
+ enum {
+ Flags = Traits::Flags,
+ CoeffReadCost = Traits::CoeffReadCost,
+ RowsAtCompileTime = Traits::RowsAtCompileTime,
+ ColsAtCompileTime = Traits::ColsAtCompileTime,
+ MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
+ };
+ typedef typename Traits::Scalar Scalar;
+ #endif
+
+ Transpose(const PermutationType& p) : m_permutation(p) {}
+
+ inline int rows() const { return m_permutation.rows(); }
+ inline int cols() const { return m_permutation.cols(); }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& other) const
+ {
+ other.setZero();
+ for (int i=0; i<rows();++i)
+ other.coeffRef(i, m_permutation.indices().coeff(i)) = typename DenseDerived::Scalar(1);
+ }
+ #endif
+
+ /** \return the equivalent permutation matrix */
+ PlainPermutationType eval() const { return *this; }
+
+ DenseMatrixType toDenseMatrix() const { return *this; }
+
+ /** \returns the matrix with the inverse permutation applied to the columns.
+ */
+ template<typename OtherDerived> friend
+ inline const internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheRight, true>
+ operator*(const MatrixBase<OtherDerived>& matrix, const Transpose& trPerm)
+ {
+ return internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheRight, true>(trPerm.m_permutation, matrix.derived());
+ }
+
+ /** \returns the matrix with the inverse permutation applied to the rows.
+ */
+ template<typename OtherDerived>
+ inline const internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheLeft, true>
+ operator*(const MatrixBase<OtherDerived>& matrix) const
+ {
+ return internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheLeft, true>(m_permutation, matrix.derived());
+ }
+
+ const PermutationType& nestedPermutation() const { return m_permutation; }
+
+ protected:
+ const PermutationType& m_permutation;
+};
+
+template<typename Derived>
+const PermutationWrapper<const Derived> MatrixBase<Derived>::asPermutation() const
+{
+ return derived();
+}
+
+#endif // EIGEN_PERMUTATIONMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h b/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
new file mode 100644
index 00000000000..c70db92479a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
@@ -0,0 +1,737 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DENSESTORAGEBASE_H
+#define EIGEN_DENSESTORAGEBASE_H
+
+#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
+#else
+# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+#endif
+
+namespace internal {
+
+template <typename Derived, typename OtherDerived = Derived, bool IsVector = static_cast<bool>(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
+
+template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;
+
+} // end namespace internal
+
+/**
+ * \brief %Dense storage base class for matrices and arrays.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN.
+ *
+ * \sa \ref TopicClassHierarchy
+ */
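+// A minimal sketch of the plugin mechanism mentioned above (the header name is
+// hypothetical, not part of Eigen): building with
+//   -DEIGEN_PLAINOBJECTBASE_PLUGIN='"MyPlainObjectBasePlugin.h"'
+// textually injects that header into this class body, at the point of the
+// #include EIGEN_PLAINOBJECTBASE_PLUGIN directive further below, which is how
+// custom members can be added without modifying Eigen itself.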
+template<typename Derived>
+class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
+{
+ public:
+ enum { Options = internal::traits<Derived>::Options };
+ typedef typename internal::dense_xpr_base<Derived>::type Base;
+
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef Derived DenseType;
+
+ using Base::RowsAtCompileTime;
+ using Base::ColsAtCompileTime;
+ using Base::SizeAtCompileTime;
+ using Base::MaxRowsAtCompileTime;
+ using Base::MaxColsAtCompileTime;
+ using Base::MaxSizeAtCompileTime;
+ using Base::IsVectorAtCompileTime;
+ using Base::Flags;
+
+ template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map;
+ friend class Eigen::Map<Derived, Unaligned>;
+ typedef Eigen::Map<Derived, Unaligned> MapType;
+ friend class Eigen::Map<const Derived, Unaligned>;
+ typedef const Eigen::Map<const Derived, Unaligned> ConstMapType;
+ friend class Eigen::Map<Derived, Aligned>;
+ typedef Eigen::Map<Derived, Aligned> AlignedMapType;
+ friend class Eigen::Map<const Derived, Aligned>;
+ typedef const Eigen::Map<const Derived, Aligned> ConstAlignedMapType;
+ template<typename StrideType> struct StridedMapType { typedef Eigen::Map<Derived, Unaligned, StrideType> type; };
+ template<typename StrideType> struct StridedConstMapType { typedef Eigen::Map<const Derived, Unaligned, StrideType> type; };
+ template<typename StrideType> struct StridedAlignedMapType { typedef Eigen::Map<Derived, Aligned, StrideType> type; };
+ template<typename StrideType> struct StridedConstAlignedMapType { typedef Eigen::Map<const Derived, Aligned, StrideType> type; };
+
+
+ protected:
+ DenseStorage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage;
+
+ public:
+ enum { NeedsToAlign = (!(Options&DontAlign))
+ && SizeAtCompileTime!=Dynamic && ((static_cast<int>(sizeof(Scalar))*SizeAtCompileTime)%16)==0 };
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
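+    // For example (illustrative): a fixed-size Vector4f (4 floats = 16 bytes)
+    // satisfies NeedsToAlign, while a Vector3f (12 bytes) or any dynamic-size
+    // matrix does not; the macro above then provides a 16-byte-aligned
+    // operator new only where it is actually required.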
+
+ Base& base() { return *static_cast<Base*>(this); }
+ const Base& base() const { return *static_cast<const Base*>(this); }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
+
+ EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const
+ {
+ if(Flags & RowMajorBit)
+ return m_storage.data()[col + row * m_storage.cols()];
+ else // column-major
+ return m_storage.data()[row + col * m_storage.rows()];
+ }
+
+ EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
+ {
+ return m_storage.data()[index];
+ }
+
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+ {
+ if(Flags & RowMajorBit)
+ return m_storage.data()[col + row * m_storage.cols()];
+ else // column-major
+ return m_storage.data()[row + col * m_storage.rows()];
+ }
+
+ EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+ {
+ return m_storage.data()[index];
+ }
+
+ EIGEN_STRONG_INLINE const Scalar& coeffRef(Index row, Index col) const
+ {
+ if(Flags & RowMajorBit)
+ return m_storage.data()[col + row * m_storage.cols()];
+ else // column-major
+ return m_storage.data()[row + col * m_storage.rows()];
+ }
+
+ EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const
+ {
+ return m_storage.data()[index];
+ }
+
+ /** \internal */
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+ {
+ return internal::ploadt<PacketScalar, LoadMode>
+ (m_storage.data() + (Flags & RowMajorBit
+ ? col + row * m_storage.cols()
+ : row + col * m_storage.rows()));
+ }
+
+ /** \internal */
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+ {
+ return internal::ploadt<PacketScalar, LoadMode>(m_storage.data() + index);
+ }
+
+ /** \internal */
+ template<int StoreMode>
+ EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x)
+ {
+ internal::pstoret<Scalar, PacketScalar, StoreMode>
+ (m_storage.data() + (Flags & RowMajorBit
+ ? col + row * m_storage.cols()
+ : row + col * m_storage.rows()), x);
+ }
+
+ /** \internal */
+ template<int StoreMode>
+ EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x)
+ {
+ internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
+ }
+
+ /** \returns a const pointer to the data array of this matrix */
+ EIGEN_STRONG_INLINE const Scalar *data() const
+ { return m_storage.data(); }
+
+ /** \returns a pointer to the data array of this matrix */
+ EIGEN_STRONG_INLINE Scalar *data()
+ { return m_storage.data(); }
+
+ /** Resizes \c *this to a \a rows x \a cols matrix.
+ *
+ * This method is intended for dynamic-size matrices, although it is legal to call it on any
+ * matrix as long as fixed dimensions are left unchanged. If you only want to change the number
+ * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t).
+ *
+ * If the current number of coefficients of \c *this exactly matches the
+ * product \a rows * \a cols, then no memory allocation is performed and
+ * the current values are left unchanged. In all other cases, including
+ * shrinking, the data is reallocated and all previous values are lost.
+ *
+ * Example: \include Matrix_resize_int_int.cpp
+ * Output: \verbinclude Matrix_resize_int_int.out
+ *
+ * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
+ */
+ EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
+ {
+ #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+ Index size = rows*cols;
+ bool size_changed = size != this->size();
+ m_storage.resize(size, rows, cols);
+ if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ #else
+ m_storage.resize(rows*cols, rows, cols);
+ #endif
+ }
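+    // Usage sketch (illustrative, assuming a fully dynamic matrix):
+    //   Eigen::MatrixXd m(2,3);
+    //   m.resize(3,2); // 2*3 == 3*2: no reallocation, coefficients reinterpreted
+    //   m.resize(4,5); // 2*3 != 4*5: reallocates, all previous values are lost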
+
+ /** Resizes \c *this to a vector of length \a size
+ *
+ * \only_for_vectors. This method does not work for
+ * partially dynamic matrices when the static dimension is anything other
+ * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
+ *
+ * Example: \include Matrix_resize_int.cpp
+ * Output: \verbinclude Matrix_resize_int.out
+ *
+ * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t)
+ */
+ inline void resize(Index size)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase)
+ eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
+ #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+ bool size_changed = size != this->size();
+ #endif
+ if(RowsAtCompileTime == 1)
+ m_storage.resize(size, 1, size);
+ else
+ m_storage.resize(size, size, 1);
+ #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO
+ if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ #endif
+ }
+
+ /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange
+ * as in the example below.
+ *
+ * Example: \include Matrix_resize_NoChange_int.cpp
+ * Output: \verbinclude Matrix_resize_NoChange_int.out
+ *
+ * \sa resize(Index,Index)
+ */
+ inline void resize(NoChange_t, Index cols)
+ {
+ resize(rows(), cols);
+ }
+
+ /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange
+ * as in the example below.
+ *
+ * Example: \include Matrix_resize_int_NoChange.cpp
+ * Output: \verbinclude Matrix_resize_int_NoChange.out
+ *
+ * \sa resize(Index,Index)
+ */
+ inline void resize(Index rows, NoChange_t)
+ {
+ resize(rows, cols());
+ }
+
+ /** Resizes \c *this to have the same dimensions as \a other.
+ * Takes care of doing all the checking that's needed.
+ *
+ * Note that copying a row-vector into a vector (and conversely) is allowed.
+ * The resizing, if any, is then done in the appropriate way so that row-vectors
+ * remain row-vectors and vectors remain vectors.
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
+ {
+ const OtherDerived& other = _other.derived();
+ const Index othersize = other.rows()*other.cols();
+ if(RowsAtCompileTime == 1)
+ {
+ eigen_assert(other.rows() == 1 || other.cols() == 1);
+ resize(1, othersize);
+ }
+ else if(ColsAtCompileTime == 1)
+ {
+ eigen_assert(other.rows() == 1 || other.cols() == 1);
+ resize(othersize, 1);
+ }
+ else resize(other.rows(), other.cols());
+ }
+
+ /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+ *
+ * The method is intended for matrices of dynamic size. If you only want to change the number
+ * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
+ * conservativeResize(Index, NoChange_t).
+ *
+ * Matrices are resized relative to the top-left element. In case values need to be
+ * appended to the matrix they will be uninitialized.
+ */
+ EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols)
+ {
+ internal::conservative_resize_like_impl<Derived>::run(*this, rows, cols);
+ }
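+    // Sketch (illustrative): only the overlapping top-left block is preserved:
+    //   Eigen::MatrixXi m = Eigen::MatrixXi::Constant(2,2,7);
+    //   m.conservativeResize(3,3); // old 2x2 block kept, new row/col uninitialized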
+
+ /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+ *
+ * As opposed to conservativeResize(Index rows, Index cols), this version leaves
+ * the number of columns unchanged.
+ *
+ * In case the matrix is growing, new rows will be uninitialized.
+ */
+ EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t)
+ {
+ // Note: see the comment in conservativeResize(Index,Index)
+ conservativeResize(rows, cols());
+ }
+
+ /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+ *
+ * As opposed to conservativeResize(Index rows, Index cols), this version leaves
+ * the number of rows unchanged.
+ *
+ * In case the matrix is growing, new columns will be uninitialized.
+ */
+ EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols)
+ {
+ // Note: see the comment in conservativeResize(Index,Index)
+ conservativeResize(rows(), cols);
+ }
+
+ /** Resizes the vector to \a size while retaining old values.
+ *
+ * \only_for_vectors. This method does not work for
+ * partially dynamic matrices when the static dimension is anything other
+ * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
+ *
+ * When values are appended, they will be uninitialized.
+ */
+ EIGEN_STRONG_INLINE void conservativeResize(Index size)
+ {
+ internal::conservative_resize_like_impl<Derived>::run(*this, size);
+ }
+
+    /** Resizes the matrix to the dimensions (rows x cols) of \c other, while leaving old values untouched.
+ *
+ * The method is intended for matrices of dynamic size. If you only want to change the number
+ * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
+ * conservativeResize(Index, NoChange_t).
+ *
+ * Matrices are resized relative to the top-left element. In case values need to be
+      * appended to the matrix they will be copied from \c other.
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase<OtherDerived>& other)
+ {
+ internal::conservative_resize_like_impl<Derived,OtherDerived>::run(*this, other);
+ }
+
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other)
+ {
+ return _set(other);
+ }
+
+ /** \sa MatrixBase::lazyAssign() */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase<OtherDerived>& other)
+ {
+ _resize_to_match(other);
+ return Base::lazyAssign(other.derived());
+ }
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue<OtherDerived>& func)
+ {
+ resize(func.rows(), func.cols());
+ return Base::operator=(func);
+ }
+
+ EIGEN_STRONG_INLINE explicit PlainObjectBase() : m_storage()
+ {
+// _check_template_params();
+// EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ // FIXME is it still needed ?
+ /** \internal */
+ PlainObjectBase(internal::constructor_without_unaligned_array_assert)
+ : m_storage(internal::constructor_without_unaligned_array_assert())
+ {
+// _check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+#endif
+
+ EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols)
+ : m_storage(size, rows, cols)
+ {
+// _check_template_params();
+// EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+
+ /** \copydoc MatrixBase::operator=(const EigenBase<OtherDerived>&)
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived> &other)
+ {
+ _resize_to_match(other);
+ Base::operator=(other.derived());
+ return this->derived();
+ }
+
+ /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase<OtherDerived> &other)
+ : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
+ {
+ _check_template_params();
+ Base::operator=(other.derived());
+ }
+
+ /** \name Map
+      * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects,
+      * while the MapAligned() functions return aligned Map objects and thus should be called only with 16-byte-aligned
+      * \a data pointers.
+ *
+ * \see class Map
+ */
+ //@{
+ inline static ConstMapType Map(const Scalar* data)
+ { return ConstMapType(data); }
+ inline static MapType Map(Scalar* data)
+ { return MapType(data); }
+ inline static ConstMapType Map(const Scalar* data, Index size)
+ { return ConstMapType(data, size); }
+ inline static MapType Map(Scalar* data, Index size)
+ { return MapType(data, size); }
+ inline static ConstMapType Map(const Scalar* data, Index rows, Index cols)
+ { return ConstMapType(data, rows, cols); }
+ inline static MapType Map(Scalar* data, Index rows, Index cols)
+ { return MapType(data, rows, cols); }
+
+ inline static ConstAlignedMapType MapAligned(const Scalar* data)
+ { return ConstAlignedMapType(data); }
+ inline static AlignedMapType MapAligned(Scalar* data)
+ { return AlignedMapType(data); }
+ inline static ConstAlignedMapType MapAligned(const Scalar* data, Index size)
+ { return ConstAlignedMapType(data, size); }
+ inline static AlignedMapType MapAligned(Scalar* data, Index size)
+ { return AlignedMapType(data, size); }
+ inline static ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols)
+ { return ConstAlignedMapType(data, rows, cols); }
+ inline static AlignedMapType MapAligned(Scalar* data, Index rows, Index cols)
+ { return AlignedMapType(data, rows, cols); }
+
+ template<int Outer, int Inner>
+ inline static typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, const Stride<Outer, Inner>& stride)
+ { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, const Stride<Outer, Inner>& stride)
+ { return typename StridedMapType<Stride<Outer, Inner> >::type(data, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+ { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+ { return typename StridedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+ { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+ { return typename StridedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+
+ template<int Outer, int Inner>
+ inline static typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, const Stride<Outer, Inner>& stride)
+ { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, const Stride<Outer, Inner>& stride)
+ { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+ { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+ { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+ { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+ template<int Outer, int Inner>
+ inline static typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+ { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+ //@}
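+    // Usage sketch for the Map functions above (the raw buffer and its values
+    // are illustrative and caller-owned):
+    //   float raw[6] = {1, 2, 3, 4, 5, 6};
+    //   Eigen::MatrixXf copy = Eigen::MatrixXf::Map(raw, 2, 3); // deep copy
+    //   Eigen::MatrixXf::Map(raw, 2, 3).setZero();              // writes through raw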
+
+ using Base::setConstant;
+ Derived& setConstant(Index size, const Scalar& value);
+ Derived& setConstant(Index rows, Index cols, const Scalar& value);
+
+ using Base::setZero;
+ Derived& setZero(Index size);
+ Derived& setZero(Index rows, Index cols);
+
+ using Base::setOnes;
+ Derived& setOnes(Index size);
+ Derived& setOnes(Index rows, Index cols);
+
+ using Base::setRandom;
+ Derived& setRandom(Index size);
+ Derived& setRandom(Index rows, Index cols);
+
+ #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN
+ #include EIGEN_PLAINOBJECTBASE_PLUGIN
+ #endif
+
+ protected:
+ /** \internal Resizes *this in preparation for assigning \a other to it.
+ * Takes care of doing all the checking that's needed.
+ *
+ * Note that copying a row-vector into a vector (and conversely) is allowed.
+ * The resizing, if any, is then done in the appropriate way so that row-vectors
+ * remain row-vectors and vectors remain vectors.
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other)
+ {
+ #ifdef EIGEN_NO_AUTOMATIC_RESIZING
+ eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size())
+ : (rows() == other.rows() && cols() == other.cols())))
+ && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
+ #else
+ resizeLike(other);
+ #endif
+ }
+
+ /**
+ * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
+ *
+ * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+ * it will be initialized.
+ *
+ * Note that copying a row-vector into a vector (and conversely) is allowed.
+ * The resizing, if any, is then done in the appropriate way so that row-vectors
+ * remain row-vectors and vectors remain vectors.
+ *
+ * \sa operator=(const MatrixBase<OtherDerived>&), _set_noalias()
+ *
+ * \internal
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Derived& _set(const DenseBase<OtherDerived>& other)
+ {
+ _set_selector(other.derived(), typename internal::conditional<static_cast<bool>(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type());
+ return this->derived();
+ }
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); }
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); }
+
+ /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
+ * is the case when creating a new matrix) so one can enforce lazy evaluation.
+ *
+ * \sa operator=(const MatrixBase<OtherDerived>&), _set()
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)
+ {
+      // I don't think we need this resize call, since lazyAssign will resize anyway
+      // and lazyAssign will be called by the assign selector.
+      //_resize_to_match(other);
+      // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
+      // it wouldn't allow copying a row-vector into a column-vector.
+ return internal::assign_selector<Derived,OtherDerived,false>::run(this->derived(), other.derived());
+ }
+
+ template<typename T0, typename T1>
+ EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
+ {
+ eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
+ && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
+ m_storage.resize(rows*cols,rows,cols);
+ EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
+ }
+ template<typename T0, typename T1>
+ EIGEN_STRONG_INLINE void _init2(const Scalar& x, const Scalar& y, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
+ m_storage.data()[0] = x;
+ m_storage.data()[1] = y;
+ }
+
+ template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
+ friend struct internal::matrix_swap_impl;
+
+  /** \internal Generic implementation of swap for dense storage: for dynamic-sized matrices of the same type, it is enough to swap the
+    * data pointers.
+ */
+ template<typename OtherDerived>
+ void _swap(DenseBase<OtherDerived> const & other)
+ {
+ enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic };
+ internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.const_cast_derived());
+ }
+
+ public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ EIGEN_STRONG_INLINE static void _check_template_params()
+ {
+ EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor)
+ && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0)
+ && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0))
+ && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0))
+ && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0))
+ && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0))
+ && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic)
+ && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic)
+ && (Options & (DontAlign|RowMajor)) == Options),
+ INVALID_MATRIX_TEMPLATE_PARAMETERS)
+ }
+#endif
+
+private:
+ enum { ThisConstantIsPrivateInPlainObjectBase };
+};
+
+template <typename Derived, typename OtherDerived, bool IsVector>
+struct internal::conservative_resize_like_impl
+{
+ typedef typename Derived::Index Index;
+ static void run(DenseBase<Derived>& _this, Index rows, Index cols)
+ {
+ if (_this.rows() == rows && _this.cols() == cols) return;
+ EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
+
+ if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
+ (!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns
+ {
+ _this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
+ }
+ else
+ {
+ // The storage order does not allow us to use reallocation.
+ typename Derived::PlainObject tmp(rows,cols);
+ const Index common_rows = (std::min)(rows, _this.rows());
+ const Index common_cols = (std::min)(cols, _this.cols());
+ tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
+ _this.derived().swap(tmp);
+ }
+ }
+
+ static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
+ {
+ if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
+
+ // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index),
+    // neither RowsAtCompileTime nor ColsAtCompileTime needs to be Dynamic. If only one of the
+ // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or
+ // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like
+ // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good.
+ EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
+ EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
+
+ if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
+ (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns
+ {
+ const Index new_rows = other.rows() - _this.rows();
+ const Index new_cols = other.cols() - _this.cols();
+ _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols());
+ if (new_rows>0)
+ _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);
+ else if (new_cols>0)
+ _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols);
+ }
+ else
+ {
+ // The storage order does not allow us to use reallocation.
+ typename Derived::PlainObject tmp(other);
+ const Index common_rows = (std::min)(tmp.rows(), _this.rows());
+ const Index common_cols = (std::min)(tmp.cols(), _this.cols());
+ tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
+ _this.derived().swap(tmp);
+ }
+ }
+};
+
+namespace internal {
+
+template <typename Derived, typename OtherDerived>
+struct conservative_resize_like_impl<Derived,OtherDerived,true>
+{
+ typedef typename Derived::Index Index;
+ static void run(DenseBase<Derived>& _this, Index size)
+ {
+ const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
+ const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
+ _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
+ }
+
+ static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
+ {
+ if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
+
+ const Index num_new_elements = other.size() - _this.size();
+
+ const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
+ const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
+ _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
+
+ if (num_new_elements > 0)
+ _this.tail(num_new_elements) = other.tail(num_new_elements);
+ }
+};
+
+template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
+struct matrix_swap_impl
+{
+ static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+ {
+ a.base().swap(b);
+ }
+};
+
+template<typename MatrixTypeA, typename MatrixTypeB>
+struct matrix_swap_impl<MatrixTypeA, MatrixTypeB, true>
+{
+ static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+ {
+ static_cast<typename MatrixTypeA::Base&>(a).m_storage.swap(static_cast<typename MatrixTypeB::Base&>(b).m_storage);
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_DENSESTORAGEBASE_H
diff --git a/extern/Eigen3/Eigen/src/Core/Product.h b/extern/Eigen3/Eigen/src/Core/Product.h
new file mode 100644
index 00000000000..e2035b242b1
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Product.h
@@ -0,0 +1,625 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PRODUCT_H
+#define EIGEN_PRODUCT_H
+
+/** \class GeneralProduct
+ * \ingroup Core_Module
+ *
+ * \brief Expression of the product of two general matrices or vectors
+ *
+ * \param LhsNested the type used to store the left-hand side
+ * \param RhsNested the type used to store the right-hand side
+ * \param ProductMode the type of the product
+ *
+ * This class represents an expression of the product of two general matrices.
+ * By a general matrix we mean a dense matrix with full storage; this excludes,
+ * for instance, triangular, selfadjoint, and sparse matrices.
+ * It is the return type of the operator* between general matrices. Its template
+ * arguments are determined automatically by ProductReturnType. Therefore,
+ * GeneralProduct should never be used directly. To determine the result type of a
+ * function which involves a matrix product, use ProductReturnType::Type.
+ *
+ * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
+ */
+template<typename Lhs, typename Rhs, int ProductType = internal::product_type<Lhs,Rhs>::value>
+class GeneralProduct;
+
+enum {
+ Large = 2,
+ Small = 3
+};
+
+namespace internal {
+
+template<int Rows, int Cols, int Depth> struct product_type_selector;
+
+template<int Size, int MaxSize> struct product_size_category
+{
+ enum { is_large = MaxSize == Dynamic ||
+ Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD,
+ value = is_large ? Large
+ : Size == 1 ? 1
+ : Small
+ };
+};
+
+template<typename Lhs, typename Rhs> struct product_type
+{
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ enum {
+ MaxRows = _Lhs::MaxRowsAtCompileTime,
+ Rows = _Lhs::RowsAtCompileTime,
+ MaxCols = _Rhs::MaxColsAtCompileTime,
+ Cols = _Rhs::ColsAtCompileTime,
+ MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime,
+ _Rhs::MaxRowsAtCompileTime),
+ Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime,
+ _Rhs::RowsAtCompileTime),
+ LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+ };
+
+ // the splitting into different lines of code here, introducing the _select enums and the typedef below,
+ // is to work around an internal compiler error with gcc 4.1 and 4.2.
+private:
+ enum {
+ rows_select = product_size_category<Rows,MaxRows>::value,
+ cols_select = product_size_category<Cols,MaxCols>::value,
+ depth_select = product_size_category<Depth,MaxDepth>::value
+ };
+ typedef product_type_selector<rows_select, cols_select, depth_select> selector;
+
+public:
+ enum {
+ value = selector::ret
+ };
+#ifdef EIGEN_DEBUG_PRODUCT
+ static void debug()
+ {
+ EIGEN_DEBUG_VAR(Rows);
+ EIGEN_DEBUG_VAR(Cols);
+ EIGEN_DEBUG_VAR(Depth);
+ EIGEN_DEBUG_VAR(rows_select);
+ EIGEN_DEBUG_VAR(cols_select);
+ EIGEN_DEBUG_VAR(depth_select);
+ EIGEN_DEBUG_VAR(value);
+ }
+#endif
+};
+
+
+/* The following allows to select the kind of product at compile time
+ * based on the three dimensions of the product.
+ * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */
+// FIXME I'm not sure the current mapping is the ideal one.
+template<int M, int N> struct product_type_selector<M,N,1> { enum { ret = OuterProduct }; };
+template<int Depth> struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; };
+template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; };
+template<> struct product_type_selector<Small,1, Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Small,Small,Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Small, Small, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<> struct product_type_selector<Small, Large, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<> struct product_type_selector<Large, Small, 1> { enum { ret = LazyCoeffBasedProductMode }; };
+template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; };
+template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Large,1, Small> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Large,1, Large> { enum { ret = GemvProduct }; };
+template<> struct product_type_selector<Small,1, Large> { enum { ret = CoeffBasedProductMode }; };
+template<> struct product_type_selector<Small,Small,Large> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Large,Small,Large> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Small,Large,Large> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Large,Large,Large> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Large,Small,Small> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Small,Large,Small> { enum { ret = GemmProduct }; };
+template<> struct product_type_selector<Large,Large,Small> { enum { ret = GemmProduct }; };
+
+} // end namespace internal
+
+/** \class ProductReturnType
+ * \ingroup Core_Module
+ *
+ * \brief Helper class to get the correct and optimized returned type of operator*
+ *
+ * \param Lhs the type of the left-hand side
+ * \param Rhs the type of the right-hand side
+ * \param ProductMode the type of the product (determined automatically by internal::product_mode)
+ *
+ * This class defines the typename Type representing the optimized product expression
+ * between two matrix expressions. In practice, using ProductReturnType<Lhs,Rhs>::Type
+ * is the recommended way to define the result type of a function returning an expression
+ * which involves a matrix product. The class Product should never be
+ * used directly.
+ *
+ * \sa class Product, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
+ */
+template<typename Lhs, typename Rhs, int ProductType>
+struct ProductReturnType
+{
+  // TODO use the nested type to reduce instantiations?
+// typedef typename internal::nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
+// typedef typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
+
+ typedef GeneralProduct<Lhs/*Nested*/, Rhs/*Nested*/, ProductType> Type;
+};
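+// Hedged sketch of the intended use: a generic function returning a product
+// expression can name its return type via ProductReturnType (the function name
+// is illustrative, not part of Eigen):
+//   template<typename A, typename B>
+//   const typename ProductReturnType<A,B>::Type
+//   myProduct(const MatrixBase<A>& a, const MatrixBase<B>& b)
+//   { return a * b; }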
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,CoeffBasedProductMode>
+{
+ typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
+ typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
+ typedef CoeffBasedProduct<LhsNested, RhsNested, EvalBeforeAssigningBit | EvalBeforeNestingBit> Type;
+};
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
+{
+ typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
+ typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
+ typedef CoeffBasedProduct<LhsNested, RhsNested, NestByRefBit> Type;
+};
+
+// this is a workaround for sun CC
+template<typename Lhs, typename Rhs>
+struct LazyProductReturnType : public ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
+{};
+
+/***********************************************************************
+* Implementation of Inner Vector Vector Product
+***********************************************************************/
+
+// FIXME : maybe the "inner product" could return a Scalar
+// instead of a 1x1 matrix ??
+// Pro: more natural for the user
+// Cons: this could be a problem if, in a meta-unrolled algorithm, a matrix-matrix
+// product ends up as a row-vector times col-vector product... To tackle this use
+// case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,InnerProduct> >
+ : traits<Matrix<typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> >
+{};
+
+}
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, InnerProduct>
+ : internal::no_assignment_operator,
+ public Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1>
+{
+ typedef Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> Base;
+ public:
+ GeneralProduct(const Lhs& lhs, const Rhs& rhs)
+ {
+ EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
+ }
+
+    /** Conversion to scalar */
+ operator const typename Base::Scalar() const {
+ return Base::coeff(0,0);
+ }
+};
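+// A consequence of the conversion operator above (sketch, assuming VectorXd u, v
+// of equal size): an inner product can be assigned straight to a scalar,
+//   double d = u.transpose() * v; // 1x1 product, implicitly converted to double
+// although u.dot(v) remains the idiomatic spelling.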
+
+/***********************************************************************
+* Implementation of Outer Vector Vector Product
+***********************************************************************/
+
+namespace internal {
+template<int StorageOrder> struct outer_product_selector;
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,OuterProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs> >
+{};
+
+}
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, OuterProduct>
+ : public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+ GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {
+ EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ }
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dest, alpha);
+ }
+};
+
+namespace internal {
+
+template<> struct outer_product_selector<ColMajor> {
+ template<typename ProductType, typename Dest>
+ static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+ typedef typename Dest::Index Index;
+ // FIXME make sure lhs is sequentially stored
+ // FIXME not very good if rhs is real and lhs complex while alpha is real too
+ const Index cols = dest.cols();
+ for (Index j=0; j<cols; ++j)
+ dest.col(j) += (alpha * prod.rhs().coeff(j)) * prod.lhs();
+ }
+};
+
+template<> struct outer_product_selector<RowMajor> {
+ template<typename ProductType, typename Dest>
+ static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) {
+ typedef typename Dest::Index Index;
+ // FIXME make sure rhs is sequentially stored
+ // FIXME not very good if lhs is real and rhs complex while alpha is real too
+ const Index rows = dest.rows();
+ for (Index i=0; i<rows; ++i)
+ dest.row(i) += (alpha * prod.lhs().coeff(i)) * prod.rhs();
+ }
+};
+
+} // end namespace internal
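+// What the two selectors above implement (sketch): the rank-1 update
+// dest += alpha * u * v^T, traversed column-by-column for a column-major dest
+// and row-by-row for a row-major one. Illustrative use:
+//   Eigen::MatrixXd D = Eigen::MatrixXd::Zero(3,3);
+//   Eigen::Vector3d u(1,2,3), v(4,5,6);
+//   D += u * v.transpose(); // dispatches to the outer-product path above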
+
+/***********************************************************************
+* Implementation of General Matrix Vector Product
+***********************************************************************/
+
+/* According to the shape/flags of the matrix we have to distinguish 3 different cases:
+ * 1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
+ * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
+ * 3 - all other cases are handled using a simple loop along the outer-storage direction.
+ * Therefore we need a lower level meta selector.
+ * Furthermore, if the matrix is the rhs, then the product has to be transposed.
+ */
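+// From the user's point of view all three cases are reached through the same
+// expression, e.g. (sketch, assuming dynamic sizes) Eigen::VectorXd y = A * x;
+// only the kernel chosen by the selector below differs.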
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,GemvProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs> >
+{};
+
+template<int Side, int StorageOrder, bool BlasCompatible>
+struct gemv_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, GemvProduct>
+ : public ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+ typedef typename Lhs::Scalar LhsScalar;
+ typedef typename Rhs::Scalar RhsScalar;
+
+ GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {
+// EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::Scalar, typename Rhs::Scalar>::value),
+// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ }
+
+ enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };
+ typedef typename internal::conditional<int(Side)==OnTheRight,_LhsNested,_RhsNested>::type MatrixType;
+
+ template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ {
+ eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols());
+ internal::gemv_selector<Side,(int(MatrixType::Flags)&RowMajorBit) ? RowMajor : ColMajor,
+ bool(internal::blas_traits<MatrixType>::HasUsableDirectAccess)>::run(*this, dst, alpha);
+ }
+};
+
+namespace internal {
+
+// The vector is on the left => transposition
+template<int StorageOrder, bool BlasCompatible>
+struct gemv_selector<OnTheLeft,StorageOrder,BlasCompatible>
+{
+ template<typename ProductType, typename Dest>
+ static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ Transpose<Dest> destT(dest);
+ enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
+ gemv_selector<OnTheRight,OtherStorageOrder,BlasCompatible>
+ ::run(GeneralProduct<Transpose<const typename ProductType::_RhsNested>,Transpose<const typename ProductType::_LhsNested>, GemvProduct>
+ (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha);
+ }
+};
+
+template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vector_if;
+
+template<typename Scalar,int Size,int MaxSize>
+struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
+{
+ EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
+};
+
+template<typename Scalar,int Size>
+struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
+{
+ EIGEN_STRONG_INLINE Scalar* data() { return 0; }
+};
+
+template<typename Scalar,int Size,int MaxSize>
+struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
+{
+ #if EIGEN_ALIGN_STATICALLY
+ internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize),0> m_data;
+ EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
+ #else
+ // Some architectures cannot align on the stack,
+  // => let's manually enforce alignment by allocating more data and returning the address of the first aligned element.
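+  // (the expression in data() below rounds the buffer address down to a multiple
+  // of 16 and then advances by one full 16-byte slot, so the returned pointer is
+  // always 16-byte aligned and still inside the PacketSize-padded buffer)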
+ enum {
+ ForceAlignment = internal::packet_traits<Scalar>::Vectorizable,
+ PacketSize = internal::packet_traits<Scalar>::size
+ };
+ internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize)+(ForceAlignment?PacketSize:0),0> m_data;
+ EIGEN_STRONG_INLINE Scalar* data() {
+ return ForceAlignment
+ ? reinterpret_cast<Scalar*>((reinterpret_cast<size_t>(m_data.array) & ~(size_t(15))) + 16)
+ : m_data.array;
+ }
+ #endif
+};
+
+template<> struct gemv_selector<OnTheRight,ColMajor,true>
+{
+ template<typename ProductType, typename Dest>
+ static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ typedef typename ProductType::Index Index;
+ typedef typename ProductType::LhsScalar LhsScalar;
+ typedef typename ProductType::RhsScalar RhsScalar;
+ typedef typename ProductType::Scalar ResScalar;
+ typedef typename ProductType::RealScalar RealScalar;
+ typedef typename ProductType::ActualLhsType ActualLhsType;
+ typedef typename ProductType::ActualRhsType ActualRhsType;
+ typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+ typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+ typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
+
+ const ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs());
+ const ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+ * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+ enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1;
+      // on the other hand, it is good for the cache to pack the vector anyway...
+ EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
+ ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
+ MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
+ };
+
+ gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
+
+    // this is written like this (i.e., with a ?:) to work around an ICE with ICC 12
+ bool alphaIsCompatible = (!ComplexByReal) ? true : (imag(actualAlpha)==RealScalar(0));
+ bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
+
+ RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
+
+ ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
+ evalToDest ? dest.data() : static_dest.data());
+
+ if(!evalToDest)
+ {
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ int size = dest.size();
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #endif
+ if(!alphaIsCompatible)
+ {
+ MappedDest(actualDestPtr, dest.size()).setZero();
+ compatibleAlpha = RhsScalar(1);
+ }
+ else
+ MappedDest(actualDestPtr, dest.size()) = dest;
+ }
+
+ general_matrix_vector_product
+ <Index,LhsScalar,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+ actualLhs.rows(), actualLhs.cols(),
+ &actualLhs.coeffRef(0,0), actualLhs.outerStride(),
+ actualRhs.data(), actualRhs.innerStride(),
+ actualDestPtr, 1,
+ compatibleAlpha);
+
+ if (!evalToDest)
+ {
+ if(!alphaIsCompatible)
+ dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
+ else
+ dest = MappedDest(actualDestPtr, dest.size());
+ }
+ }
+};
+
+template<> struct gemv_selector<OnTheRight,RowMajor,true>
+{
+ template<typename ProductType, typename Dest>
+ static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ typedef typename ProductType::LhsScalar LhsScalar;
+ typedef typename ProductType::RhsScalar RhsScalar;
+ typedef typename ProductType::Scalar ResScalar;
+ typedef typename ProductType::Index Index;
+ typedef typename ProductType::ActualLhsType ActualLhsType;
+ typedef typename ProductType::ActualRhsType ActualRhsType;
+ typedef typename ProductType::_ActualRhsType _ActualRhsType;
+ typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+ typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+
+ typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+ typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+ * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+ enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1;
+      // on the other hand, it is good for the cache to pack the vector anyway...
+ DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1
+ };
+
+ gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
+
+ ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
+ DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
+
+ if(!DirectlyUseRhs)
+ {
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ int size = actualRhs.size();
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #endif
+ Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+ }
+
+ general_matrix_vector_product
+ <Index,LhsScalar,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+ actualLhs.rows(), actualLhs.cols(),
+ &actualLhs.coeffRef(0,0), actualLhs.outerStride(),
+ actualRhsPtr, 1,
+ &dest.coeffRef(0,0), dest.innerStride(),
+ actualAlpha);
+ }
+};
+
+template<> struct gemv_selector<OnTheRight,ColMajor,false>
+{
+ template<typename ProductType, typename Dest>
+ static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ typedef typename Dest::Index Index;
+    // TODO make sure dest is sequentially stored in memory, otherwise use a temp
+ const Index size = prod.rhs().rows();
+ for(Index k=0; k<size; ++k)
+ dest += (alpha*prod.rhs().coeff(k)) * prod.lhs().col(k);
+ }
+};
+
+template<> struct gemv_selector<OnTheRight,RowMajor,false>
+{
+ template<typename ProductType, typename Dest>
+ static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha)
+ {
+ typedef typename Dest::Index Index;
+    // TODO make sure rhs is sequentially stored in memory, otherwise use a temp
+ const Index rows = prod.rows();
+ for(Index i=0; i<rows; ++i)
+ dest.coeffRef(i) += alpha * (prod.lhs().row(i).cwiseProduct(prod.rhs().transpose())).sum();
+ }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** \returns the matrix product of \c *this and \a other.
+ *
+ * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
+ *
+ * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename ProductReturnType<Derived,OtherDerived>::Type
+MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+  // A note regarding the function declaration: In MSVC, this function will sometimes
+  // not be inlined, since DenseStorage is an unwindable object for dynamic
+  // matrices, and product types hold a member to store the result.
+  // Thus tagging this function with EIGEN_STRONG_INLINE does not help.
+ enum {
+ ProductIsValid = Derived::ColsAtCompileTime==Dynamic
+ || OtherDerived::RowsAtCompileTime==Dynamic
+ || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+ AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwiseProduct(v2)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+#ifdef EIGEN_DEBUG_PRODUCT
+ internal::product_type<Derived,OtherDerived>::debug();
+#endif
+ return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
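+// Sketch of the vector cases the static asserts above distinguish (illustrative):
+//   Eigen::Vector3d u(1,2,3), v(4,5,6);
+//   // u * v;                              // rejected at compile time (3x1 * 3x1)
+//   double d = u.dot(v);                   // inner product
+//   Eigen::Vector3d w = u.cwiseProduct(v); // coefficient-wise product
+//   Eigen::Matrix3d M = u * v.transpose(); // valid matrix product (3x1 * 1x3)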
+
+/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
+ *
+ * The returned product will behave like any other expression: the coefficients of the product will be
+ * computed one at a time, as requested. This might be useful in some extremely rare cases when only
+ * a small and incoherent fraction of the result's coefficients has to be computed.
+ *
+ * \warning This version of the matrix product can be much, much slower. So use it only if you know
+ * what you are doing and have measured a true speed improvement.
+ *
+ * \sa operator*(const MatrixBase&)
+ */
+template<typename Derived>
+template<typename OtherDerived>
+const typename LazyProductReturnType<Derived,OtherDerived>::Type
+MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
+{
+ enum {
+ ProductIsValid = Derived::ColsAtCompileTime==Dynamic
+ || OtherDerived::RowsAtCompileTime==Dynamic
+ || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+ AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwiseProduct(v2)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+
+ return typename LazyProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
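+
+// A hedged sketch (illustrative names) of the rare case lazyProduct() targets:
+// only one coefficient of the product is read, so only one row-column dot
+// product of the operands is ever computed.
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100);
+//   Eigen::MatrixXd B = Eigen::MatrixXd::Random(100, 100);
+//   double c00 = A.lazyProduct(B).coeff(0, 0); // a single dot product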
+
+#endif // EIGEN_PRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Core/ProductBase.h b/extern/Eigen3/Eigen/src/Core/ProductBase.h
new file mode 100644
index 00000000000..3bd3487d6a2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/ProductBase.h
@@ -0,0 +1,288 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PRODUCTBASE_H
+#define EIGEN_PRODUCTBASE_H
+
+/** \class ProductBase
+ * \ingroup Core_Module
+ *
+ * \brief Base class for all matrix product expressions
+ */
+
+namespace internal {
+template<typename Derived, typename _Lhs, typename _Rhs>
+struct traits<ProductBase<Derived,_Lhs,_Rhs> >
+{
+ typedef MatrixXpr XprKind;
+ typedef typename remove_all<_Lhs>::type Lhs;
+ typedef typename remove_all<_Rhs>::type Rhs;
+ typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+ typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
+ typename traits<Rhs>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ enum {
+ RowsAtCompileTime = traits<Lhs>::RowsAtCompileTime,
+ ColsAtCompileTime = traits<Rhs>::ColsAtCompileTime,
+ MaxRowsAtCompileTime = traits<Lhs>::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = traits<Rhs>::MaxColsAtCompileTime,
+ Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0)
+ | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit,
+ // Note that EvalBeforeNestingBit and NestByRefBit
+ // are not used in practice because nested is overloaded for products
+ CoeffReadCost = 0 // FIXME: why is this needed?
+ };
+};
+}
+
+#define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \
+ typedef ProductBase<Derived, Lhs, Rhs > Base; \
+ EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+ typedef typename Base::LhsNested LhsNested; \
+ typedef typename Base::_LhsNested _LhsNested; \
+ typedef typename Base::LhsBlasTraits LhsBlasTraits; \
+ typedef typename Base::ActualLhsType ActualLhsType; \
+ typedef typename Base::_ActualLhsType _ActualLhsType; \
+ typedef typename Base::RhsNested RhsNested; \
+ typedef typename Base::_RhsNested _RhsNested; \
+ typedef typename Base::RhsBlasTraits RhsBlasTraits; \
+ typedef typename Base::ActualRhsType ActualRhsType; \
+ typedef typename Base::_ActualRhsType _ActualRhsType; \
+ using Base::m_lhs; \
+ using Base::m_rhs;
+
+template<typename Derived, typename Lhs, typename Rhs>
+class ProductBase : public MatrixBase<Derived>
+{
+ public:
+ typedef MatrixBase<Derived> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase)
+
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+ typedef internal::blas_traits<_LhsNested> LhsBlasTraits;
+ typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
+ typedef typename internal::remove_all<ActualLhsType>::type _ActualLhsType;
+ typedef typename internal::traits<Lhs>::Scalar LhsScalar;
+
+ typedef typename Rhs::Nested RhsNested;
+ typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+ typedef internal::blas_traits<_RhsNested> RhsBlasTraits;
+ typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
+ typedef typename internal::remove_all<ActualRhsType>::type _ActualRhsType;
+ typedef typename internal::traits<Rhs>::Scalar RhsScalar;
+
+ // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once
+ typedef CoeffBasedProduct<LhsNested, RhsNested, 0> FullyLazyCoeffBaseProductType;
+
+ public:
+
+ typedef typename Base::PlainObject PlainObject;
+
+ ProductBase(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows()
+ && "invalid matrix product"
+ && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
+ }
+
+ inline Index rows() const { return m_lhs.rows(); }
+ inline Index cols() const { return m_rhs.cols(); }
+
+ template<typename Dest>
+ inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); }
+
+ template<typename Dest>
+ inline void addTo(Dest& dst) const { scaleAndAddTo(dst,1); }
+
+ template<typename Dest>
+ inline void subTo(Dest& dst) const { scaleAndAddTo(dst,-1); }
+
+ template<typename Dest>
+ inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { derived().scaleAndAddTo(dst,alpha); }
+
+ const _LhsNested& lhs() const { return m_lhs; }
+ const _RhsNested& rhs() const { return m_rhs; }
+
+ // Implicit conversion to the nested type (trigger the evaluation of the product)
+ operator const PlainObject& () const
+ {
+ m_result.resize(m_lhs.rows(), m_rhs.cols());
+ derived().evalTo(m_result);
+ return m_result;
+ }
+
+ const Diagonal<const FullyLazyCoeffBaseProductType,0> diagonal() const
+ { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
+
+ template<int Index>
+ const Diagonal<FullyLazyCoeffBaseProductType,Index> diagonal() const
+ { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
+
+ const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(Index index) const
+ { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); }
+
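+ // A hedged usage note (illustrative names): thanks to the fully lazy product
+ // used above, taking the diagonal of a product never forms the full matrix;
+ // only the n diagonal coefficients are computed.
+ //
+ //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 100), B = A.transpose();
+ //   Eigen::VectorXd d = (A * B).diagonal(); // O(n^2) work instead of O(n^3)
+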
+ // Restrict coeff accessors to 1x1 expressions. No need to care about mutators here, since this isn't an lvalue expression.
+ typename Base::CoeffReturnType coeff(Index row, Index col) const
+ {
+#ifdef EIGEN2_SUPPORT
+ return lhs().row(row).cwiseProduct(rhs().col(col).transpose()).sum();
+#else
+ EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+ eigen_assert(this->rows() == 1 && this->cols() == 1);
+ return derived().coeff(row,col);
+#endif
+ }
+
+ typename Base::CoeffReturnType coeff(Index i) const
+ {
+ EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+ eigen_assert(this->rows() == 1 && this->cols() == 1);
+ return derived().coeff(i);
+ }
+
+ const Scalar& coeffRef(Index row, Index col) const
+ {
+ EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+ eigen_assert(this->rows() == 1 && this->cols() == 1);
+ return derived().coeffRef(row,col);
+ }
+
+ const Scalar& coeffRef(Index i) const
+ {
+ EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+ eigen_assert(this->rows() == 1 && this->cols() == 1);
+ return derived().coeffRef(i);
+ }
+
+ protected:
+
+ const LhsNested m_lhs;
+ const RhsNested m_rhs;
+
+ mutable PlainObject m_result;
+};
+
+// here we need to overload the nested rule for products
+// such that the nested type is a const reference to a plain matrix
+namespace internal {
+template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
+struct nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
+{
+ typedef PlainObject const& type;
+};
+}
+
+template<typename NestedProduct>
+class ScaledProduct;
+
+// Note that these operator* functions are not defined as member
+// functions of ProductBase, because otherwise we would have to
+// redefine all the overloads defined in MatrixBase. Furthermore, using
+// "using Base::operator*" would not work with MSVC.
+//
+// Also note that here we accept any compatible scalar types.
+template<typename Derived,typename Lhs,typename Rhs>
+const ScaledProduct<Derived>
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::Scalar x)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+template<typename Derived,typename Lhs,typename Rhs>
+typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
+ const ScaledProduct<Derived> >::type
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, typename Derived::RealScalar x)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+
+template<typename Derived,typename Lhs,typename Rhs>
+const ScaledProduct<Derived>
+operator*(typename Derived::Scalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+template<typename Derived,typename Lhs,typename Rhs>
+typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
+ const ScaledProduct<Derived> >::type
+operator*(typename Derived::RealScalar x,const ProductBase<Derived,Lhs,Rhs>& prod)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+namespace internal {
+template<typename NestedProduct>
+struct traits<ScaledProduct<NestedProduct> >
+ : traits<ProductBase<ScaledProduct<NestedProduct>,
+ typename NestedProduct::_LhsNested,
+ typename NestedProduct::_RhsNested> >
+{
+ typedef typename traits<NestedProduct>::StorageKind StorageKind;
+};
+}
+
+template<typename NestedProduct>
+class ScaledProduct
+ : public ProductBase<ScaledProduct<NestedProduct>,
+ typename NestedProduct::_LhsNested,
+ typename NestedProduct::_RhsNested>
+{
+ public:
+ typedef ProductBase<ScaledProduct<NestedProduct>,
+ typename NestedProduct::_LhsNested,
+ typename NestedProduct::_RhsNested> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::PlainObject PlainObject;
+// EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct)
+
+ ScaledProduct(const NestedProduct& prod, Scalar x)
+ : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {}
+
+ template<typename Dest>
+ inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,m_alpha); }
+
+ template<typename Dest>
+ inline void addTo(Dest& dst) const { scaleAndAddTo(dst,m_alpha); }
+
+ template<typename Dest>
+ inline void subTo(Dest& dst) const { scaleAndAddTo(dst,-m_alpha); }
+
+ template<typename Dest>
+ inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha); }
+
+ const Scalar& alpha() const { return m_alpha; }
+
+ protected:
+ const NestedProduct& m_prod;
+ Scalar m_alpha;
+};
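+
+// A hedged usage sketch (illustrative names): the scalar factor is captured by
+// ScaledProduct and forwarded as the alpha of scaleAndAddTo(), so the product
+// kernel applies it on the fly rather than in a separate scaling pass.
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(50, 50), B = A;
+//   Eigen::MatrixXd C(50, 50);
+//   C.noalias() = 2.0 * (A * B); // one pass over the data, alpha == 2.0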
+
+/** \internal
+ * Overloaded to perform an efficient C = (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+{
+ other.derived().evalTo(derived());
+ return derived();
+}
+
+
+#endif // EIGEN_PRODUCTBASE_H
diff --git a/extern/Eigen2/Eigen/src/Array/Random.h b/extern/Eigen3/Eigen/src/Core/Random.h
index 9185fe4a7d3..b7d90103a5b 100644
--- a/extern/Eigen2/Eigen/src/Array/Random.h
+++ b/extern/Eigen3/Eigen/src/Core/Random.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -25,42 +25,46 @@
#ifndef EIGEN_RANDOM_H
#define EIGEN_RANDOM_H
-template<typename Scalar> struct ei_scalar_random_op EIGEN_EMPTY_STRUCT {
- inline ei_scalar_random_op(void) {}
- inline const Scalar operator() (int, int) const { return ei_random<Scalar>(); }
+namespace internal {
+
+template<typename Scalar> struct scalar_random_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op)
+ template<typename Index>
+ inline const Scalar operator() (Index, Index = 0) const { return random<Scalar>(); }
};
+
template<typename Scalar>
-struct ei_functor_traits<ei_scalar_random_op<Scalar> >
+struct functor_traits<scalar_random_op<Scalar> >
{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; };
-/** \array_module
- *
- * \returns a random matrix (not an expression, the matrix is immediately evaluated).
+} // end namespace internal
+
+/** \returns a random matrix expression
*
* The parameters \a rows and \a cols are the number of rows and of columns of
* the returned matrix. Must be compatible with this MatrixBase type.
*
* This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
- * it is redundant to pass \a rows and \a cols as arguments, so ei_random() should be used
+ * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
* instead.
*
- * \addexample RandomExample \label How to create a matrix with random coefficients
- *
* Example: \include MatrixBase_random_int_int.cpp
* Output: \verbinclude MatrixBase_random_int_int.out
*
- * \sa MatrixBase::setRandom(), MatrixBase::Random(int), MatrixBase::Random()
+ * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+ * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
+ * behavior with expressions involving random matrices.
+ *
+ * \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random()
*/
template<typename Derived>
-inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
-MatrixBase<Derived>::Random(int rows, int cols)
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random(Index rows, Index cols)
{
- return NullaryExpr(rows, cols, ei_scalar_random_op<Scalar>());
+ return NullaryExpr(rows, cols, internal::scalar_random_op<Scalar>());
}
-/** \array_module
- *
- * \returns a random vector (not an expression, the vector is immediately evaluated).
+/** \returns a random vector expression
*
* The parameter \a size is the size of the returned vector.
* Must be compatible with this MatrixBase type.
@@ -68,25 +72,26 @@ MatrixBase<Derived>::Random(int rows, int cols)
* \only_for_vectors
*
* This variant is meant to be used for dynamic-size vector types. For fixed-size types,
- * it is redundant to pass \a size as argument, so ei_random() should be used
+ * it is redundant to pass \a size as argument, so Random() should be used
* instead.
*
* Example: \include MatrixBase_random_int.cpp
* Output: \verbinclude MatrixBase_random_int.out
*
- * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random()
+ * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+ * a temporary vector whenever it is nested in a larger expression. This prevents unexpected
+ * behavior with expressions involving random matrices.
+ *
+ * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random()
*/
template<typename Derived>
-inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
-MatrixBase<Derived>::Random(int size)
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random(Index size)
{
- return NullaryExpr(size, ei_scalar_random_op<Scalar>());
+ return NullaryExpr(size, internal::scalar_random_op<Scalar>());
}
-/** \array_module
- *
- * \returns a fixed-size random matrix or vector
- * (not an expression, the matrix is immediately evaluated).
+/** \returns a fixed-size random matrix or vector expression
*
* This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
* need to use the variants taking size arguments.
@@ -94,26 +99,28 @@ MatrixBase<Derived>::Random(int size)
* Example: \include MatrixBase_random.cpp
* Output: \verbinclude MatrixBase_random.out
*
- * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random(int)
+ * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+ * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
+ * behavior with expressions involving random matrices.
+ *
+ * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index)
*/
template<typename Derived>
-inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
-MatrixBase<Derived>::Random()
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random()
{
- return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_random_op<Scalar>());
+ return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op<Scalar>());
}
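+// A hedged recap of the three variants documented above (illustrative names):
+//
+//   Eigen::Matrix3f a = Eigen::Matrix3f::Random();     // fixed size: no arguments
+//   Eigen::MatrixXf b = Eigen::MatrixXf::Random(4, 5); // dynamic matrix: rows, cols
+//   Eigen::VectorXf c = Eigen::VectorXf::Random(10);   // dynamic vector: size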
-/** \array_module
- *
- * Sets all coefficients in this expression to random values.
+/** Sets all coefficients in this expression to random values.
*
* Example: \include MatrixBase_setRandom.cpp
* Output: \verbinclude MatrixBase_setRandom.out
*
- * \sa class CwiseNullaryOp, setRandom(int), setRandom(int,int)
+ * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
*/
template<typename Derived>
-inline Derived& MatrixBase<Derived>::setRandom()
+inline Derived& DenseBase<Derived>::setRandom()
{
return *this = Random(rows(), cols());
}
@@ -125,11 +132,11 @@ inline Derived& MatrixBase<Derived>::setRandom()
* Example: \include Matrix_setRandom_int.cpp
* Output: \verbinclude Matrix_setRandom_int.out
*
- * \sa MatrixBase::setRandom(), setRandom(int,int), class CwiseNullaryOp, MatrixBase::Random()
+ * \sa MatrixBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, MatrixBase::Random()
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setRandom(int size)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index size)
{
resize(size);
return setRandom();
@@ -143,11 +150,11 @@ Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setRandom(int size)
* Example: \include Matrix_setRandom_int_int.cpp
* Output: \verbinclude Matrix_setRandom_int_int.out
*
- * \sa MatrixBase::setRandom(), setRandom(int), class CwiseNullaryOp, MatrixBase::Random()
+ * \sa MatrixBase::setRandom(), setRandom(Index), class CwiseNullaryOp, MatrixBase::Random()
*/
-template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
-EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>&
-Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setRandom(int rows, int cols)
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index rows, Index cols)
{
resize(rows, cols);
return setRandom();
diff --git a/extern/Eigen3/Eigen/src/Core/Redux.h b/extern/Eigen3/Eigen/src/Core/Redux.h
new file mode 100644
index 00000000000..f9f5a95d546
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Redux.h
@@ -0,0 +1,404 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_REDUX_H
+#define EIGEN_REDUX_H
+
+namespace internal {
+
+// TODO
+// * implement other kinds of vectorization
+// * factorize code
+
+/***************************************************************************
+* Part 1 : the logic deciding a strategy for vectorization and unrolling
+***************************************************************************/
+
+template<typename Func, typename Derived>
+struct redux_traits
+{
+public:
+ enum {
+ PacketSize = packet_traits<typename Derived::Scalar>::size,
+ InnerMaxSize = int(Derived::IsRowMajor)
+ ? Derived::MaxColsAtCompileTime
+ : Derived::MaxRowsAtCompileTime
+ };
+
+ enum {
+ MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit)
+ && (functor_traits<Func>::PacketAccess),
+ MayLinearVectorize = MightVectorize && (int(Derived::Flags)&LinearAccessBit),
+ MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize
+ };
+
+public:
+ enum {
+ Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
+ : int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
+ : int(DefaultTraversal)
+ };
+
+public:
+ enum {
+ Cost = ( Derived::SizeAtCompileTime == Dynamic
+ || Derived::CoeffReadCost == Dynamic
+ || (Derived::SizeAtCompileTime!=1 && functor_traits<Func>::Cost == Dynamic)
+ ) ? Dynamic
+ : Derived::SizeAtCompileTime * Derived::CoeffReadCost
+ + (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
+ UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
+ };
+
+public:
+ enum {
+ Unrolling = Cost != Dynamic && Cost <= UnrollingLimit
+ ? CompleteUnrolling
+ : NoUnrolling
+ };
+};
+
+/***************************************************************************
+* Part 2 : unrollers
+***************************************************************************/
+
+/*** no vectorization ***/
+
+template<typename Func, typename Derived, int Start, int Length>
+struct redux_novec_unroller
+{
+ enum {
+ HalfLength = Length/2
+ };
+
+ typedef typename Derived::Scalar Scalar;
+
+ EIGEN_STRONG_INLINE static Scalar run(const Derived &mat, const Func& func)
+ {
+ return func(redux_novec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
+ redux_novec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func));
+ }
+};
+
+template<typename Func, typename Derived, int Start>
+struct redux_novec_unroller<Func, Derived, Start, 1>
+{
+ enum {
+ outer = Start / Derived::InnerSizeAtCompileTime,
+ inner = Start % Derived::InnerSizeAtCompileTime
+ };
+
+ typedef typename Derived::Scalar Scalar;
+
+ EIGEN_STRONG_INLINE static Scalar run(const Derived &mat, const Func&)
+ {
+ return mat.coeffByOuterInner(outer, inner);
+ }
+};
+
+// This is actually dead code and will never be called. It is required
+// to prevent false warnings regarding failed inlining, though:
+// for a length of 0, run() will never be called at all.
+template<typename Func, typename Derived, int Start>
+struct redux_novec_unroller<Func, Derived, Start, 0>
+{
+ typedef typename Derived::Scalar Scalar;
+ EIGEN_STRONG_INLINE static Scalar run(const Derived&, const Func&) { return Scalar(); }
+};
+
+/*** vectorization ***/
+
+template<typename Func, typename Derived, int Start, int Length>
+struct redux_vec_unroller
+{
+ enum {
+ PacketSize = packet_traits<typename Derived::Scalar>::size,
+ HalfLength = Length/2
+ };
+
+ typedef typename Derived::Scalar Scalar;
+ typedef typename packet_traits<Scalar>::type PacketScalar;
+
+ EIGEN_STRONG_INLINE static PacketScalar run(const Derived &mat, const Func& func)
+ {
+ return func.packetOp(
+ redux_vec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
+ redux_vec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func) );
+ }
+};
+
+template<typename Func, typename Derived, int Start>
+struct redux_vec_unroller<Func, Derived, Start, 1>
+{
+ enum {
+ index = Start * packet_traits<typename Derived::Scalar>::size,
+ outer = index / int(Derived::InnerSizeAtCompileTime),
+ inner = index % int(Derived::InnerSizeAtCompileTime),
+ alignment = (Derived::Flags & AlignedBit) ? Aligned : Unaligned
+ };
+
+ typedef typename Derived::Scalar Scalar;
+ typedef typename packet_traits<Scalar>::type PacketScalar;
+
+ EIGEN_STRONG_INLINE static PacketScalar run(const Derived &mat, const Func&)
+ {
+ return mat.template packetByOuterInner<alignment>(outer, inner);
+ }
+};
+
+/***************************************************************************
+* Part 3 : implementation of all cases
+***************************************************************************/
+
+template<typename Func, typename Derived,
+ int Traversal = redux_traits<Func, Derived>::Traversal,
+ int Unrolling = redux_traits<Func, Derived>::Unrolling
+>
+struct redux_impl;
+
+template<typename Func, typename Derived>
+struct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
+{
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+ static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func)
+ {
+ eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+ Scalar res;
+ res = mat.coeffByOuterInner(0, 0);
+ for(Index i = 1; i < mat.innerSize(); ++i)
+ res = func(res, mat.coeffByOuterInner(0, i));
+ for(Index i = 1; i < mat.outerSize(); ++i)
+ for(Index j = 0; j < mat.innerSize(); ++j)
+ res = func(res, mat.coeffByOuterInner(i, j));
+ return res;
+ }
+};
+
+template<typename Func, typename Derived>
+struct redux_impl<Func,Derived, DefaultTraversal, CompleteUnrolling>
+ : public redux_novec_unroller<Func,Derived, 0, Derived::SizeAtCompileTime>
+{};
+
+template<typename Func, typename Derived>
+struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
+{
+ typedef typename Derived::Scalar Scalar;
+ typedef typename packet_traits<Scalar>::type PacketScalar;
+ typedef typename Derived::Index Index;
+
+ static Scalar run(const Derived& mat, const Func& func)
+ {
+ const Index size = mat.size();
+ eigen_assert(size && "you are using an empty matrix");
+ const Index packetSize = packet_traits<Scalar>::size;
+ const Index alignedStart = first_aligned(mat);
+ enum {
+ alignment = bool(Derived::Flags & DirectAccessBit) || bool(Derived::Flags & AlignedBit)
+ ? Aligned : Unaligned
+ };
+ const Index alignedSize = ((size-alignedStart)/packetSize)*packetSize;
+ const Index alignedEnd = alignedStart + alignedSize;
+ Scalar res;
+ if(alignedSize)
+ {
+ PacketScalar packet_res = mat.template packet<alignment>(alignedStart);
+ for(Index index = alignedStart + packetSize; index < alignedEnd; index += packetSize)
+ packet_res = func.packetOp(packet_res, mat.template packet<alignment>(index));
+ res = func.predux(packet_res);
+
+ for(Index index = 0; index < alignedStart; ++index)
+ res = func(res,mat.coeff(index));
+
+ for(Index index = alignedEnd; index < size; ++index)
+ res = func(res,mat.coeff(index));
+ }
+ else // too small to vectorize anything.
+ // since this is dynamic-size, and hence inefficient anyway for such small sizes, don't try to optimize.
+ {
+ res = mat.coeff(0);
+ for(Index index = 1; index < size; ++index)
+ res = func(res,mat.coeff(index));
+ }
+
+ return res;
+ }
+};
+
+template<typename Func, typename Derived>
+struct redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
+{
+ typedef typename Derived::Scalar Scalar;
+ typedef typename packet_traits<Scalar>::type PacketScalar;
+ typedef typename Derived::Index Index;
+
+ static Scalar run(const Derived& mat, const Func& func)
+ {
+ eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+ const Index innerSize = mat.innerSize();
+ const Index outerSize = mat.outerSize();
+ enum {
+ packetSize = packet_traits<Scalar>::size
+ };
+ const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
+ Scalar res;
+ if(packetedInnerSize)
+ {
+ PacketScalar packet_res = mat.template packet<Unaligned>(0,0);
+ for(Index j=0; j<outerSize; ++j)
+ for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))
+ packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned>(j,i));
+
+ res = func.predux(packet_res);
+ for(Index j=0; j<outerSize; ++j)
+ for(Index i=packetedInnerSize; i<innerSize; ++i)
+ res = func(res, mat.coeffByOuterInner(j,i));
+ }
+ else // too small to vectorize anything.
+ // since this is dynamic-size, and hence inefficient anyway for such small sizes, don't try to optimize.
+ {
+ res = redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>::run(mat, func);
+ }
+
+ return res;
+ }
+};
+
+template<typename Func, typename Derived>
+struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>
+{
+ typedef typename Derived::Scalar Scalar;
+ typedef typename packet_traits<Scalar>::type PacketScalar;
+ enum {
+ PacketSize = packet_traits<Scalar>::size,
+ Size = Derived::SizeAtCompileTime,
+ VectorizedSize = (Size / PacketSize) * PacketSize
+ };
+ EIGEN_STRONG_INLINE static Scalar run(const Derived& mat, const Func& func)
+ {
+ eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+ Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func));
+ if (VectorizedSize != Size)
+ res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func));
+ return res;
+ }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Part 4 : public API
+***************************************************************************/
+
+
+/** \returns the result of a full redux operation on the whole matrix or vector using \a func
+ *
+ * The template parameter \a Func is the type of the functor \a func, which must be
+ * an associative binary operator. Both current STL and TR1 functor styles are handled.
+ *
+ * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
+ */
+template<typename Derived>
+template<typename Func>
+EIGEN_STRONG_INLINE typename internal::result_of<Func(typename internal::traits<Derived>::Scalar)>::type
+DenseBase<Derived>::redux(const Func& func) const
+{
+ typedef typename internal::remove_all<typename Derived::Nested>::type ThisNested;
+ return internal::redux_impl<Func, ThisNested>
+ ::run(derived(), func);
+}
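+
+// A hedged sketch (illustrative names): redux() with an STL-style binary functor,
+// as allowed by the note above; sum() below is the equivalent built-in shortcut.
+// (std::plus lives in <functional>.)
+//
+//   Eigen::Vector4d v(1, 2, 3, 4);
+//   double total = v.redux(std::plus<double>()); // 10, same as v.sum()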
+
+/** \returns the minimum of all coefficients of *this
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::minCoeff() const
+{
+ return this->redux(Eigen::internal::scalar_min_op<Scalar>());
+}
+
+/** \returns the maximum of all coefficients of *this
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::maxCoeff() const
+{
+ return this->redux(Eigen::internal::scalar_max_op<Scalar>());
+}
+
+/** \returns the sum of all coefficients of *this
+ *
+ * \sa trace(), prod(), mean()
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::sum() const
+{
+ if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
+ return Scalar(0);
+ return this->redux(Eigen::internal::scalar_sum_op<Scalar>());
+}
+
+/** \returns the mean of all coefficients of *this
+*
+* \sa trace(), prod(), sum()
+*/
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::mean() const
+{
+ return Scalar(this->redux(Eigen::internal::scalar_sum_op<Scalar>())) / Scalar(this->size());
+}
+
+/** \returns the product of all coefficients of *this
+ *
+ * Example: \include MatrixBase_prod.cpp
+ * Output: \verbinclude MatrixBase_prod.out
+ *
+ * \sa sum(), mean(), trace()
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::prod() const
+{
+ if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
+ return Scalar(1);
+ return this->redux(Eigen::internal::scalar_product_op<Scalar>());
+}
+
+/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
+ *
+ * \c *this can be any matrix, not necessarily square.
+ *
+ * \sa diagonal(), sum()
+ */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+MatrixBase<Derived>::trace() const
+{
+ return derived().diagonal().sum();
+}
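+
+// A worked micro-example (illustrative values) of trace() as diagonal().sum():
+//
+//   Eigen::Matrix2d m;
+//   m << 1, 2,
+//        3, 4;
+//   double t = m.trace(); // 1 + 4 == 5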
+
+#endif // EIGEN_REDUX_H
diff --git a/extern/Eigen3/Eigen/src/Core/Replicate.h b/extern/Eigen3/Eigen/src/Core/Replicate.h
new file mode 100644
index 00000000000..d2f9712db0f
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Replicate.h
@@ -0,0 +1,179 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_REPLICATE_H
+#define EIGEN_REPLICATE_H
+
+/**
+ * \class Replicate
+ * \ingroup Core_Module
+ *
+ * \brief Expression of the multiple replication of a matrix or vector
+ *
+ * \param MatrixType the type of the object we are replicating
+ *
+ * This class represents an expression of the multiple replication of a matrix or vector.
+ * It is the return type of DenseBase::replicate() and most of the time
+ * this is the only way it is used.
+ *
+ * \sa DenseBase::replicate()
+ */
+
+namespace internal {
+template<typename MatrixType,int RowFactor,int ColFactor>
+struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
+ : traits<MatrixType>
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef typename traits<MatrixType>::XprKind XprKind;
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+ enum {
+ RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic
+ ? Dynamic
+ : RowFactor * MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic
+ ? Dynamic
+ : ColFactor * MatrixType::ColsAtCompileTime,
+ // FIXME: we don't propagate the max sizes!
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1
+ : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0
+ : (MatrixType::Flags & RowMajorBit) ? 1 : 0,
+ Flags = (_MatrixTypeNested::Flags & HereditaryBits & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0),
+ CoeffReadCost = _MatrixTypeNested::CoeffReadCost
+ };
+};
+}
+
+template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
+ : public internal::dense_xpr_base< Replicate<MatrixType,RowFactor,ColFactor> >::type
+{
+ public:
+
+ typedef typename internal::dense_xpr_base<Replicate>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
+
+ template<typename OriginalMatrixType>
+ inline explicit Replicate(const OriginalMatrixType& matrix)
+ : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
+ {
+ EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
+ THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
+ eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic);
+ }
+
+ template<typename OriginalMatrixType>
+ inline Replicate(const OriginalMatrixType& matrix, int rowFactor, int colFactor)
+ : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
+ {
+ EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
+ THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
+ }
+
+ inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
+ inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ // try to avoid using modulo; this is a pure optimization strategy
+ const Index actual_row = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
+ : RowFactor==1 ? row
+ : row%m_matrix.rows();
+ const Index actual_col = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
+ : ColFactor==1 ? col
+ : col%m_matrix.cols();
+
+ return m_matrix.coeff(actual_row, actual_col);
+ }
+ template<int LoadMode>
+ inline PacketScalar packet(Index row, Index col) const
+ {
+ const Index actual_row = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
+ : RowFactor==1 ? row
+ : row%m_matrix.rows();
+ const Index actual_col = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
+ : ColFactor==1 ? col
+ : col%m_matrix.cols();
+
+ return m_matrix.template packet<LoadMode>(actual_row, actual_col);
+ }
+
+
+ protected:
+ const typename MatrixType::Nested m_matrix;
+ const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor;
+ const internal::variable_if_dynamic<Index, ColFactor> m_colFactor;
+};
+
+/**
+ * \return an expression of the replication of \c *this
+ *
+ * Example: \include MatrixBase_replicate.cpp
+ * Output: \verbinclude MatrixBase_replicate.out
+ *
+ * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
+ */
+template<typename Derived>
+template<int RowFactor, int ColFactor>
+inline const Replicate<Derived,RowFactor,ColFactor>
+DenseBase<Derived>::replicate() const
+{
+ return Replicate<Derived,RowFactor,ColFactor>(derived());
+}
+
+/**
+ * \return an expression of the replication of \c *this
+ *
+ * Example: \include MatrixBase_replicate_int_int.cpp
+ * Output: \verbinclude MatrixBase_replicate_int_int.out
+ *
+ * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
+ */
+template<typename Derived>
+inline const Replicate<Derived,Dynamic,Dynamic>
+DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
+{
+ return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
+}
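+
+// A hedged sketch (illustrative names) of the replication pattern:
+//
+//   Eigen::RowVector2i v(1, 2);
+//   Eigen::MatrixXi m = v.replicate(2, 2);
+//   // m is the 2x4 matrix:  1 2 1 2
+//   //                       1 2 1 2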
+
+/**
+ * \return an expression of the replication of each column (or row) of \c *this
+ *
+ * Example: \include DirectionWise_replicate_int.cpp
+ * Output: \verbinclude DirectionWise_replicate_int.out
+ *
+ * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
+ */
+template<typename ExpressionType, int Direction>
+const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
+VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
+{
+ return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
+ (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
+}
+
+#endif // EIGEN_REPLICATE_H
diff --git a/extern/Eigen3/Eigen/src/Core/ReturnByValue.h b/extern/Eigen3/Eigen/src/Core/ReturnByValue.h
new file mode 100644
index 00000000000..24c5a4e215d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/ReturnByValue.h
@@ -0,0 +1,99 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_RETURNBYVALUE_H
+#define EIGEN_RETURNBYVALUE_H
+
+/** \class ReturnByValue
+ * \ingroup Core_Module
+ *
+ * \brief Mechanism for returning an expression by value and evaluating it directly into its destination
+ */
+
+namespace internal {
+
+template<typename Derived>
+struct traits<ReturnByValue<Derived> >
+ : public traits<typename traits<Derived>::ReturnType>
+{
+ enum {
+ // We're disabling DirectAccess because e.g. the constructor of
+ // the Block-with-DirectAccess expression requires a coeffRef() method.
+ // Also, we don't want to have to implement the stride stuff.
+ Flags = (traits<typename traits<Derived>::ReturnType>::Flags
+ | EvalBeforeNestingBit) & ~DirectAccessBit
+ };
+};
+
+/* The ReturnByValue object doesn't even have a coeff() method.
+ * So the only way that nesting it in an expression can work is by evaluating it into a plain matrix;
+ * hence internal::nested always gives the plain return matrix type.
+ *
+ * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit?
+ */
+template<typename Derived,int n,typename PlainObject>
+struct nested<ReturnByValue<Derived>, n, PlainObject>
+{
+ typedef typename traits<Derived>::ReturnType type;
+};
+
+} // end namespace internal
+
+template<typename Derived> class ReturnByValue
+ : public internal::dense_xpr_base< ReturnByValue<Derived> >::type
+{
+ public:
+ typedef typename internal::traits<Derived>::ReturnType ReturnType;
+
+ typedef typename internal::dense_xpr_base<ReturnByValue>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)
+
+ template<typename Dest>
+ inline void evalTo(Dest& dst) const
+ { static_cast<const Derived*>(this)->evalTo(dst); }
+ inline Index rows() const { return static_cast<const Derived*>(this)->rows(); }
+ inline Index cols() const { return static_cast<const Derived*>(this)->cols(); }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
+ class Unusable{
+ Unusable(const Unusable&) {}
+ Unusable& operator=(const Unusable&) {return *this;}
+ };
+ const Unusable& coeff(Index) const { return *reinterpret_cast<const Unusable*>(this); }
+ const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
+ Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
+ Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
+#endif
+};
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+{
+ other.evalTo(derived());
+ return derived();
+}
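+
+// A hedged sketch (all names illustrative, not from the original source) of the
+// pattern this class enables: derive from ReturnByValue, declare the ReturnType
+// through a traits specialization in namespace Eigen::internal, and provide an
+// evalTo(); the assignment operator above then forwards to that evalTo().
+//
+//   template<typename Scalar> class MakeZero;
+//   namespace Eigen { namespace internal {
+//     template<typename Scalar> struct traits<MakeZero<Scalar> >
+//     { typedef Matrix<Scalar,Dynamic,Dynamic> ReturnType; };
+//   } }
+//   template<typename Scalar> class MakeZero
+//     : public Eigen::ReturnByValue<MakeZero<Scalar> >
+//   {
+//     public:
+//       explicit MakeZero(int n) : m_n(n) {}
+//       template<typename Dest> void evalTo(Dest& dst) const { dst.setZero(m_n, m_n); }
+//       int rows() const { return m_n; }
+//       int cols() const { return m_n; }
+//     private:
+//       int m_n;
+//   };
+//
+//   // Eigen::MatrixXd z = MakeZero<double>(3); // z is the 3x3 zero matrix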
+
+#endif // EIGEN_RETURNBYVALUE_H
diff --git a/extern/Eigen3/Eigen/src/Core/Reverse.h b/extern/Eigen3/Eigen/src/Core/Reverse.h
new file mode 100644
index 00000000000..600744ae758
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Reverse.h
@@ -0,0 +1,230 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Ricard Marxer <email@ricardmarxer.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_REVERSE_H
+#define EIGEN_REVERSE_H
+
+/** \class Reverse
+ * \ingroup Core_Module
+ *
+ * \brief Expression of the reverse of a vector or matrix
+ *
+ * \param MatrixType the type of the object of which we are taking the reverse
+ *
+ * This class represents an expression of the reverse of a vector.
+ * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse()
+ * and most of the time this is the only way it is used.
+ *
+ * \sa MatrixBase::reverse(), VectorwiseOp::reverse()
+ */
+
+namespace internal {
+
+template<typename MatrixType, int Direction>
+struct traits<Reverse<MatrixType, Direction> >
+ : traits<MatrixType>
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef typename traits<MatrixType>::XprKind XprKind;
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+
+ // let's enable LinearAccess only with vectorization because of the product overhead
+ LinearAccess = ( (Direction==BothDirections) && (int(_MatrixTypeNested::Flags)&PacketAccessBit) )
+ ? LinearAccessBit : 0,
+
+ Flags = int(_MatrixTypeNested::Flags) & (HereditaryBits | LvalueBit | PacketAccessBit | LinearAccess),
+
+ CoeffReadCost = _MatrixTypeNested::CoeffReadCost
+ };
+};
+
+template<typename PacketScalar, bool ReversePacket> struct reverse_packet_cond
+{
+ static inline PacketScalar run(const PacketScalar& x) { return preverse(x); }
+};
+
+template<typename PacketScalar> struct reverse_packet_cond<PacketScalar,false>
+{
+ static inline PacketScalar run(const PacketScalar& x) { return x; }
+};
+
+} // end namespace internal
+
+template<typename MatrixType, int Direction> class Reverse
+ : public internal::dense_xpr_base< Reverse<MatrixType, Direction> >::type
+{
+ public:
+
+ typedef typename internal::dense_xpr_base<Reverse>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
+ using Base::IsRowMajor;
+
+ // The next line is necessary because otherwise the const version of operator()
+ // would be hidden by the non-const version defined in this file.
+ using Base::operator();
+
+ protected:
+ enum {
+ PacketSize = internal::packet_traits<Scalar>::size,
+ IsColMajor = !IsRowMajor,
+ ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
+ ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
+ OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
+ OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1,
+ ReversePacket = (Direction == BothDirections)
+ || ((Direction == Vertical) && IsColMajor)
+ || ((Direction == Horizontal) && IsRowMajor)
+ };
+ typedef internal::reverse_packet_cond<PacketScalar,ReversePacket> reverse_packet;
+ public:
+
+ inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { }
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ inline Index innerStride() const
+ {
+ return -m_matrix.innerStride();
+ }
+
+ inline Scalar& operator()(Index row, Index col)
+ {
+ eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
+ return coeffRef(row, col);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ return m_matrix.const_cast_derived().coeffRef(ReverseRow ? m_matrix.rows() - row - 1 : row,
+ ReverseCol ? m_matrix.cols() - col - 1 : col);
+ }
+
+ inline CoeffReturnType coeff(Index row, Index col) const
+ {
+ return m_matrix.coeff(ReverseRow ? m_matrix.rows() - row - 1 : row,
+ ReverseCol ? m_matrix.cols() - col - 1 : col);
+ }
+
+ inline CoeffReturnType coeff(Index index) const
+ {
+ return m_matrix.coeff(m_matrix.size() - index - 1);
+ }
+
+ inline Scalar& coeffRef(Index index)
+ {
+ return m_matrix.const_cast_derived().coeffRef(m_matrix.size() - index - 1);
+ }
+
+ inline Scalar& operator()(Index index)
+ {
+ eigen_assert(index >= 0 && index < m_matrix.size());
+ return coeffRef(index);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index row, Index col) const
+ {
+ return reverse_packet::run(m_matrix.template packet<LoadMode>(
+ ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
+ ReverseCol ? m_matrix.cols() - col - OffsetCol : col));
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
+ {
+ m_matrix.const_cast_derived().template writePacket<LoadMode>(
+ ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
+ ReverseCol ? m_matrix.cols() - col - OffsetCol : col,
+ reverse_packet::run(x));
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index index) const
+ {
+ return internal::preverse(m_matrix.template packet<LoadMode>( m_matrix.size() - index - PacketSize ));
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index index, const PacketScalar& x)
+ {
+ m_matrix.const_cast_derived().template writePacket<LoadMode>(m_matrix.size() - index - PacketSize, internal::preverse(x));
+ }
+
+ protected:
+ const typename MatrixType::Nested m_matrix;
+};
+
+/** \returns an expression of the reverse of *this.
+ *
+ * Example: \include MatrixBase_reverse.cpp
+ * Output: \verbinclude MatrixBase_reverse.out
+ *
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::ReverseReturnType
+DenseBase<Derived>::reverse()
+{
+ return derived();
+}
+
+/** This is the const version of reverse(). */
+template<typename Derived>
+inline const typename DenseBase<Derived>::ConstReverseReturnType
+DenseBase<Derived>::reverse() const
+{
+ return derived();
+}
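+
+// A quick illustrative example (names not from the original source):
+//
+//   Eigen::Vector3i v(1, 2, 3);
+//   Eigen::Vector3i r = v.reverse(); // r is (3, 2, 1)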
+
+/** This is the "in place" version of reverse: it reverses \c *this.
+ *
+ * In most cases it is probably better to simply use the reversed expression
+ * of a matrix. However, when reversing the matrix data itself is really needed,
+ * then this "in-place" version is probably the right choice because it provides
+ * the following additional features:
+ * - it is less error-prone: doing the same operation with .reverse() requires special care:
+ * \code m = m.reverse().eval(); \endcode
+ * - this API makes it possible to avoid creating a temporary (the current implementation creates a temporary, but that could be avoided using swap)
+ * - it allows for future optimizations (cache friendliness, etc.)
+ *
+ * \sa reverse() */
+template<typename Derived>
+inline void DenseBase<Derived>::reverseInPlace()
+{
+ derived() = derived().reverse().eval();
+}
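+
+// A hedged illustration of the aliasing pitfall mentioned above (illustrative
+// names): a plain v = v.reverse(); would read and overwrite the same
+// coefficients, so either an explicit eval() or reverseInPlace() is needed.
+//
+//   Eigen::Vector4i v(1, 2, 3, 4);
+//   v.reverseInPlace();      // v is now (4, 3, 2, 1)
+//   v = v.reverse().eval();  // back to (1, 2, 3, 4), via an explicit temporary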
+
+
+#endif // EIGEN_REVERSE_H
diff --git a/extern/Eigen2/Eigen/src/Array/Select.h b/extern/Eigen3/Eigen/src/Core/Select.h
index 9dc3fb1b27a..d0cd66a261a 100644
--- a/extern/Eigen2/Eigen/src/Array/Select.h
+++ b/extern/Eigen3/Eigen/src/Core/Select.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -25,9 +25,8 @@
#ifndef EIGEN_SELECT_H
#define EIGEN_SELECT_H
-/** \array_module \ingroup Array
- *
- * \class Select
+/** \class Select
+ * \ingroup Core_Module
*
* \brief Expression of a coefficient wise version of the C++ ternary operator ?:
*
@@ -36,15 +35,19 @@
* \param ElseMatrixType the type of the \em else expression
*
* This class represents an expression of a coefficient wise version of the C++ ternary operator ?:.
- * It is the return type of MatrixBase::select() and most of the time this is the only way it is used.
+ * It is the return type of DenseBase::select() and most of the time this is the only way it is used.
*
- * \sa MatrixBase::select(const MatrixBase<ThenDerived>&, const MatrixBase<ElseDerived>&) const
+ * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const
*/
+namespace internal {
template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
-struct ei_traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
+struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
+ : traits<ThenMatrixType>
{
- typedef typename ei_traits<ThenMatrixType>::Scalar Scalar;
+ typedef typename traits<ThenMatrixType>::Scalar Scalar;
+ typedef Dense StorageKind;
+ typedef typename traits<ThenMatrixType>::XprKind XprKind;
typedef typename ConditionMatrixType::Nested ConditionMatrixNested;
typedef typename ThenMatrixType::Nested ThenMatrixNested;
typedef typename ElseMatrixType::Nested ElseMatrixNested;
@@ -54,41 +57,43 @@ struct ei_traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits,
- CoeffReadCost = ei_traits<typename ei_cleantype<ConditionMatrixNested>::type>::CoeffReadCost
- + EIGEN_ENUM_MAX(ei_traits<typename ei_cleantype<ThenMatrixNested>::type>::CoeffReadCost,
- ei_traits<typename ei_cleantype<ElseMatrixNested>::type>::CoeffReadCost)
+ CoeffReadCost = traits<typename remove_all<ConditionMatrixNested>::type>::CoeffReadCost
+ + EIGEN_SIZE_MAX(traits<typename remove_all<ThenMatrixNested>::type>::CoeffReadCost,
+ traits<typename remove_all<ElseMatrixNested>::type>::CoeffReadCost)
};
};
+}
template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
-class Select : ei_no_assignment_operator,
- public MatrixBase<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
+class Select : internal::no_assignment_operator,
+ public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(Select)
+ typedef typename internal::dense_xpr_base<Select>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Select)
Select(const ConditionMatrixType& conditionMatrix,
const ThenMatrixType& thenMatrix,
const ElseMatrixType& elseMatrix)
: m_condition(conditionMatrix), m_then(thenMatrix), m_else(elseMatrix)
{
- ei_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
- ei_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
+ eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
+ eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
}
- int rows() const { return m_condition.rows(); }
- int cols() const { return m_condition.cols(); }
+ Index rows() const { return m_condition.rows(); }
+ Index cols() const { return m_condition.cols(); }
- const Scalar coeff(int i, int j) const
+ const Scalar coeff(Index i, Index j) const
{
if (m_condition.coeff(i,j))
return m_then.coeff(i,j);
else
return m_else.coeff(i,j);
}
-
- const Scalar coeff(int i) const
+
+ const Scalar coeff(Index i) const
{
if (m_condition.coeff(i))
return m_then.coeff(i);
@@ -103,9 +108,7 @@ class Select : ei_no_assignment_operator,
};
-/** \array_module
- *
- * \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j)
+/** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j)
* if \c *this(i,j), and \a elseMatrix(i,j) otherwise.
*
* Example: \include MatrixBase_select.cpp
@@ -116,43 +119,39 @@ class Select : ei_no_assignment_operator,
template<typename Derived>
template<typename ThenDerived,typename ElseDerived>
inline const Select<Derived,ThenDerived,ElseDerived>
-MatrixBase<Derived>::select(const MatrixBase<ThenDerived>& thenMatrix,
- const MatrixBase<ElseDerived>& elseMatrix) const
+DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
+ const DenseBase<ElseDerived>& elseMatrix) const
{
return Select<Derived,ThenDerived,ElseDerived>(derived(), thenMatrix.derived(), elseMatrix.derived());
}
-/** \array_module
- *
- * Version of MatrixBase::select(const MatrixBase&, const MatrixBase&) with
+/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
* the \em else expression being a scalar value.
*
- * \sa MatrixBase::select(const MatrixBase<ThenDerived>&, const MatrixBase<ElseDerived>&) const, class Select
+ * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
*/
template<typename Derived>
template<typename ThenDerived>
-inline const Select<Derived,ThenDerived, NestByValue<typename ThenDerived::ConstantReturnType> >
-MatrixBase<Derived>::select(const MatrixBase<ThenDerived>& thenMatrix,
+inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
typename ThenDerived::Scalar elseScalar) const
{
- return Select<Derived,ThenDerived,NestByValue<typename ThenDerived::ConstantReturnType> >(
+ return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>(
derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));
}
-/** \array_module
- *
- * Version of MatrixBase::select(const MatrixBase&, const MatrixBase&) with
+/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
* the \em then expression being a scalar value.
*
- * \sa MatrixBase::select(const MatrixBase<ThenDerived>&, const MatrixBase<ElseDerived>&) const, class Select
+ * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
*/
template<typename Derived>
template<typename ElseDerived>
-inline const Select<Derived, NestByValue<typename ElseDerived::ConstantReturnType>, ElseDerived >
-MatrixBase<Derived>::select(typename ElseDerived::Scalar thenScalar,
- const MatrixBase<ElseDerived>& elseMatrix) const
+inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+DenseBase<Derived>::select(typename ElseDerived::Scalar thenScalar,
+ const DenseBase<ElseDerived>& elseMatrix) const
{
- return Select<Derived,NestByValue<typename ElseDerived::ConstantReturnType>,ElseDerived>(
+ return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>(
derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());
}
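For context, a minimal usage sketch of the select() API migrated above (not part of the patch; sizes and values are illustrative). Note that the condition may be an array expression while the then/else operands are matrices, since the traits above take XprKind from ThenMatrixType:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(2,2);
  m << 1, -2,
       -3, 4;

  // Coefficient-wise ternary: keep positive entries, zero out the rest.
  Eigen::MatrixXd clamped =
      (m.array() > 0).select(m, Eigen::MatrixXd::Zero(2,2));

  // Scalar 'else' overload: the constant expands via ConstantReturnType,
  // no longer wrapped in NestByValue after this change.
  Eigen::MatrixXd floored = (m.array() > 0).select(m, 0.0);

  std::cout << clamped << "\n\n" << floored << "\n";
  return 0;
}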
diff --git a/extern/Eigen3/Eigen/src/Core/SelfAdjointView.h b/extern/Eigen3/Eigen/src/Core/SelfAdjointView.h
new file mode 100644
index 00000000000..4bb68755eee
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/SelfAdjointView.h
@@ -0,0 +1,325 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SELFADJOINTMATRIX_H
+#define EIGEN_SELFADJOINTMATRIX_H
+
+/** \class SelfAdjointView
+ * \ingroup Core_Module
+ *
+ *
+ * \brief Expression of a selfadjoint matrix from a triangular part of a dense matrix
+ *
+ * \param MatrixType the type of the dense matrix storing the coefficients
+ * \param TriangularPart can be either \c #Lower or \c #Upper
+ *
+ * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
+ * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView()
+ * and most of the time this is the only way that it is used.
+ *
+ * \sa class TriangularBase, MatrixBase::selfadjointView()
+ */
+
+namespace internal {
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SelfAdjointView<MatrixType, UpLo> > : traits<MatrixType>
+{
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+ typedef MatrixType ExpressionType;
+ typedef typename MatrixType::PlainObject DenseMatrixType;
+ enum {
+ Mode = UpLo | SelfAdjoint,
+ Flags = MatrixTypeNestedCleaned::Flags & (HereditaryBits)
+ & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)), // FIXME these flags should be preserved
+ CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost
+ };
+};
+}
+
+template <typename Lhs, int LhsMode, bool LhsIsVector,
+ typename Rhs, int RhsMode, bool RhsIsVector>
+struct SelfadjointProductMatrix;
+
+// FIXME could also be called SelfAdjointWrapper to be consistent with DiagonalWrapper ??
+template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
+ : public TriangularBase<SelfAdjointView<MatrixType, UpLo> >
+{
+ public:
+
+ typedef TriangularBase<SelfAdjointView> Base;
+ typedef typename internal::traits<SelfAdjointView>::MatrixTypeNested MatrixTypeNested;
+ typedef typename internal::traits<SelfAdjointView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
+
+ /** \brief The type of coefficients in this matrix */
+ typedef typename internal::traits<SelfAdjointView>::Scalar Scalar;
+
+ typedef typename MatrixType::Index Index;
+
+ enum {
+ Mode = internal::traits<SelfAdjointView>::Mode
+ };
+ typedef typename MatrixType::PlainObject PlainObject;
+
+ inline SelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index outerStride() const { return m_matrix.outerStride(); }
+ inline Index innerStride() const { return m_matrix.innerStride(); }
+
+ /** \sa MatrixBase::coeff()
+ * \warning the coordinates must fit into the referenced triangular part
+ */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ Base::check_coordinates_internal(row, col);
+ return m_matrix.coeff(row, col);
+ }
+
+ /** \sa MatrixBase::coeffRef()
+ * \warning the coordinates must fit into the referenced triangular part
+ */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ Base::check_coordinates_internal(row, col);
+ return m_matrix.const_cast_derived().coeffRef(row, col);
+ }
+
+ /** \internal */
+ const MatrixTypeNestedCleaned& _expression() const { return m_matrix; }
+
+ const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
+ MatrixTypeNestedCleaned& nestedExpression() { return *const_cast<MatrixTypeNestedCleaned*>(&m_matrix); }
+
+ /** Efficient self-adjoint matrix times vector/matrix product */
+ template<typename OtherDerived>
+ SelfadjointProductMatrix<MatrixType,Mode,false,OtherDerived,0,OtherDerived::IsVectorAtCompileTime>
+ operator*(const MatrixBase<OtherDerived>& rhs) const
+ {
+ return SelfadjointProductMatrix
+ <MatrixType,Mode,false,OtherDerived,0,OtherDerived::IsVectorAtCompileTime>
+ (m_matrix, rhs.derived());
+ }
+
+ /** Efficient vector/matrix times self-adjoint matrix product */
+ template<typename OtherDerived> friend
+ SelfadjointProductMatrix<OtherDerived,0,OtherDerived::IsVectorAtCompileTime,MatrixType,Mode,false>
+ operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView& rhs)
+ {
+ return SelfadjointProductMatrix
+ <OtherDerived,0,OtherDerived::IsVectorAtCompileTime,MatrixType,Mode,false>
+ (lhs.derived(),rhs.m_matrix);
+ }
+
+ /** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this:
+ * \f$ this = this + \alpha u v^* + conj(\alpha) v u^* \f$
+ * \returns a reference to \c *this
+ *
+ * The vectors \a u and \a v \b must be column vectors; however, they can be
+ * adjoint expressions without any overhead. Only the meaningful triangular
+ * part of the matrix is updated, the rest is left unchanged.
+ *
+ * \sa rankUpdate(const MatrixBase<DerivedU>&, Scalar)
+ */
+ template<typename DerivedU, typename DerivedV>
+ SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, Scalar alpha = Scalar(1));
+
+ /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+ * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+ *
+ * \returns a reference to \c *this
+ *
+ * Note that to perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+ * call this function with u.adjoint().
+ *
+ * \sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar)
+ */
+ template<typename DerivedU>
+ SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+
+/////////// Cholesky module ///////////
+
+ const LLT<PlainObject, UpLo> llt() const;
+ const LDLT<PlainObject, UpLo> ldlt() const;
+
+/////////// Eigenvalue module ///////////
+
+ /** Real part of #Scalar */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ /** Return type of eigenvalues() */
+ typedef Matrix<RealScalar, internal::traits<MatrixType>::ColsAtCompileTime, 1> EigenvaluesReturnType;
+
+ EigenvaluesReturnType eigenvalues() const;
+ RealScalar operatorNorm() const;
+
+ #ifdef EIGEN2_SUPPORT
+ template<typename OtherDerived>
+ SelfAdjointView& operator=(const MatrixBase<OtherDerived>& other)
+ {
+ enum {
+ OtherPart = UpLo == Upper ? StrictlyLower : StrictlyUpper
+ };
+ m_matrix.const_cast_derived().template triangularView<UpLo>() = other;
+ m_matrix.const_cast_derived().template triangularView<OtherPart>() = other.adjoint();
+ return *this;
+ }
+ template<typename OtherMatrixType, unsigned int OtherMode>
+ SelfAdjointView& operator=(const TriangularView<OtherMatrixType, OtherMode>& other)
+ {
+ enum {
+ OtherPart = UpLo == Upper ? StrictlyLower : StrictlyUpper
+ };
+ m_matrix.const_cast_derived().template triangularView<UpLo>() = other.toDenseMatrix();
+ m_matrix.const_cast_derived().template triangularView<OtherPart>() = other.toDenseMatrix().adjoint();
+ return *this;
+ }
+ #endif
+
+ protected:
+ const MatrixTypeNested m_matrix;
+};
+
+
+// template<typename OtherDerived, typename MatrixType, unsigned int UpLo>
+// internal::selfadjoint_matrix_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >
+// operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView<MatrixType,UpLo>& rhs)
+// {
+// return internal::matrix_selfadjoint_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >(lhs.derived(),rhs);
+// }
+
+// selfadjoint to dense matrix
+
+namespace internal {
+
+template<typename Derived1, typename Derived2, int UnrollCount, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount, ClearOpposite>
+{
+ enum {
+ col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
+ row = (UnrollCount-1) % Derived1::RowsAtCompileTime
+ };
+
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount-1, ClearOpposite>::run(dst, src);
+
+ if(row == col)
+ dst.coeffRef(row, col) = real(src.coeff(row, col));
+ else if(row < col)
+ dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
+ }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, 0, ClearOpposite>
+{
+ inline static void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, int UnrollCount, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount, ClearOpposite>
+{
+ enum {
+ col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
+ row = (UnrollCount-1) % Derived1::RowsAtCompileTime
+ };
+
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount-1, ClearOpposite>::run(dst, src);
+
+ if(row == col)
+ dst.coeffRef(row, col) = real(src.coeff(row, col));
+ else if(row > col)
+ dst.coeffRef(col, row) = conj(dst.coeffRef(row, col) = src.coeff(row, col));
+ }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, 0, ClearOpposite>
+{
+ inline static void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, Dynamic, ClearOpposite>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ for(Index j = 0; j < dst.cols(); ++j)
+ {
+ for(Index i = 0; i < j; ++i)
+ {
+ dst.copyCoeff(i, j, src);
+ dst.coeffRef(j,i) = conj(dst.coeff(i,j));
+ }
+ dst.copyCoeff(j, j, src);
+ }
+ }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, Dynamic, ClearOpposite>
+{
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ typedef typename Derived1::Index Index;
+ for(Index i = 0; i < dst.rows(); ++i)
+ {
+ for(Index j = 0; j < i; ++j)
+ {
+ dst.copyCoeff(i, j, src);
+ dst.coeffRef(j,i) = conj(dst.coeff(i,j));
+ }
+ dst.copyCoeff(i, i, src);
+ }
+ }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of MatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
+MatrixBase<Derived>::selfadjointView() const
+{
+ return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
+MatrixBase<Derived>::selfadjointView()
+{
+ return derived();
+}
+
+#endif // EIGEN_SELFADJOINTMATRIX_H
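For context, a minimal sketch of the SelfAdjointView API introduced by this new file (not part of the patch; values are illustrative):

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d m = Eigen::Matrix3d::Random();
  Eigen::Vector3d u = Eigen::Vector3d::Random();

  // Read only the lower triangle of m as a selfadjoint matrix; assigning to
  // a dense matrix goes through the triangular_assignment_selector above.
  Eigen::Matrix3d a = m.selfadjointView<Eigen::Lower>();

  // Efficient selfadjoint * vector product (SelfadjointProductMatrix path).
  Eigen::Vector3d y = m.selfadjointView<Eigen::Lower>() * u;

  // Rank-1 update m += 2 u u^T, touching only the stored lower triangle.
  m.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0);

  (void)a; (void)y;
  return 0;
}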
diff --git a/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
new file mode 100644
index 00000000000..4e9ca88745d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -0,0 +1,195 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SELFCWISEBINARYOP_H
+#define EIGEN_SELFCWISEBINARYOP_H
+
+/** \class SelfCwiseBinaryOp
+ * \ingroup Core_Module
+ *
+ * \internal
+ *
+ * \brief Internal helper class for optimizing operators like +=, -=
+ *
+ * This is a pseudo expression class re-implementing the copyCoeff/copyPacket
+ * methods to directly perform += and -= operations in an optimal way. In particular,
+ * this makes sure that the input/output data are loaded only once, using
+ * aligned packet loads.
+ *
+ * \sa class SwapWrapper for a similar trick.
+ */
+
+namespace internal {
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct traits<SelfCwiseBinaryOp<BinaryOp,Lhs,Rhs> >
+ : traits<CwiseBinaryOp<BinaryOp,Lhs,Rhs> >
+{
+ enum {
+ // Note that it is still a good idea to preserve the DirectAccessBit
+ // so that assign can correctly align the data.
+ Flags = traits<CwiseBinaryOp<BinaryOp,Lhs,Rhs> >::Flags | (Lhs::Flags&DirectAccessBit) | (Lhs::Flags&LvalueBit),
+ OuterStrideAtCompileTime = Lhs::OuterStrideAtCompileTime,
+ InnerStrideAtCompileTime = Lhs::InnerStrideAtCompileTime
+ };
+};
+}
+
+template<typename BinaryOp, typename Lhs, typename Rhs> class SelfCwiseBinaryOp
+ : public internal::dense_xpr_base< SelfCwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
+{
+ public:
+
+ typedef typename internal::dense_xpr_base<SelfCwiseBinaryOp>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SelfCwiseBinaryOp)
+
+ typedef typename internal::packet_traits<Scalar>::type Packet;
+
+ inline SelfCwiseBinaryOp(Lhs& xpr, const BinaryOp& func = BinaryOp()) : m_matrix(xpr), m_functor(func) {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index outerStride() const { return m_matrix.outerStride(); }
+ inline Index innerStride() const { return m_matrix.innerStride(); }
+ inline const Scalar* data() const { return m_matrix.data(); }
+
+ // note that this function is needed by assign to correctly align loads/stores
+ // TODO make Assign use .data()
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(Lhs)
+ return m_matrix.const_cast_derived().coeffRef(row, col);
+ }
+ inline const Scalar& coeffRef(Index row, Index col) const
+ {
+ return m_matrix.coeffRef(row, col);
+ }
+
+ // note that this function is needed by assign to correctly align loads/stores
+ // TODO make Assign use .data()
+ inline Scalar& coeffRef(Index index)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(Lhs)
+ return m_matrix.const_cast_derived().coeffRef(index);
+ }
+ inline const Scalar& coeffRef(Index index) const
+ {
+ return m_matrix.const_cast_derived().coeffRef(index);
+ }
+
+ template<typename OtherDerived>
+ void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
+ {
+ OtherDerived& _other = other.const_cast_derived();
+ eigen_internal_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ Scalar& tmp = m_matrix.coeffRef(row,col);
+ tmp = m_functor(tmp, _other.coeff(row,col));
+ }
+
+ template<typename OtherDerived>
+ void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
+ {
+ OtherDerived& _other = other.const_cast_derived();
+ eigen_internal_assert(index >= 0 && index < m_matrix.size());
+ Scalar& tmp = m_matrix.coeffRef(index);
+ tmp = m_functor(tmp, _other.coeff(index));
+ }
+
+ template<typename OtherDerived, int StoreMode, int LoadMode>
+ void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
+ {
+ OtherDerived& _other = other.const_cast_derived();
+ eigen_internal_assert(row >= 0 && row < rows()
+ && col >= 0 && col < cols());
+ m_matrix.template writePacket<StoreMode>(row, col,
+ m_functor.packetOp(m_matrix.template packet<StoreMode>(row, col),_other.template packet<LoadMode>(row, col)) );
+ }
+
+ template<typename OtherDerived, int StoreMode, int LoadMode>
+ void copyPacket(Index index, const DenseBase<OtherDerived>& other)
+ {
+ OtherDerived& _other = other.const_cast_derived();
+ eigen_internal_assert(index >= 0 && index < m_matrix.size());
+ m_matrix.template writePacket<StoreMode>(index,
+ m_functor.packetOp(m_matrix.template packet<StoreMode>(index),_other.template packet<LoadMode>(index)) );
+ }
+
+ // reimplement lazyAssign to handle complex *= real
+ // see CwiseBinaryOp ctor for details
+ template<typename RhsDerived>
+ EIGEN_STRONG_INLINE SelfCwiseBinaryOp& lazyAssign(const DenseBase<RhsDerived>& rhs)
+ {
+ EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs,RhsDerived)
+ EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename RhsDerived::Scalar);
+
+ #ifdef EIGEN_DEBUG_ASSIGN
+ internal::assign_traits<SelfCwiseBinaryOp, RhsDerived>::debug();
+ #endif
+ eigen_assert(rows() == rhs.rows() && cols() == rhs.cols());
+ internal::assign_impl<SelfCwiseBinaryOp, RhsDerived>::run(*this,rhs.derived());
+ #ifndef EIGEN_NO_DEBUG
+ this->checkTransposeAliasing(rhs.derived());
+ #endif
+ return *this;
+ }
+
+ // overloaded to honor evaluation of special matrices
+ // maybe another solution would be to not use SelfCwiseBinaryOp
+ // at first...
+ SelfCwiseBinaryOp& operator=(const Rhs& _rhs)
+ {
+ typename internal::nested<Rhs>::type rhs(_rhs);
+ return Base::operator=(rhs);
+ }
+
+ protected:
+ Lhs& m_matrix;
+ const BinaryOp& m_functor;
+
+ private:
+ SelfCwiseBinaryOp& operator=(const SelfCwiseBinaryOp&);
+};
+
+template<typename Derived>
+inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
+{
+ typedef typename Derived::PlainObject PlainObject;
+ SelfCwiseBinaryOp<internal::scalar_product_op<Scalar>, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
+ tmp = PlainObject::Constant(rows(),cols(),other);
+ return derived();
+}
+
+template<typename Derived>
+inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
+{
+ typedef typename internal::conditional<NumTraits<Scalar>::IsInteger,
+ internal::scalar_quotient_op<Scalar>,
+ internal::scalar_product_op<Scalar> >::type BinOp;
+ typedef typename Derived::PlainObject PlainObject;
+ SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
+ tmp = PlainObject::Constant(rows(),cols(), NumTraits<Scalar>::IsInteger ? other : Scalar(1)/other);
+ return derived();
+}
+
+#endif // EIGEN_SELFCWISEBINARYOP_H
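The operator/= overload above deserves a note: for non-integer scalars it divides once and multiplies coefficient-wise by the reciprocal, while for integer scalars it falls back to a true coefficient-wise quotient. A minimal sketch (not part of the patch):

#include <Eigen/Dense>

int main()
{
  Eigen::VectorXd vd = Eigen::VectorXd::Constant(4, 10.0);
  vd /= 4.0;  // floating point: evaluated as vd *= (1.0/4.0)

  Eigen::VectorXi vi = Eigen::VectorXi::Constant(4, 10);
  vi /= 4;    // integer Scalar: exact coefficient-wise quotient, gives 2
  return 0;
}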
diff --git a/extern/Eigen3/Eigen/src/Core/SolveTriangular.h b/extern/Eigen3/Eigen/src/Core/SolveTriangular.h
new file mode 100644
index 00000000000..71e129c7f12
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/SolveTriangular.h
@@ -0,0 +1,263 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SOLVETRIANGULAR_H
+#define EIGEN_SOLVETRIANGULAR_H
+
+namespace internal {
+
+// Forward declarations:
+// The following two routines are implemented in the products/TriangularSolver*.h files
+template<typename LhsScalar, typename RhsScalar, typename Index, int Side, int Mode, bool Conjugate, int StorageOrder>
+struct triangular_solve_vector;
+
+template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>
+struct triangular_solve_matrix;
+
+// small helper struct extracting some traits on the underlying solver operation
+template<typename Lhs, typename Rhs, int Side>
+class trsolve_traits
+{
+ private:
+ enum {
+ RhsIsVectorAtCompileTime = (Side==OnTheLeft ? Rhs::ColsAtCompileTime : Rhs::RowsAtCompileTime)==1
+ };
+ public:
+ enum {
+ Unrolling = (RhsIsVectorAtCompileTime && Rhs::SizeAtCompileTime != Dynamic && Rhs::SizeAtCompileTime <= 8)
+ ? CompleteUnrolling : NoUnrolling,
+ RhsVectors = RhsIsVectorAtCompileTime ? 1 : Dynamic
+ };
+};
+
+template<typename Lhs, typename Rhs,
+ int Side, // can be OnTheLeft/OnTheRight
+ int Mode, // can be Upper/Lower | UnitDiag
+ int Unrolling = trsolve_traits<Lhs,Rhs,Side>::Unrolling,
+ int RhsVectors = trsolve_traits<Lhs,Rhs,Side>::RhsVectors
+ >
+struct triangular_solver_selector;
+
+template<typename Lhs, typename Rhs, int Side, int Mode>
+struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1>
+{
+ typedef typename Lhs::Scalar LhsScalar;
+ typedef typename Rhs::Scalar RhsScalar;
+ typedef blas_traits<Lhs> LhsProductTraits;
+ typedef typename LhsProductTraits::ExtractType ActualLhsType;
+ typedef Map<Matrix<RhsScalar,Dynamic,1>, Aligned> MappedRhs;
+ static void run(const Lhs& lhs, Rhs& rhs)
+ {
+ ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
+
+ // FIXME find a way to allow an inner stride if packet_traits<Scalar>::size==1
+
+ bool useRhsDirectly = Rhs::InnerStrideAtCompileTime==1 || rhs.innerStride()==1;
+
+ ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhs,rhs.size(),
+ (useRhsDirectly ? rhs.data() : 0));
+
+ if(!useRhsDirectly)
+ MappedRhs(actualRhs,rhs.size()) = rhs;
+
+ triangular_solve_vector<LhsScalar, RhsScalar, typename Lhs::Index, Side, Mode, LhsProductTraits::NeedToConjugate,
+ (int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor>
+ ::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs);
+
+ if(!useRhsDirectly)
+ rhs = MappedRhs(actualRhs, rhs.size());
+ }
+};
+
+// the rhs is a matrix
+template<typename Lhs, typename Rhs, int Side, int Mode>
+struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename Rhs::Index Index;
+ typedef blas_traits<Lhs> LhsProductTraits;
+ typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;
+ static void run(const Lhs& lhs, Rhs& rhs)
+ {
+ const ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
+ triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor,
+ (Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor>
+ ::run(lhs.rows(), Side==OnTheLeft? rhs.cols() : rhs.rows(), &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride());
+ }
+};
+
+/***************************************************************************
+* meta-unrolling implementation
+***************************************************************************/
+
+template<typename Lhs, typename Rhs, int Mode, int Index, int Size,
+ bool Stop = Index==Size>
+struct triangular_solver_unroller;
+
+template<typename Lhs, typename Rhs, int Mode, int Index, int Size>
+struct triangular_solver_unroller<Lhs,Rhs,Mode,Index,Size,false> {
+ enum {
+ IsLower = ((Mode&Lower)==Lower),
+ I = IsLower ? Index : Size - Index - 1,
+ S = IsLower ? 0 : I+1
+ };
+ static void run(const Lhs& lhs, Rhs& rhs)
+ {
+ if (Index>0)
+ rhs.coeffRef(I) -= lhs.row(I).template segment<Index>(S).transpose()
+ .cwiseProduct(rhs.template segment<Index>(S)).sum();
+
+ if(!(Mode & UnitDiag))
+ rhs.coeffRef(I) /= lhs.coeff(I,I);
+
+ triangular_solver_unroller<Lhs,Rhs,Mode,Index+1,Size>::run(lhs,rhs);
+ }
+};
+
+template<typename Lhs, typename Rhs, int Mode, int Index, int Size>
+struct triangular_solver_unroller<Lhs,Rhs,Mode,Index,Size,true> {
+ static void run(const Lhs&, Rhs&) {}
+};
+
+template<typename Lhs, typename Rhs, int Mode>
+struct triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,CompleteUnrolling,1> {
+ static void run(const Lhs& lhs, Rhs& rhs)
+ { triangular_solver_unroller<Lhs,Rhs,Mode,0,Rhs::SizeAtCompileTime>::run(lhs,rhs); }
+};
+
+template<typename Lhs, typename Rhs, int Mode>
+struct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> {
+ static void run(const Lhs& lhs, Rhs& rhs)
+ {
+ Transpose<const Lhs> trLhs(lhs);
+ Transpose<Rhs> trRhs(rhs);
+
+ triangular_solver_unroller<Transpose<const Lhs>,Transpose<Rhs>,
+ ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),
+ 0,Rhs::SizeAtCompileTime>::run(trLhs,trRhs);
+ }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* TriangularView methods
+***************************************************************************/
+
+/** "in-place" version of TriangularView::solve() where the result is written in \a other
+ *
+ * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
+ * This function will const_cast it, so constness isn't honored here.
+ *
+ * See TriangularView::solve() for the details.
+ */
+template<typename MatrixType, unsigned int Mode>
+template<int Side, typename OtherDerived>
+void TriangularView<MatrixType,Mode>::solveInPlace(const MatrixBase<OtherDerived>& _other) const
+{
+ OtherDerived& other = _other.const_cast_derived();
+ eigen_assert(cols() == rows());
+ eigen_assert( (Side==OnTheLeft && cols() == other.rows()) || (Side==OnTheRight && cols() == other.cols()) );
+ eigen_assert(!(Mode & ZeroDiag));
+ eigen_assert(Mode & (Upper|Lower));
+
+ enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit && OtherDerived::IsVectorAtCompileTime };
+ typedef typename internal::conditional<copy,
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+ OtherCopy otherCopy(other);
+
+ internal::triangular_solver_selector<MatrixType, typename internal::remove_reference<OtherCopy>::type,
+ Side, Mode>::run(nestedExpression(), otherCopy);
+
+ if (copy)
+ other = otherCopy;
+}
+
+/** \returns the product of the inverse of \c *this with \a other, \a *this being triangular.
+ *
+ * This function computes the inverse-matrix matrix product inverse(\c *this) * \a other if
+ * \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if
+ * \a Side==OnTheRight.
+ *
+ * The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
+ * diagonal must be non-zero). It works as a forward (resp. backward) substitution if \c *this
+ * is a lower (resp. upper) triangular matrix.
+ *
+ * Example: \include MatrixBase_marked.cpp
+ * Output: \verbinclude MatrixBase_marked.out
+ *
+ * This function returns an expression of the inverse-multiply and can work in-place if it is assigned
+ * to the same matrix or vector \a other.
+ *
+ * For users coming from BLAS, this function (and more specifically solveInPlace()) offers
+ * all the operations supported by the \c *TRSV and \c *TRSM BLAS routines.
+ *
+ * \sa TriangularView::solveInPlace()
+ */
+template<typename Derived, unsigned int Mode>
+template<int Side, typename Other>
+const internal::triangular_solve_retval<Side,TriangularView<Derived,Mode>,Other>
+TriangularView<Derived,Mode>::solve(const MatrixBase<Other>& other) const
+{
+ return internal::triangular_solve_retval<Side,TriangularView,Other>(*this, other.derived());
+}
+
+namespace internal {
+
+
+template<int Side, typename TriangularType, typename Rhs>
+struct traits<triangular_solve_retval<Side, TriangularType, Rhs> >
+{
+ typedef typename internal::plain_matrix_type_column_major<Rhs>::type ReturnType;
+};
+
+template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval
+ : public ReturnByValue<triangular_solve_retval<Side, TriangularType, Rhs> >
+{
+ typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
+ typedef ReturnByValue<triangular_solve_retval> Base;
+ typedef typename Base::Index Index;
+
+ triangular_solve_retval(const TriangularType& tri, const Rhs& rhs)
+ : m_triangularMatrix(tri), m_rhs(rhs)
+ {}
+
+ inline Index rows() const { return m_rhs.rows(); }
+ inline Index cols() const { return m_rhs.cols(); }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ if(!(is_same<RhsNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_rhs)))
+ dst = m_rhs;
+ m_triangularMatrix.template solveInPlace<Side>(dst);
+ }
+
+ protected:
+ const TriangularType& m_triangularMatrix;
+ const typename Rhs::Nested m_rhs;
+};
+
+} // namespace internal
+
+#endif // EIGEN_SOLVETRIANGULAR_H
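For context, a minimal sketch of the solver entry points defined above (not part of the patch; the matrices are illustrative, with the diagonal shifted to keep them invertible):

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  A.diagonal().array() += 4.0;  // keep the diagonal away from zero
  Eigen::Vector3d b = Eigen::Vector3d::Random();

  // x = inverse(L) * b with L the lower triangle of A (forward substitution).
  Eigen::Vector3d x = A.triangularView<Eigen::Lower>().solve(b);

  // In-place variant: b is overwritten with the solution.
  A.triangularView<Eigen::Lower>().solveInPlace(b);

  // Right-sided solve, X = B * inverse(U), via the Side template parameter.
  Eigen::Matrix3d B = Eigen::Matrix3d::Random();
  Eigen::Matrix3d X = A.triangularView<Eigen::Upper>().solve<Eigen::OnTheRight>(B);

  (void)x; (void)X;
  return 0;
}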
diff --git a/extern/Eigen3/Eigen/src/Core/StableNorm.h b/extern/Eigen3/Eigen/src/Core/StableNorm.h
new file mode 100644
index 00000000000..f667272e4a4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/StableNorm.h
@@ -0,0 +1,190 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_STABLENORM_H
+#define EIGEN_STABLENORM_H
+
+namespace internal {
+template<typename ExpressionType, typename Scalar>
+inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
+{
+ Scalar max = bl.cwiseAbs().maxCoeff();
+ if (max>scale)
+ {
+ ssq = ssq * abs2(scale/max);
+ scale = max;
+ invScale = Scalar(1)/scale;
+ }
+ // TODO if the max is much much smaller than the current scale,
+ // then we can neglect this sub vector
+ ssq += (bl*invScale).squaredNorm();
+}
+}
+
+/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
+ * This version uses a blockwise two-pass algorithm:
+ * 1 - find the largest absolute coefficient \c s
+ * 2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
+ *
+ * For architecture/scalar types supporting vectorization, this version
+ * is faster than blueNorm(). Otherwise the blueNorm() is much faster.
+ *
+ * \sa norm(), blueNorm(), hypotNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::stableNorm() const
+{
+ using std::min;
+ const Index blockSize = 4096;
+ RealScalar scale = 0;
+ RealScalar invScale = 1;
+ RealScalar ssq = 0; // sum of square
+ enum {
+ Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
+ };
+ Index n = size();
+ Index bi = internal::first_aligned(derived());
+ if (bi>0)
+ internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
+ for (; bi<n; bi+=blockSize)
+ internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
+ return scale * internal::sqrt(ssq);
+}
+
+/** \returns the \em l2 norm of \c *this using Blue's algorithm.
+ * A Portable Fortran Program to Find the Euclidean Norm of a Vector,
+ * ACM TOMS, Vol 4, Issue 1, 1978.
+ *
+ * For architecture/scalar types without vectorization, this version
+ * is much faster than stableNorm(). Otherwise the stableNorm() is faster.
+ *
+ * \sa norm(), stableNorm(), hypotNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::blueNorm() const
+{
+ using std::pow;
+ using std::min;
+ using std::max;
+ static Index nmax = -1;
+ static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
+ if(nmax <= 0)
+ {
+ int nbig, ibeta, it, iemin, iemax, iexp;
+ RealScalar abig, eps;
+ // This program calculates the machine-dependent constants
+ // b1, b2, s1m, s2m, relerr, overfl, nmax
+ // from the "basic" machine-dependent numbers
+ // nbig, ibeta, it, iemin, iemax, rbig.
+ // The following define the basic machine-dependent constants.
+ // For portability, the PORT subprograms "i1mach" and "r1mach"
+ // are used. For any specific computer, each of the assignment
+ // statements can be replaced
+ nbig = (std::numeric_limits<Index>::max)(); // largest integer
+ ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
+ it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
+ iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
+ iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
+ rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
+
+ iexp = -((1-iemin)/2);
+ b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
+ iexp = (iemax + 1 - it)/2;
+ b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange
+
+ iexp = (2-iemin)/2;
+ s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range
+ iexp = - ((iemax+it)/2);
+ s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
+
+ overfl = rbig*s2m; // overflow boundary for abig
+ eps = RealScalar(pow(double(ibeta), 1-it));
+ relerr = internal::sqrt(eps); // tolerance for neglecting asml
+ abig = RealScalar(1.0/eps - 1.0);
+ if (RealScalar(nbig)>abig) nmax = int(abig); // largest safe n
+ else nmax = nbig;
+ }
+ Index n = size();
+ RealScalar ab2 = b2 / RealScalar(n);
+ RealScalar asml = RealScalar(0);
+ RealScalar amed = RealScalar(0);
+ RealScalar abig = RealScalar(0);
+ for(Index j=0; j<n; ++j)
+ {
+ RealScalar ax = internal::abs(coeff(j));
+ if(ax > ab2) abig += internal::abs2(ax*s2m);
+ else if(ax < b1) asml += internal::abs2(ax*s1m);
+ else amed += internal::abs2(ax);
+ }
+ if(abig > RealScalar(0))
+ {
+ abig = internal::sqrt(abig);
+ if(abig > overfl)
+ {
+ eigen_assert(false && "overflow");
+ return rbig;
+ }
+ if(amed > RealScalar(0))
+ {
+ abig = abig/s2m;
+ amed = internal::sqrt(amed);
+ }
+ else
+ return abig/s2m;
+ }
+ else if(asml > RealScalar(0))
+ {
+ if (amed > RealScalar(0))
+ {
+ abig = internal::sqrt(amed);
+ amed = internal::sqrt(asml) / s1m;
+ }
+ else
+ return internal::sqrt(asml)/s1m;
+ }
+ else
+ return internal::sqrt(amed);
+ asml = (min)(abig, amed);
+ abig = (max)(abig, amed);
+ if(asml <= abig*relerr)
+ return abig;
+ else
+ return abig * internal::sqrt(RealScalar(1) + internal::abs2(asml/abig));
+}
+
+/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
+ * This version uses a concatenation of hypot() calls, and it is very slow.
+ *
+ * \sa norm(), stableNorm()
+ */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::hypotNorm() const
+{
+ return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());
+}
+
+#endif // EIGEN_STABLENORM_H
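To see why these variants exist, a minimal sketch (not part of the patch) with coefficients large enough that plain norm() overflows while the rescaling algorithms above do not:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d v = Eigen::Vector3d::Constant(1e200);
  std::cout << v.norm()       << "\n"   // inf: squaring 1e200 overflows a double
            << v.stableNorm() << "\n"   // ~1.732e200, blockwise rescaling
            << v.blueNorm()   << "\n"   // same value, Blue's algorithm
            << v.hypotNorm()  << "\n";  // same value, chained hypot()
  return 0;
}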
diff --git a/extern/Eigen3/Eigen/src/Core/Stride.h b/extern/Eigen3/Eigen/src/Core/Stride.h
new file mode 100644
index 00000000000..0430f111627
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Stride.h
@@ -0,0 +1,119 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_STRIDE_H
+#define EIGEN_STRIDE_H
+
+/** \class Stride
+ * \ingroup Core_Module
+ *
+ * \brief Holds stride information for Map
+ *
+ * This class holds the stride information needed to map an array with strides using class Map.
+ *
+ * It holds two values: the inner stride and the outer stride.
+ *
+ * The inner stride is the pointer increment between two consecutive entries within a given row of a
+ * row-major matrix or within a given column of a column-major matrix.
+ *
+ * The outer stride is the pointer increment between two consecutive rows of a row-major matrix or
+ * between two consecutive columns of a column-major matrix.
+ *
+ * These two values can be passed either at compile-time as template parameters, or at runtime as
+ * arguments to the constructor.
+ *
+ * Accordingly, this class takes two template parameters:
+ * \param _OuterStrideAtCompileTime the outer stride, or Dynamic if you want to specify it at runtime.
+ * \param _InnerStrideAtCompileTime the inner stride, or Dynamic if you want to specify it at runtime.
+ *
+ * Here is an example:
+ * \include Map_general_stride.cpp
+ * Output: \verbinclude Map_general_stride.out
+ *
+ * \sa class InnerStride, class OuterStride, \ref TopicStorageOrders
+ */
+template<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime>
+class Stride
+{
+ public:
+ typedef DenseIndex Index;
+ enum {
+ InnerStrideAtCompileTime = _InnerStrideAtCompileTime,
+ OuterStrideAtCompileTime = _OuterStrideAtCompileTime
+ };
+
+ /** Default constructor, for use when strides are fixed at compile time */
+ Stride()
+ : m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime)
+ {
+ eigen_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic);
+ }
+
+ /** Constructor allowing to pass the strides at runtime */
+ Stride(Index outerStride, Index innerStride)
+ : m_outer(outerStride), m_inner(innerStride)
+ {
+ eigen_assert(innerStride>=0 && outerStride>=0);
+ }
+
+ /** Copy constructor */
+ Stride(const Stride& other)
+ : m_outer(other.outer()), m_inner(other.inner())
+ {}
+
+ /** \returns the outer stride */
+ inline Index outer() const { return m_outer.value(); }
+ /** \returns the inner stride */
+ inline Index inner() const { return m_inner.value(); }
+
+ protected:
+ internal::variable_if_dynamic<Index, OuterStrideAtCompileTime> m_outer;
+ internal::variable_if_dynamic<Index, InnerStrideAtCompileTime> m_inner;
+};
+
+/** \brief Convenience specialization of Stride to specify only an inner stride.
+ * See class Map for some examples. */
+template<int Value = Dynamic>
+class InnerStride : public Stride<0, Value>
+{
+ typedef Stride<0, Value> Base;
+ public:
+ typedef DenseIndex Index;
+ InnerStride() : Base() {}
+ InnerStride(Index v) : Base(0, v) {}
+};
+
+/** \brief Convenience specialization of Stride to specify only an outer stride.
+ * See class Map for some examples. */
+template<int Value = Dynamic>
+class OuterStride : public Stride<Value, 0>
+{
+ typedef Stride<Value, 0> Base;
+ public:
+ typedef DenseIndex Index;
+ OuterStride() : Base() {}
+ OuterStride(Index v) : Base(v,0) {}
+};
+
+#endif // EIGEN_STRIDE_H
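For context, a minimal sketch of mapping external memory with these stride classes (not part of the patch; the buffer layout is an illustrative assumption):

#include <Eigen/Dense>

int main()
{
  double data[12];
  for (int i = 0; i < 12; ++i) data[i] = i;

  // 2x3 column-major view: inner stride 2 between entries of a column,
  // outer stride 4 between the starts of consecutive columns.
  Eigen::Map<Eigen::MatrixXd, 0, Eigen::Stride<4,2> >
      m(data, 2, 3, Eigen::Stride<4,2>());

  // Convenience specializations when only one stride matters.
  Eigen::Map<Eigen::VectorXd, 0, Eigen::InnerStride<2> > v(data, 6);

  (void)m; (void)v;
  return 0;
}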
diff --git a/extern/Eigen2/Eigen/src/Core/Swap.h b/extern/Eigen3/Eigen/src/Core/Swap.h
index 9aaac652fd8..5fb03286675 100644
--- a/extern/Eigen2/Eigen/src/Core/Swap.h
+++ b/extern/Eigen3/Eigen/src/Core/Swap.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
@@ -26,54 +26,58 @@
#define EIGEN_SWAP_H
/** \class SwapWrapper
+ * \ingroup Core_Module
*
* \internal
*
* \brief Internal helper class for swapping two expressions
*/
+namespace internal {
template<typename ExpressionType>
-struct ei_traits<SwapWrapper<ExpressionType> >
-{
- typedef typename ExpressionType::Scalar Scalar;
- enum {
- RowsAtCompileTime = ExpressionType::RowsAtCompileTime,
- ColsAtCompileTime = ExpressionType::ColsAtCompileTime,
- MaxRowsAtCompileTime = ExpressionType::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = ExpressionType::MaxColsAtCompileTime,
- Flags = ExpressionType::Flags,
- CoeffReadCost = ExpressionType::CoeffReadCost
- };
-};
+struct traits<SwapWrapper<ExpressionType> > : traits<ExpressionType> {};
+}
template<typename ExpressionType> class SwapWrapper
- : public MatrixBase<SwapWrapper<ExpressionType> >
+ : public internal::dense_xpr_base<SwapWrapper<ExpressionType> >::type
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(SwapWrapper)
- typedef typename ei_packet_traits<Scalar>::type Packet;
+ typedef typename internal::dense_xpr_base<SwapWrapper>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper)
+ typedef typename internal::packet_traits<Scalar>::type Packet;
inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
- inline int rows() const { return m_expression.rows(); }
- inline int cols() const { return m_expression.cols(); }
- inline int stride() const { return m_expression.stride(); }
+ inline Index rows() const { return m_expression.rows(); }
+ inline Index cols() const { return m_expression.cols(); }
+ inline Index outerStride() const { return m_expression.outerStride(); }
+ inline Index innerStride() const { return m_expression.innerStride(); }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_expression.const_cast_derived().coeffRef(row, col);
}
- inline Scalar& coeffRef(int index)
+ inline Scalar& coeffRef(Index index)
{
return m_expression.const_cast_derived().coeffRef(index);
}
+ inline Scalar& coeffRef(Index row, Index col) const
+ {
+ return m_expression.coeffRef(row, col);
+ }
+
+ inline Scalar& coeffRef(Index index) const
+ {
+ return m_expression.coeffRef(index);
+ }
+
template<typename OtherDerived>
- void copyCoeff(int row, int col, const MatrixBase<OtherDerived>& other)
+ void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
- ei_internal_assert(row >= 0 && row < rows()
+ eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
Scalar tmp = m_expression.coeff(row, col);
m_expression.coeffRef(row, col) = _other.coeff(row, col);
@@ -81,20 +85,20 @@ template<typename ExpressionType> class SwapWrapper
}
template<typename OtherDerived>
- void copyCoeff(int index, const MatrixBase<OtherDerived>& other)
+ void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
- ei_internal_assert(index >= 0 && index < m_expression.size());
+ eigen_internal_assert(index >= 0 && index < m_expression.size());
Scalar tmp = m_expression.coeff(index);
m_expression.coeffRef(index) = _other.coeff(index);
_other.coeffRef(index) = tmp;
}
template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(int row, int col, const MatrixBase<OtherDerived>& other)
+ void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
- ei_internal_assert(row >= 0 && row < rows()
+ eigen_internal_assert(row >= 0 && row < rows()
&& col >= 0 && col < cols());
Packet tmp = m_expression.template packet<StoreMode>(row, col);
m_expression.template writePacket<StoreMode>(row, col,
@@ -104,10 +108,10 @@ template<typename ExpressionType> class SwapWrapper
}
template<typename OtherDerived, int StoreMode, int LoadMode>
- void copyPacket(int index, const MatrixBase<OtherDerived>& other)
+ void copyPacket(Index index, const DenseBase<OtherDerived>& other)
{
OtherDerived& _other = other.const_cast_derived();
- ei_internal_assert(index >= 0 && index < m_expression.size());
+ eigen_internal_assert(index >= 0 && index < m_expression.size());
Packet tmp = m_expression.template packet<StoreMode>(index);
m_expression.template writePacket<StoreMode>(index,
_other.template packet<LoadMode>(index)
@@ -117,29 +121,6 @@ template<typename ExpressionType> class SwapWrapper
protected:
ExpressionType& m_expression;
-
- private:
- SwapWrapper& operator=(const SwapWrapper&);
};
-/** swaps *this with the expression \a other.
- *
- * \note \a other is only marked for internal reasons, but of course
- * it gets const-casted. One reason is that one will often call swap
- * on temporary objects (hence non-const references are forbidden).
- * Another reason is that lazyAssign takes a const argument anyway.
- */
-template<typename Derived>
-template<typename OtherDerived>
-void MatrixBase<Derived>::swap(const MatrixBase<OtherDerived>& other)
-{
- (SwapWrapper<Derived>(derived())).lazyAssign(other);
-}
-
#endif // EIGEN_SWAP_H
-
-
-
-
-
-
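For context, the swap() entry point (whose declaration moves out of this file in the new tree) drives the SwapWrapper machinery above; a minimal sketch (not part of the patch):

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3,3);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(3,3);

  // Coefficient-wise swap through SwapWrapper; works on whole matrices
  // as well as on sub-expressions such as individual rows.
  a.swap(b);
  a.row(0).swap(b.row(0));
  return 0;
}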
diff --git a/extern/Eigen3/Eigen/src/Core/Transpose.h b/extern/Eigen3/Eigen/src/Core/Transpose.h
new file mode 100644
index 00000000000..3f7c7df6ee1
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Transpose.h
@@ -0,0 +1,425 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRANSPOSE_H
+#define EIGEN_TRANSPOSE_H
+
+/** \class Transpose
+ * \ingroup Core_Module
+ *
+ * \brief Expression of the transpose of a matrix
+ *
+ * \param MatrixType the type of the object of which we are taking the transpose
+ *
+ * This class represents an expression of the transpose of a matrix.
+ * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
+ * and most of the time this is the only way it is used.
+ *
+ * \sa MatrixBase::transpose(), MatrixBase::adjoint()
+ */
+
+namespace internal {
+template<typename MatrixType>
+struct traits<Transpose<MatrixType> > : traits<MatrixType>
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef typename traits<MatrixType>::XprKind XprKind;
+ enum {
+ RowsAtCompileTime = MatrixType::ColsAtCompileTime,
+ ColsAtCompileTime = MatrixType::RowsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
+ Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit),
+ Flags1 = Flags0 | FlagsLvalueBit,
+ Flags = Flags1 ^ RowMajorBit,
+ CoeffReadCost = MatrixTypeNestedPlain::CoeffReadCost,
+ InnerStrideAtCompileTime = inner_stride_at_compile_time<MatrixType>::ret,
+ OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret
+ };
+};
+}
+
+template<typename MatrixType, typename StorageKind> class TransposeImpl;
+
+template<typename MatrixType> class Transpose
+ : public TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>
+{
+ public:
+
+ typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
+ EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
+
+ inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}
+
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
+
+ inline Index rows() const { return m_matrix.cols(); }
+ inline Index cols() const { return m_matrix.rows(); }
+
+ /** \returns the nested expression */
+ const typename internal::remove_all<typename MatrixType::Nested>::type&
+ nestedExpression() const { return m_matrix; }
+
+ /** \returns the nested expression */
+ typename internal::remove_all<typename MatrixType::Nested>::type&
+ nestedExpression() { return m_matrix.const_cast_derived(); }
+
+ protected:
+ const typename MatrixType::Nested m_matrix;
+};
+
+namespace internal {
+
+template<typename MatrixType, bool HasDirectAccess = has_direct_access<MatrixType>::ret>
+struct TransposeImpl_base
+{
+ typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
+};
+
+template<typename MatrixType>
+struct TransposeImpl_base<MatrixType, false>
+{
+ typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
+};
+
+} // end namespace internal
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
+ : public internal::TransposeImpl_base<MatrixType>::type
+{
+ public:
+
+ typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+
+ inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
+ inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
+
+ typedef typename internal::conditional<
+ internal::is_lvalue<MatrixType>::value,
+ Scalar,
+ const Scalar
+ >::type ScalarWithConstIfNotLvalue;
+
+ inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
+ inline const Scalar* data() const { return derived().nestedExpression().data(); }
+
+ inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+ return derived().nestedExpression().const_cast_derived().coeffRef(col, row);
+ }
+
+ inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+ return derived().nestedExpression().const_cast_derived().coeffRef(index);
+ }
+
+ inline const Scalar& coeffRef(Index row, Index col) const
+ {
+ return derived().nestedExpression().coeffRef(col, row);
+ }
+
+ inline const Scalar& coeffRef(Index index) const
+ {
+ return derived().nestedExpression().coeffRef(index);
+ }
+
+ inline const CoeffReturnType coeff(Index row, Index col) const
+ {
+ return derived().nestedExpression().coeff(col, row);
+ }
+
+ inline const CoeffReturnType coeff(Index index) const
+ {
+ return derived().nestedExpression().coeff(index);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index row, Index col) const
+ {
+ return derived().nestedExpression().template packet<LoadMode>(col, row);
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index row, Index col, const PacketScalar& x)
+ {
+ derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(col, row, x);
+ }
+
+ template<int LoadMode>
+ inline const PacketScalar packet(Index index) const
+ {
+ return derived().nestedExpression().template packet<LoadMode>(index);
+ }
+
+ template<int LoadMode>
+ inline void writePacket(Index index, const PacketScalar& x)
+ {
+ derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(index, x);
+ }
+};
+
+/** \returns an expression of the transpose of *this.
+ *
+ * Example: \include MatrixBase_transpose.cpp
+ * Output: \verbinclude MatrixBase_transpose.out
+ *
+ * \warning If you want to replace a matrix by its own transpose, do \b NOT do this:
+ * \code
+ * m = m.transpose(); // bug!!! caused by aliasing effect
+ * \endcode
+ * Instead, use the transposeInPlace() method:
+ * \code
+ * m.transposeInPlace();
+ * \endcode
+ * which gives Eigen good opportunities for optimization, or alternatively you can also do:
+ * \code
+ * m = m.transpose().eval();
+ * \endcode
+ *
+ * \sa transposeInPlace(), adjoint() */
+template<typename Derived>
+inline Transpose<Derived>
+DenseBase<Derived>::transpose()
+{
+ return derived();
+}
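+
+// A minimal usage sketch (illustrative only, not part of Eigen; assumes
+// <Eigen/Dense> is included and the snippet lives in a function body):
+//
+//   Eigen::Matrix2i m;
+//   m << 1, 2,
+//        3, 4;
+//   Eigen::Matrix2i t = m.transpose();  // t is [1 3; 2 4]; m is unchanged
+//   // m = m.transpose();               // would alias, see the warning above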
+
+/** This is the const version of transpose().
+ *
+ * Make sure you read the warning for transpose() !
+ *
+ * \sa transposeInPlace(), adjoint() */
+template<typename Derived>
+inline const typename DenseBase<Derived>::ConstTransposeReturnType
+DenseBase<Derived>::transpose() const
+{
+ return ConstTransposeReturnType(derived());
+}
+
+/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this.
+ *
+ * Example: \include MatrixBase_adjoint.cpp
+ * Output: \verbinclude MatrixBase_adjoint.out
+ *
+ * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this:
+ * \code
+ * m = m.adjoint(); // bug!!! caused by aliasing effect
+ * \endcode
+ * Instead, use the adjointInPlace() method:
+ * \code
+ * m.adjointInPlace();
+ * \endcode
+ * which gives Eigen good opportunities for optimization, or alternatively you can also do:
+ * \code
+ * m = m.adjoint().eval();
+ * \endcode
+ *
+ * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::AdjointReturnType
+MatrixBase<Derived>::adjoint() const
+{
+ return this->transpose(); // in the complex case, the .conjugate() is implicit here
+ // due to implicit conversion to return type
+}
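+
+// Hedged sketch (not part of Eigen): for complex matrices, adjoint() is the
+// conjugate transpose, i.e. equivalent to m.transpose().conjugate():
+//
+//   Eigen::Matrix2cf c;
+//   c << std::complex<float>(1,2), std::complex<float>(0,1),
+//        std::complex<float>(3,0), std::complex<float>(4,-1);
+//   Eigen::Matrix2cf a = c.adjoint();   // a(j,i) == std::conj(c(i,j))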
+
+/***************************************************************************
+* "in place" transpose implementation
+***************************************************************************/
+
+namespace internal {
+
+template<typename MatrixType,
+ bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic>
+struct inplace_transpose_selector;
+
+template<typename MatrixType>
+struct inplace_transpose_selector<MatrixType,true> { // square matrix
+ static void run(MatrixType& m) {
+ m.template triangularView<StrictlyUpper>().swap(m.transpose());
+ }
+};
+
+template<typename MatrixType>
+struct inplace_transpose_selector<MatrixType,false> { // non square matrix
+ static void run(MatrixType& m) {
+ if (m.rows()==m.cols())
+ m.template triangularView<StrictlyUpper>().swap(m.transpose());
+ else
+ m = m.transpose().eval();
+ }
+};
+
+} // end namespace internal
+
+/** This is the "in place" version of transpose(): it replaces \c *this by its own transpose.
+ * Thus, doing
+ * \code
+ * m.transposeInPlace();
+ * \endcode
+ * has the same effect on m as doing
+ * \code
+ * m = m.transpose().eval();
+ * \endcode
+ * and is faster and also safer because in the latter line of code, forgetting the eval() results
+ * in a bug caused by aliasing.
+ *
+ * Notice however that this method is only useful if you want to replace a matrix by its own transpose.
+ * If you just need the transpose of a matrix, use transpose().
+ *
+ * \note if the matrix is not square, then \c *this must be a resizable matrix.
+ *
+ * \sa transpose(), adjoint(), adjointInPlace() */
+template<typename Derived>
+inline void DenseBase<Derived>::transposeInPlace()
+{
+ internal::inplace_transpose_selector<Derived>::run(derived());
+}
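+
+// Illustrative sketch (not part of Eigen): a non-square matrix must be
+// resizable, so a dynamic-size type such as MatrixXf is assumed here:
+//
+//   Eigen::MatrixXf m(2,3);
+//   m.setRandom();
+//   m.transposeInPlace();   // m is now 3x2, with no aliasing bug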
+
+/***************************************************************************
+* "in place" adjoint implementation
+***************************************************************************/
+
+/** This is the "in place" version of adjoint(): it replaces \c *this by its own transpose.
+ * Thus, doing
+ * \code
+ * m.adjointInPlace();
+ * \endcode
+ * has the same effect on m as doing
+ * \code
+ * m = m.adjoint().eval();
+ * \endcode
+ * and is faster and also safer because in the latter line of code, forgetting the eval() results
+ * in a bug caused by aliasing.
+ *
+ * Notice however that this method is only useful if you want to replace a matrix by its own adjoint.
+ * If you just need the adjoint of a matrix, use adjoint().
+ *
+ * \note if the matrix is not square, then \c *this must be a resizable matrix.
+ *
+ * \sa transpose(), adjoint(), transposeInPlace() */
+template<typename Derived>
+inline void MatrixBase<Derived>::adjointInPlace()
+{
+ derived() = adjoint().eval();
+}
+
+#ifndef EIGEN_NO_DEBUG
+
+// The following is to detect aliasing problems in the most common cases.
+
+namespace internal {
+
+template<typename BinOp,typename NestedXpr,typename Rhs>
+struct blas_traits<SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> >
+ : blas_traits<NestedXpr>
+{
+ typedef SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> XprType;
+ static inline const XprType extract(const XprType& x) { return x; }
+};
+
+template<bool DestIsTransposed, typename OtherDerived>
+struct check_transpose_aliasing_compile_time_selector
+{
+ enum { ret = bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed };
+};
+
+template<bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
+struct check_transpose_aliasing_compile_time_selector<DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
+{
+ enum { ret = bool(blas_traits<DerivedA>::IsTransposed) != DestIsTransposed
+ || bool(blas_traits<DerivedB>::IsTransposed) != DestIsTransposed
+ };
+};
+
+template<typename Scalar, bool DestIsTransposed, typename OtherDerived>
+struct check_transpose_aliasing_run_time_selector
+{
+ static bool run(const Scalar* dest, const OtherDerived& src)
+ {
+ return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src));
+ }
+};
+
+template<typename Scalar, bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
+struct check_transpose_aliasing_run_time_selector<Scalar,DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
+{
+ static bool run(const Scalar* dest, const CwiseBinaryOp<BinOp,DerivedA,DerivedB>& src)
+ {
+ return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src.lhs())))
+ || ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(Scalar*)extract_data(src.rhs())));
+ }
+};
+
+// The following selector, checkTransposeAliasing_impl, dispatches on MightHaveTransposeAliasing
+// because ICC emits a warning when the condition controlling the assert is known at compile time.
+// This is actually a useful warning: in expressions that involve no transposing, the condition is
+// known at compile time to be false, and exploiting that lets us avoid generating the assert code
+// again and again for all the expressions that don't need it.
+
+template<typename Derived, typename OtherDerived,
+ bool MightHaveTransposeAliasing
+ = check_transpose_aliasing_compile_time_selector
+ <blas_traits<Derived>::IsTransposed,OtherDerived>::ret
+ >
+struct checkTransposeAliasing_impl
+{
+ static void run(const Derived& dst, const OtherDerived& other)
+ {
+ eigen_assert((!check_transpose_aliasing_run_time_selector
+ <typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
+ ::run(extract_data(dst), other))
+ && "aliasing detected during tranposition, use transposeInPlace() "
+ "or evaluate the rhs into a temporary using .eval()");
+
+ }
+};
+
+template<typename Derived, typename OtherDerived>
+struct checkTransposeAliasing_impl<Derived, OtherDerived, false>
+{
+ static void run(const Derived&, const OtherDerived&)
+ {
+ }
+};
+
+} // end namespace internal
+
+template<typename Derived>
+template<typename OtherDerived>
+void DenseBase<Derived>::checkTransposeAliasing(const OtherDerived& other) const
+{
+ internal::checkTransposeAliasing_impl<Derived, OtherDerived>::run(derived(), other);
+}
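+
+// Illustrative note (an assumption about typical usage, not part of Eigen):
+// in a debug build, an aliased assignment such as
+//
+//   Eigen::MatrixXf a(3,3);
+//   a.setRandom();
+//   a = a.transpose();      // trips the eigen_assert above at run time
+//
+// is detected, whereas a = a.transpose().eval() and a.transposeInPlace()
+// are both safe.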
+#endif
+
+#endif // EIGEN_TRANSPOSE_H
diff --git a/extern/Eigen3/Eigen/src/Core/Transpositions.h b/extern/Eigen3/Eigen/src/Core/Transpositions.h
new file mode 100644
index 00000000000..88fdfb2226f
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/Transpositions.h
@@ -0,0 +1,447 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRANSPOSITIONS_H
+#define EIGEN_TRANSPOSITIONS_H
+
+/** \class Transpositions
+ * \ingroup Core_Module
+ *
+ * \brief Represents a sequence of transpositions (row/column interchange)
+ *
+ * \param SizeAtCompileTime the number of transpositions, or Dynamic
+ * \param MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
+ *
+ * This class represents a permutation transformation as a sequence of \em n transpositions
+ * \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. It is internally stored as a vector of integers \c indices.
+ * Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges
+ * the rows \c i and \c indices[i] of the matrix \c M.
+ * A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange.
+ *
+ * Compared to the class PermutationMatrix, such a sequence of transpositions is what is
+ * computed during a decomposition with pivoting, and it is faster when applying the permutation in-place.
+ *
+ * To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example:
+ * \code
+ * Transpositions tr;
+ * MatrixXf mat;
+ * mat = tr * mat;
+ * \endcode
+ * In this example, we detect that the matrix appears on both sides, and so the transpositions
+ * are applied in-place without any temporary or extra copy.
+ *
+ * \sa class PermutationMatrix
+ */
+
+namespace internal {
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed=false> struct transposition_matrix_product_retval;
+}
+
+template<typename Derived>
+class TranspositionsBase
+{
+ typedef internal::traits<Derived> Traits;
+
+ public:
+
+ typedef typename Traits::IndicesType IndicesType;
+ typedef typename IndicesType::Scalar Index;
+
+ Derived& derived() { return *static_cast<Derived*>(this); }
+ const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+ /** Copies the \a other transpositions into \c *this */
+ template<typename OtherDerived>
+ Derived& operator=(const TranspositionsBase<OtherDerived>& other)
+ {
+ indices() = other.indices();
+ return derived();
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ Derived& operator=(const TranspositionsBase& other)
+ {
+ indices() = other.indices();
+ return derived();
+ }
+ #endif
+
+ /** \returns the number of transpositions */
+ inline Index size() const { return indices().size(); }
+
+ /** Direct access to the underlying index vector */
+ inline const Index& coeff(Index i) const { return indices().coeff(i); }
+ /** Direct access to the underlying index vector */
+ inline Index& coeffRef(Index i) { return indices().coeffRef(i); }
+ /** Direct access to the underlying index vector */
+ inline const Index& operator()(Index i) const { return indices()(i); }
+ /** Direct access to the underlying index vector */
+ inline Index& operator()(Index i) { return indices()(i); }
+ /** Direct access to the underlying index vector */
+ inline const Index& operator[](Index i) const { return indices()(i); }
+ /** Direct access to the underlying index vector */
+ inline Index& operator[](Index i) { return indices()(i); }
+
+ /** const version of indices(). */
+ const IndicesType& indices() const { return derived().indices(); }
+ /** \returns a reference to the stored array representing the transpositions. */
+ IndicesType& indices() { return derived().indices(); }
+
+ /** Resizes to given size. */
+ inline void resize(int size)
+ {
+ indices().resize(size);
+ }
+
+ /** Sets \c *this to represent an identity transformation */
+ void setIdentity()
+ {
+ for(int i = 0; i < indices().size(); ++i)
+ coeffRef(i) = i;
+ }
+
+ // FIXME: do we want such methods?
+ // might be useful when the target matrix expression is complex, e.g.:
+ // object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
+ /*
+ template<typename MatrixType>
+ void applyForwardToRows(MatrixType& mat) const
+ {
+ for(Index k=0 ; k<size() ; ++k)
+ if(m_indices(k)!=k)
+ mat.row(k).swap(mat.row(m_indices(k)));
+ }
+
+ template<typename MatrixType>
+ void applyBackwardToRows(MatrixType& mat) const
+ {
+ for(Index k=size()-1 ; k>=0 ; --k)
+ if(m_indices(k)!=k)
+ mat.row(k).swap(mat.row(m_indices(k)));
+ }
+ */
+
+ /** \returns the inverse transformation */
+ inline Transpose<TranspositionsBase> inverse() const
+ { return Transpose<TranspositionsBase>(derived()); }
+
+ /** \returns the transpose transformation */
+ inline Transpose<TranspositionsBase> transpose() const
+ { return Transpose<TranspositionsBase>(derived()); }
+
+ protected:
+};
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
+{
+ typedef IndexType Index;
+ typedef Matrix<Index, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
+{
+ typedef internal::traits<Transpositions> Traits;
+ public:
+
+ typedef TranspositionsBase<Transpositions> Base;
+ typedef typename Traits::IndicesType IndicesType;
+ typedef typename IndicesType::Scalar Index;
+
+ inline Transpositions() {}
+
+ /** Copy constructor. */
+ template<typename OtherDerived>
+ inline Transpositions(const TranspositionsBase<OtherDerived>& other)
+ : m_indices(other.indices()) {}
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** Standard copy constructor. Defined only to prevent a default copy constructor
+ * from hiding the other templated constructor */
+ inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {}
+ #endif
+
+ /** Generic constructor from expression of the transposition indices. */
+ template<typename Other>
+ explicit inline Transpositions(const MatrixBase<Other>& indices) : m_indices(indices)
+ {}
+
+ /** Copies the \a other transpositions into \c *this */
+ template<typename OtherDerived>
+ Transpositions& operator=(const TranspositionsBase<OtherDerived>& other)
+ {
+ return Base::operator=(other);
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ Transpositions& operator=(const Transpositions& other)
+ {
+ m_indices = other.m_indices;
+ return *this;
+ }
+ #endif
+
+ /** Constructs an uninitialized permutation matrix of given size.
+ */
+ inline Transpositions(Index size) : m_indices(size)
+ {}
+
+ /** const version of indices(). */
+ const IndicesType& indices() const { return m_indices; }
+ /** \returns a reference to the stored array representing the transpositions. */
+ IndicesType& indices() { return m_indices; }
+
+ protected:
+
+ IndicesType m_indices;
+};
+
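+// A hedged usage sketch (illustrative only, not part of Eigen): swapping rows
+// 0 and 2 of a 3x3 matrix through a transposition sequence:
+//
+//   Eigen::Transpositions<3> tr;
+//   tr[0] = 2; tr[1] = 1; tr[2] = 2;    // T0 swaps rows 0 and 2; T1, T2 are no-ops
+//   Eigen::Matrix3f mat = Eigen::Matrix3f::Random();
+//   mat = tr * mat;                     // applied in-place, no temporary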
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,_PacketAccess> >
+{
+ typedef IndexType Index;
+ typedef Map<const Matrix<Index,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int PacketAccess>
+class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess>
+ : public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess> >
+{
+ typedef internal::traits<Map> Traits;
+ public:
+
+ typedef TranspositionsBase<Map> Base;
+ typedef typename Traits::IndicesType IndicesType;
+ typedef typename IndicesType::Scalar Index;
+
+ inline Map(const Index* indices)
+ : m_indices(indices)
+ {}
+
+ inline Map(const Index* indices, Index size)
+ : m_indices(indices,size)
+ {}
+
+ /** Copies the \a other transpositions into \c *this */
+ template<typename OtherDerived>
+ Map& operator=(const TranspositionsBase<OtherDerived>& other)
+ {
+ return Base::operator=(other);
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ Map& operator=(const Map& other)
+ {
+ m_indices = other.m_indices;
+ return *this;
+ }
+ #endif
+
+ /** const version of indices(). */
+ const IndicesType& indices() const { return m_indices; }
+
+ /** \returns a reference to the stored array representing the transpositions. */
+ IndicesType& indices() { return m_indices; }
+
+ protected:
+
+ IndicesType m_indices;
+};
+
+namespace internal {
+template<typename _IndicesType>
+struct traits<TranspositionsWrapper<_IndicesType> >
+{
+ typedef typename _IndicesType::Scalar Index;
+ typedef _IndicesType IndicesType;
+};
+}
+
+template<typename _IndicesType>
+class TranspositionsWrapper
+ : public TranspositionsBase<TranspositionsWrapper<_IndicesType> >
+{
+ typedef internal::traits<TranspositionsWrapper> Traits;
+ public:
+
+ typedef TranspositionsBase<TranspositionsWrapper> Base;
+ typedef typename Traits::IndicesType IndicesType;
+ typedef typename IndicesType::Scalar Index;
+
+ inline TranspositionsWrapper(IndicesType& indices)
+ : m_indices(indices)
+ {}
+
+ /** Copies the \a other transpositions into \c *this */
+ template<typename OtherDerived>
+ TranspositionsWrapper& operator=(const TranspositionsBase<OtherDerived>& other)
+ {
+ return Base::operator=(other);
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is a special case of the templated operator=. Its purpose is to
+ * prevent a default operator= from hiding the templated operator=.
+ */
+ TranspositionsWrapper& operator=(const TranspositionsWrapper& other)
+ {
+ m_indices = other.m_indices;
+ return *this;
+ }
+ #endif
+
+ /** const version of indices(). */
+ const IndicesType& indices() const { return m_indices; }
+
+ /** \returns a reference to the stored array representing the transpositions. */
+ IndicesType& indices() { return m_indices; }
+
+ protected:
+
+ const typename IndicesType::Nested m_indices;
+};
+
+/** \returns the \a matrix with the \a transpositions applied to the columns.
+ */
+template<typename Derived, typename TranspositionsDerived>
+inline const internal::transposition_matrix_product_retval<TranspositionsDerived, Derived, OnTheRight>
+operator*(const MatrixBase<Derived>& matrix,
+ const TranspositionsBase<TranspositionsDerived> &transpositions)
+{
+ return internal::transposition_matrix_product_retval
+ <TranspositionsDerived, Derived, OnTheRight>
+ (transpositions.derived(), matrix.derived());
+}
+
+/** \returns the \a matrix with the \a transpositions applied to the rows.
+ */
+template<typename Derived, typename TranspositionDerived>
+inline const internal::transposition_matrix_product_retval
+ <TranspositionDerived, Derived, OnTheLeft>
+operator*(const TranspositionsBase<TranspositionDerived> &transpositions,
+ const MatrixBase<Derived>& matrix)
+{
+ return internal::transposition_matrix_product_retval
+ <TranspositionDerived, Derived, OnTheLeft>
+ (transpositions.derived(), matrix.derived());
+}
+
+namespace internal {
+
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
+struct traits<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
+{
+ typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
+struct transposition_matrix_product_retval
+ : public ReturnByValue<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
+{
+ typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+ typedef typename TranspositionType::Index Index;
+
+ transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix)
+ : m_transpositions(tr), m_matrix(matrix)
+ {}
+
+ inline int rows() const { return m_matrix.rows(); }
+ inline int cols() const { return m_matrix.cols(); }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ const int size = m_transpositions.size();
+ Index j = 0;
+
+ if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix)))
+ dst = m_matrix;
+
+ for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
+ if((j=m_transpositions.coeff(k))!=k)
+ {
+ if(Side==OnTheLeft)
+ dst.row(k).swap(dst.row(j));
+ else if(Side==OnTheRight)
+ dst.col(k).swap(dst.col(j));
+ }
+ }
+
+ protected:
+ const TranspositionType& m_transpositions;
+ const typename MatrixType::Nested m_matrix;
+};
+
+} // end namespace internal
+
+/* Template partial specialization for transposed/inverse transpositions */
+
+template<typename TranspositionsDerived>
+class Transpose<TranspositionsBase<TranspositionsDerived> >
+{
+ typedef TranspositionsDerived TranspositionType;
+ typedef typename TranspositionType::IndicesType IndicesType;
+ public:
+
+ Transpose(const TranspositionType& t) : m_transpositions(t) {}
+
+ inline int size() const { return m_transpositions.size(); }
+
+ /** \returns the \a matrix with the inverse transpositions applied to the columns.
+ */
+ template<typename Derived> friend
+ inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>
+ operator*(const MatrixBase<Derived>& matrix, const Transpose& trt)
+ {
+ return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>(trt.m_transpositions, matrix.derived());
+ }
+
+ /** \returns the \a matrix with the inverse transpositions applied to the rows.
+ */
+ template<typename Derived>
+ inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>
+ operator*(const MatrixBase<Derived>& matrix) const
+ {
+ return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>(m_transpositions, matrix.derived());
+ }
+
+ protected:
+ const TranspositionType& m_transpositions;
+};
+
+#endif // EIGEN_TRANSPOSITIONS_H
diff --git a/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h b/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h
new file mode 100644
index 00000000000..033e81036f3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h
@@ -0,0 +1,838 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRIANGULARMATRIX_H
+#define EIGEN_TRIANGULARMATRIX_H
+
+namespace internal {
+
+template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval;
+
+}
+
+/** \internal
+ *
+ * \class TriangularBase
+ * \ingroup Core_Module
+ *
+ * \brief Base class for triangular part in a matrix
+ */
+template<typename Derived> class TriangularBase : public EigenBase<Derived>
+{
+ public:
+
+ enum {
+ Mode = internal::traits<Derived>::Mode,
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime
+ };
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::traits<Derived>::DenseMatrixType DenseMatrixType;
+ typedef DenseMatrixType DenseType;
+
+ inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
+
+ inline Index rows() const { return derived().rows(); }
+ inline Index cols() const { return derived().cols(); }
+ inline Index outerStride() const { return derived().outerStride(); }
+ inline Index innerStride() const { return derived().innerStride(); }
+
+ inline Scalar coeff(Index row, Index col) const { return derived().coeff(row,col); }
+ inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); }
+
+ /** \see MatrixBase::copyCoeff(row,col)
+ */
+ template<typename Other>
+ EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other)
+ {
+ derived().coeffRef(row, col) = other.coeff(row, col);
+ }
+
+ inline Scalar operator()(Index row, Index col) const
+ {
+ check_coordinates(row, col);
+ return coeff(row,col);
+ }
+ inline Scalar& operator()(Index row, Index col)
+ {
+ check_coordinates(row, col);
+ return coeffRef(row,col);
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+ inline Derived& derived() { return *static_cast<Derived*>(this); }
+ #endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived> &other) const;
+ template<typename DenseDerived>
+ void evalToLazy(MatrixBase<DenseDerived> &other) const;
+
+ DenseMatrixType toDenseMatrix() const
+ {
+ DenseMatrixType res(rows(), cols());
+ evalToLazy(res);
+ return res;
+ }
+
+ protected:
+
+ void check_coordinates(Index row, Index col) const
+ {
+ EIGEN_ONLY_USED_FOR_DEBUG(row);
+ EIGEN_ONLY_USED_FOR_DEBUG(col);
+ eigen_assert(col>=0 && col<cols() && row>=0 && row<rows());
+ const int mode = int(Mode) & ~SelfAdjoint;
+ EIGEN_ONLY_USED_FOR_DEBUG(mode);
+ eigen_assert((mode==Upper && col>=row)
+ || (mode==Lower && col<=row)
+ || ((mode==StrictlyUpper || mode==UnitUpper) && col>row)
+ || ((mode==StrictlyLower || mode==UnitLower) && col<row));
+ }
+
+ #ifdef EIGEN_INTERNAL_DEBUGGING
+ void check_coordinates_internal(Index row, Index col) const
+ {
+ check_coordinates(row, col);
+ }
+ #else
+ void check_coordinates_internal(Index , Index ) const {}
+ #endif
+
+};
+
+/** \class TriangularView
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a triangular part in a matrix
+ *
+ * \param MatrixType the type of the object in which we are taking the triangular part
+ * \param Mode the kind of triangular matrix expression to construct. Can be #Upper,
+ * #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower.
+ * This is in fact a bit field; it must have either #Upper or #Lower,
+ * and additionally it may have #UnitDiag or #ZeroDiag or neither.
+ *
+ * This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular
+ * matrices one should speak of "trapezoid" parts. This class is the return type
+ * of MatrixBase::triangularView() and most of the time this is the only way it is used.
+ *
+ * \sa MatrixBase::triangularView()
+ */
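+
+// A brief hedged sketch (illustrative, not part of Eigen): assuming the
+// diagonal of m is nonsingular, solve() performs forward substitution:
+//
+//   Eigen::Matrix3f m = Eigen::Matrix3f::Random();
+//   Eigen::Vector3f b = Eigen::Vector3f::Ones();
+//   Eigen::Vector3f x = m.triangularView<Eigen::Lower>().solve(b);
+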
+namespace internal {
+template<typename MatrixType, unsigned int _Mode>
+struct traits<TriangularView<MatrixType, _Mode> > : traits<MatrixType>
+{
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
+ typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+ typedef MatrixType ExpressionType;
+ typedef typename MatrixType::PlainObject DenseMatrixType;
+ enum {
+ Mode = _Mode,
+ Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode,
+ CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost
+ };
+};
+}
+
+template<int Mode, bool LhsIsTriangular,
+ typename Lhs, bool LhsIsVector,
+ typename Rhs, bool RhsIsVector>
+struct TriangularProduct;
+
+template<typename _MatrixType, unsigned int _Mode> class TriangularView
+ : public TriangularBase<TriangularView<_MatrixType, _Mode> >
+{
+ public:
+
+ typedef TriangularBase<TriangularView> Base;
+ typedef typename internal::traits<TriangularView>::Scalar Scalar;
+
+ typedef _MatrixType MatrixType;
+ typedef typename internal::traits<TriangularView>::DenseMatrixType DenseMatrixType;
+ typedef DenseMatrixType PlainObject;
+
+ protected:
+ typedef typename internal::traits<TriangularView>::MatrixTypeNested MatrixTypeNested;
+ typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;
+ typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
+
+ typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
+
+ public:
+ using Base::evalToLazy;
+
+
+ typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
+ typedef typename internal::traits<TriangularView>::Index Index;
+
+ enum {
+ Mode = _Mode,
+ TransposeMode = (Mode & Upper ? Lower : 0)
+ | (Mode & Lower ? Upper : 0)
+ | (Mode & (UnitDiag))
+ | (Mode & (ZeroDiag))
+ };
+
+ inline TriangularView(const MatrixType& matrix) : m_matrix(matrix)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+ inline Index outerStride() const { return m_matrix.outerStride(); }
+ inline Index innerStride() const { return m_matrix.innerStride(); }
+
+ /** \sa MatrixBase::operator+=() */
+ template<typename Other> TriangularView& operator+=(const DenseBase<Other>& other) { return *this = m_matrix + other.derived(); }
+ /** \sa MatrixBase::operator-=() */
+ template<typename Other> TriangularView& operator-=(const DenseBase<Other>& other) { return *this = m_matrix - other.derived(); }
+ /** \sa MatrixBase::operator*=() */
+ TriangularView& operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix * other; }
+ /** \sa MatrixBase::operator/=() */
+ TriangularView& operator/=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix / other; }
+
+ /** \sa MatrixBase::fill() */
+ void fill(const Scalar& value) { setConstant(value); }
+ /** \sa MatrixBase::setConstant() */
+ TriangularView& setConstant(const Scalar& value)
+ { return *this = MatrixType::Constant(rows(), cols(), value); }
+ /** \sa MatrixBase::setZero() */
+ TriangularView& setZero() { return setConstant(Scalar(0)); }
+ /** \sa MatrixBase::setOnes() */
+ TriangularView& setOnes() { return setConstant(Scalar(1)); }
+
+ /** \sa MatrixBase::coeff()
+ * \warning the coordinates must fit into the referenced triangular part
+ */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ Base::check_coordinates_internal(row, col);
+ return m_matrix.coeff(row, col);
+ }
+
+ /** \sa MatrixBase::coeffRef()
+ * \warning the coordinates must fit into the referenced triangular part
+ */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ Base::check_coordinates_internal(row, col);
+ return m_matrix.const_cast_derived().coeffRef(row, col);
+ }
+
+ const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
+ MatrixTypeNestedCleaned& nestedExpression() { return *const_cast<MatrixTypeNestedCleaned*>(&m_matrix); }
+
+ /** Assigns a triangular matrix to a triangular part of a dense matrix */
+ template<typename OtherDerived>
+ TriangularView& operator=(const TriangularBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ TriangularView& operator=(const MatrixBase<OtherDerived>& other);
+
+ TriangularView& operator=(const TriangularView& other)
+ { return *this = other.nestedExpression(); }
+
+ template<typename OtherDerived>
+ void lazyAssign(const TriangularBase<OtherDerived>& other);
+
+ template<typename OtherDerived>
+ void lazyAssign(const MatrixBase<OtherDerived>& other);
+
+ /** \sa MatrixBase::conjugate() */
+ inline TriangularView<MatrixConjugateReturnType,Mode> conjugate()
+ { return m_matrix.conjugate(); }
+ /** \sa MatrixBase::conjugate() const */
+ inline const TriangularView<MatrixConjugateReturnType,Mode> conjugate() const
+ { return m_matrix.conjugate(); }
+
+ /** \sa MatrixBase::adjoint() */
+ inline TriangularView<typename MatrixType::AdjointReturnType,TransposeMode> adjoint()
+ { return m_matrix.adjoint(); }
+ /** \sa MatrixBase::adjoint() const */
+ inline const TriangularView<typename MatrixType::AdjointReturnType,TransposeMode> adjoint() const
+ { return m_matrix.adjoint(); }
+
+ /** \sa MatrixBase::transpose() */
+ inline TriangularView<Transpose<MatrixType>,TransposeMode> transpose()
+ {
+ EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+ return m_matrix.const_cast_derived().transpose();
+ }
+ /** \sa MatrixBase::transpose() const */
+ inline const TriangularView<Transpose<MatrixType>,TransposeMode> transpose() const
+ { return m_matrix.transpose(); }
+
+ /** Efficient triangular matrix times vector/matrix product */
+ template<typename OtherDerived>
+ TriangularProduct<Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime>
+ operator*(const MatrixBase<OtherDerived>& rhs) const
+ {
+ return TriangularProduct
+ <Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime>
+ (m_matrix, rhs.derived());
+ }
+
+ /** Efficient vector/matrix times triangular matrix product */
+ template<typename OtherDerived> friend
+ TriangularProduct<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
+ operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs)
+ {
+ return TriangularProduct
+ <Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
+ (lhs.derived(),rhs.m_matrix);
+ }
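+
+// Hedged sketch (not part of Eigen): both operator* overloads return a lazy
+// TriangularProduct that is evaluated with the specialized triangular kernels:
+//
+//   Eigen::Matrix3f m = Eigen::Matrix3f::Random();
+//   Eigen::Vector3f v = Eigen::Vector3f::Ones();
+//   Eigen::Vector3f y = m.triangularView<Eigen::Upper>() * v;
+//   Eigen::RowVector3f z = v.transpose() * m.triangularView<Eigen::Upper>();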
+
+ #ifdef EIGEN2_SUPPORT
+ template<typename OtherDerived>
+ struct eigen2_product_return_type
+ {
+ typedef typename TriangularView<MatrixType,Mode>::DenseMatrixType DenseMatrixType;
+ typedef typename OtherDerived::PlainObject::DenseType OtherPlainObject;
+ typedef typename ProductReturnType<DenseMatrixType, OtherPlainObject>::Type ProdRetType;
+ typedef typename ProdRetType::PlainObject type;
+ };
+ template<typename OtherDerived>
+ const typename eigen2_product_return_type<OtherDerived>::type
+ operator*(const EigenBase<OtherDerived>& rhs) const
+ {
+ typename OtherDerived::PlainObject::DenseType rhsPlainObject;
+ rhs.evalTo(rhsPlainObject);
+ return this->toDenseMatrix() * rhsPlainObject;
+ }
+ template<typename OtherMatrixType>
+ bool isApprox(const TriangularView<OtherMatrixType, Mode>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+ {
+ return this->toDenseMatrix().isApprox(other.toDenseMatrix(), precision);
+ }
+ template<typename OtherDerived>
+ bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+ {
+ return this->toDenseMatrix().isApprox(other, precision);
+ }
+ #endif // EIGEN2_SUPPORT
+
+ template<int Side, typename Other>
+ inline const internal::triangular_solve_retval<Side,TriangularView, Other>
+ solve(const MatrixBase<Other>& other) const;
+
+ template<int Side, typename OtherDerived>
+ void solveInPlace(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename Other>
+ inline const internal::triangular_solve_retval<OnTheLeft,TriangularView, Other>
+ solve(const MatrixBase<Other>& other) const
+ { return solve<OnTheLeft>(other); }
+
+ template<typename OtherDerived>
+ void solveInPlace(const MatrixBase<OtherDerived>& other) const
+ { return solveInPlace<OnTheLeft>(other); }
+
+ const SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() const
+ {
+ EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
+ return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
+ }
+ SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView()
+ {
+ EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
+ return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
+ }
+
+ template<typename OtherDerived>
+ void swap(TriangularBase<OtherDerived> const & other)
+ {
+ TriangularView<SwapWrapper<MatrixType>,Mode>(const_cast<MatrixType&>(m_matrix)).lazyAssign(other.derived());
+ }
+
+ template<typename OtherDerived>
+ void swap(MatrixBase<OtherDerived> const & other)
+ {
+ TriangularView<SwapWrapper<MatrixType>,Mode>(const_cast<MatrixType&>(m_matrix)).lazyAssign(other.derived());
+ }
+
+ Scalar determinant() const
+ {
+ if (Mode & UnitDiag)
+ return 1;
+ else if (Mode & ZeroDiag)
+ return 0;
+ else
+ return m_matrix.diagonal().prod();
+ }
+
+ // TODO simplify the following:
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+ {
+ setZero();
+ return assignProduct(other,1);
+ }
+
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+ {
+ return assignProduct(other,1);
+ }
+
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+ {
+ return assignProduct(other,-1);
+ }
+
+
+ template<typename ProductDerived>
+ EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct<ProductDerived>& other)
+ {
+ setZero();
+ return assignProduct(other,other.alpha());
+ }
+
+ template<typename ProductDerived>
+ EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct<ProductDerived>& other)
+ {
+ return assignProduct(other,other.alpha());
+ }
+
+ template<typename ProductDerived>
+ EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct<ProductDerived>& other)
+ {
+ return assignProduct(other,-other.alpha());
+ }
+
+ protected:
+
+ template<typename ProductDerived, typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase<ProductDerived, Lhs,Rhs>& prod, const Scalar& alpha);
+
+ const MatrixTypeNested m_matrix;
+};
+
+/***************************************************************************
+* Implementation of triangular evaluation/assignment
+***************************************************************************/
+
+namespace internal {
+
+template<typename Derived1, typename Derived2, unsigned int Mode, int UnrollCount, bool ClearOpposite>
+struct triangular_assignment_selector
+{
+ enum {
+ col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
+ row = (UnrollCount-1) % Derived1::RowsAtCompileTime
+ };
+
+ typedef typename Derived1::Scalar Scalar;
+
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ triangular_assignment_selector<Derived1, Derived2, Mode, UnrollCount-1, ClearOpposite>::run(dst, src);
+
+ eigen_assert( Mode == Upper || Mode == Lower
+ || Mode == StrictlyUpper || Mode == StrictlyLower
+ || Mode == UnitUpper || Mode == UnitLower);
+ if((Mode == Upper && row <= col)
+ || (Mode == Lower && row >= col)
+ || (Mode == StrictlyUpper && row < col)
+ || (Mode == StrictlyLower && row > col)
+ || (Mode == UnitUpper && row < col)
+ || (Mode == UnitLower && row > col))
+ dst.copyCoeff(row, col, src);
+ else if(ClearOpposite)
+ {
+ if (Mode&UnitDiag && row==col)
+ dst.coeffRef(row, col) = Scalar(1);
+ else
+ dst.coeffRef(row, col) = Scalar(0);
+ }
+ }
+};
+
+// prevent buggy user code from causing an infinite recursion
+template<typename Derived1, typename Derived2, unsigned int Mode, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Mode, 0, ClearOpposite>
+{
+ inline static void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearOpposite>
+{
+ typedef typename Derived1::Index Index;
+ typedef typename Derived1::Scalar Scalar;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ for(Index j = 0; j < dst.cols(); ++j)
+ {
+ Index maxi = (std::min)(j, dst.rows()-1);
+ for(Index i = 0; i <= maxi; ++i)
+ dst.copyCoeff(i, j, src);
+ if (ClearOpposite)
+ for(Index i = maxi+1; i < dst.rows(); ++i)
+ dst.coeffRef(i, j) = Scalar(0);
+ }
+ }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearOpposite>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ for(Index j = 0; j < dst.cols(); ++j)
+ {
+ for(Index i = j; i < dst.rows(); ++i)
+ dst.copyCoeff(i, j, src);
+ Index maxi = (std::min)(j, dst.rows());
+ if (ClearOpposite)
+ for(Index i = 0; i < maxi; ++i)
+ dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
+ }
+ }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ for(Index j = 0; j < dst.cols(); ++j)
+ {
+ Index maxi = (std::min)(j, dst.rows());
+ for(Index i = 0; i < maxi; ++i)
+ dst.copyCoeff(i, j, src);
+ if (ClearOpposite)
+ for(Index i = maxi; i < dst.rows(); ++i)
+ dst.coeffRef(i, j) = 0;
+ }
+ }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic, ClearOpposite>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ for(Index j = 0; j < dst.cols(); ++j)
+ {
+ for(Index i = j+1; i < dst.rows(); ++i)
+ dst.copyCoeff(i, j, src);
+ Index maxi = (std::min)(j, dst.rows()-1);
+ if (ClearOpposite)
+ for(Index i = 0; i <= maxi; ++i)
+ dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
+ }
+ }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, ClearOpposite>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ for(Index j = 0; j < dst.cols(); ++j)
+ {
+ Index maxi = (std::min)(j, dst.rows());
+ for(Index i = 0; i < maxi; ++i)
+ dst.copyCoeff(i, j, src);
+ if (ClearOpposite)
+ {
+ for(Index i = maxi+1; i < dst.rows(); ++i)
+ dst.coeffRef(i, j) = 0;
+ }
+ }
+ dst.diagonal().setOnes();
+ }
+};
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, ClearOpposite>
+{
+ typedef typename Derived1::Index Index;
+ inline static void run(Derived1 &dst, const Derived2 &src)
+ {
+ for(Index j = 0; j < dst.cols(); ++j)
+ {
+ Index maxi = (std::min)(j, dst.rows());
+ for(Index i = maxi+1; i < dst.rows(); ++i)
+ dst.copyCoeff(i, j, src);
+ if (ClearOpposite)
+ {
+ for(Index i = 0; i < maxi; ++i)
+ dst.coeffRef(i, j) = 0;
+ }
+ }
+ dst.diagonal().setOnes();
+ }
+};
+
+} // end namespace internal
+
+// FIXME should we keep that possibility
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+inline TriangularView<MatrixType, Mode>&
+TriangularView<MatrixType, Mode>::operator=(const MatrixBase<OtherDerived>& other)
+{
+ if(OtherDerived::Flags & EvalBeforeAssigningBit)
+ {
+ typename internal::plain_matrix_type<OtherDerived>::type other_evaluated(other.rows(), other.cols());
+ other_evaluated.template triangularView<Mode>().lazyAssign(other.derived());
+ lazyAssign(other_evaluated);
+ }
+ else
+ lazyAssign(other.derived());
+ return *this;
+}
+
+// FIXME should we keep that possibility
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+void TriangularView<MatrixType, Mode>::lazyAssign(const MatrixBase<OtherDerived>& other)
+{
+ enum {
+ unroll = MatrixType::SizeAtCompileTime != Dynamic
+ && internal::traits<OtherDerived>::CoeffReadCost != Dynamic
+ && MatrixType::SizeAtCompileTime*internal::traits<OtherDerived>::CoeffReadCost/2 <= EIGEN_UNROLLING_LIMIT
+ };
+ eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
+
+ internal::triangular_assignment_selector
+ <MatrixType, OtherDerived, int(Mode),
+ unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
+ false // do not change the opposite triangular part
+ >::run(m_matrix.const_cast_derived(), other.derived());
+}
+
+
+
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+inline TriangularView<MatrixType, Mode>&
+TriangularView<MatrixType, Mode>::operator=(const TriangularBase<OtherDerived>& other)
+{
+ eigen_assert(Mode == int(OtherDerived::Mode));
+ if(internal::traits<OtherDerived>::Flags & EvalBeforeAssigningBit)
+ {
+ typename OtherDerived::DenseMatrixType other_evaluated(other.rows(), other.cols());
+ other_evaluated.template triangularView<Mode>().lazyAssign(other.derived().nestedExpression());
+ lazyAssign(other_evaluated);
+ }
+ else
+ lazyAssign(other.derived().nestedExpression());
+ return *this;
+}
+
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+void TriangularView<MatrixType, Mode>::lazyAssign(const TriangularBase<OtherDerived>& other)
+{
+ enum {
+ unroll = MatrixType::SizeAtCompileTime != Dynamic
+ && internal::traits<OtherDerived>::CoeffReadCost != Dynamic
+ && MatrixType::SizeAtCompileTime * internal::traits<OtherDerived>::CoeffReadCost / 2
+ <= EIGEN_UNROLLING_LIMIT
+ };
+ eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
+
+ internal::triangular_assignment_selector
+ <MatrixType, OtherDerived, int(Mode),
+ unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
+ false // preserve the opposite triangular part
+ >::run(m_matrix.const_cast_derived(), other.derived().nestedExpression());
+}
+
+/***************************************************************************
+* Implementation of TriangularBase methods
+***************************************************************************/
+
+/** Assigns a triangular or selfadjoint matrix to a dense matrix.
+ * If the matrix is triangular, the opposite part is set to zero. */
+template<typename Derived>
+template<typename DenseDerived>
+void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
+{
+ if(internal::traits<Derived>::Flags & EvalBeforeAssigningBit)
+ {
+ typename internal::plain_matrix_type<Derived>::type other_evaluated(rows(), cols());
+ evalToLazy(other_evaluated);
+ other.derived().swap(other_evaluated);
+ }
+ else
+ evalToLazy(other.derived());
+}
+
+/** Assigns a triangular or selfadjoint matrix to a dense matrix.
+ * If the matrix is triangular, the opposite part is set to zero. */
+template<typename Derived>
+template<typename DenseDerived>
+void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
+{
+ enum {
+ unroll = DenseDerived::SizeAtCompileTime != Dynamic
+ && internal::traits<Derived>::CoeffReadCost != Dynamic
+ && DenseDerived::SizeAtCompileTime * internal::traits<Derived>::CoeffReadCost / 2
+ <= EIGEN_UNROLLING_LIMIT
+ };
+ other.derived().resize(this->rows(), this->cols());
+
+ internal::triangular_assignment_selector
+ <DenseDerived, typename internal::traits<Derived>::MatrixTypeNestedCleaned, Derived::Mode,
+ unroll ? int(DenseDerived::SizeAtCompileTime) : Dynamic,
+ true // clear the opposite triangular part
+ >::run(other.derived(), derived().nestedExpression());
+}
+
+/***************************************************************************
+* Implementation of TriangularView methods
+***************************************************************************/
+
+/***************************************************************************
+* Implementation of MatrixBase methods
+***************************************************************************/
+
+#ifdef EIGEN2_SUPPORT
+
+// implementation of part<>(), including the SelfAdjoint case.
+
+namespace internal {
+template<typename MatrixType, unsigned int Mode>
+struct eigen2_part_return_type
+{
+ typedef TriangularView<MatrixType, Mode> type;
+};
+
+template<typename MatrixType>
+struct eigen2_part_return_type<MatrixType, SelfAdjoint>
+{
+ typedef SelfAdjointView<MatrixType, Upper> type;
+};
+}
+
+/** \deprecated use MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+const typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part() const
+{
+ return derived();
+}
+
+/** \deprecated use MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part()
+{
+ return derived();
+}
+#endif
+
+/**
+ * \returns an expression of a triangular view extracted from the current matrix
+ *
+ * The parameter \a Mode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper,
+ * \c #Lower, \c #StrictlyLower, \c #UnitLower.
+ *
+ * Example: \include MatrixBase_extract.cpp
+ * Output: \verbinclude MatrixBase_extract.out
+ *
+ * \sa class TriangularView
+ */
+template<typename Derived>
+template<unsigned int Mode>
+typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type
+MatrixBase<Derived>::triangularView()
+{
+ return derived();
+}
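+
+// Minimal sketch (illustrative only, not part of Eigen): writing through a
+// triangular view touches only the selected part:
+//
+//   Eigen::Matrix3f m = Eigen::Matrix3f::Zero();
+//   m.triangularView<Eigen::Upper>().setOnes();  // the lower part stays zero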
+
+/** This is the const version of MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type
+MatrixBase<Derived>::triangularView() const
+{
+ return derived();
+}
+
+/** \returns true if *this is approximately equal to an upper triangular matrix,
+ * within the precision given by \a prec.
+ *
+ * \sa isLowerTriangular()
+ */
+template<typename Derived>
+bool MatrixBase<Derived>::isUpperTriangular(RealScalar prec) const
+{
+ RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
+ for(Index j = 0; j < cols(); ++j)
+ {
+ Index maxi = (std::min)(j, rows()-1);
+ for(Index i = 0; i <= maxi; ++i)
+ {
+ RealScalar absValue = internal::abs(coeff(i,j));
+ if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
+ }
+ }
+ RealScalar threshold = maxAbsOnUpperPart * prec;
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = j+1; i < rows(); ++i)
+ if(internal::abs(coeff(i, j)) > threshold) return false;
+ return true;
+}
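+
+// Hedged example (not part of Eigen): the threshold is relative to the largest
+// coefficient of the upper part, so tiny nonzeros below the diagonal still pass:
+//
+//   Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
+//   m(2,0) = 1e-300;                  // negligible relative to the upper part
+//   bool up = m.isUpperTriangular();  // true with the default precision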
+
+/** \returns true if *this is approximately equal to a lower triangular matrix,
+ * within the precision given by \a prec.
+ *
+ * \sa isUpperTriangular()
+ */
+template<typename Derived>
+bool MatrixBase<Derived>::isLowerTriangular(RealScalar prec) const
+{
+ RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
+ for(Index j = 0; j < cols(); ++j)
+ for(Index i = j; i < rows(); ++i)
+ {
+ RealScalar absValue = internal::abs(coeff(i,j));
+ if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
+ }
+ RealScalar threshold = maxAbsOnLowerPart * prec;
+ for(Index j = 1; j < cols(); ++j)
+ {
+ Index maxi = (std::min)(j, rows()-1);
+ for(Index i = 0; i < maxi; ++i)
+ if(internal::abs(coeff(i, j)) > threshold) return false;
+ }
+ return true;
+}
+
+#endif // EIGEN_TRIANGULARMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Core/VectorBlock.h b/extern/Eigen3/Eigen/src/Core/VectorBlock.h
new file mode 100644
index 00000000000..858e4c7865a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/VectorBlock.h
@@ -0,0 +1,296 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_VECTORBLOCK_H
+#define EIGEN_VECTORBLOCK_H
+
+/** \class VectorBlock
+ * \ingroup Core_Module
+ *
+ * \brief Expression of a fixed-size or dynamic-size sub-vector
+ *
+ * \param VectorType the type of the object in which we are taking a sub-vector
+ * \param Size size of the sub-vector we are taking at compile time (optional)
+ *
+ * This class represents an expression of either a fixed-size or dynamic-size sub-vector.
+ * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
+ * most of the time this is the only way it is used.
+ *
+ * However, if you want to directly manipulate sub-vector expressions,
+ * for instance if you want to write a function returning such an expression, you
+ * will need to use this class.
+ *
+ * Here is an example illustrating the dynamic case:
+ * \include class_VectorBlock.cpp
+ * Output: \verbinclude class_VectorBlock.out
+ *
+ * \note Even though this expression has dynamic size, in the case where \a VectorType
+ * has fixed size, this expression inherits a fixed maximal size which means that evaluating
+ * it does not cause a dynamic memory allocation.
+ *
+ * Here is an example illustrating the fixed-size case:
+ * \include class_FixedVectorBlock.cpp
+ * Output: \verbinclude class_FixedVectorBlock.out
+ *
+ * \sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index)
+ */
+
+namespace internal {
+template<typename VectorType, int Size>
+struct traits<VectorBlock<VectorType, Size> >
+ : public traits<Block<VectorType,
+ traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+ traits<VectorType>::Flags & RowMajorBit ? Size : 1> >
+{
+};
+}
+
+template<typename VectorType, int Size> class VectorBlock
+ : public Block<VectorType,
+ internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+ internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1>
+{
+ typedef Block<VectorType,
+ internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+ internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1> Base;
+ enum {
+ IsColVector = !(internal::traits<VectorType>::Flags & RowMajorBit)
+ };
+ public:
+ EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock)
+
+ using Base::operator=;
+
+ /** Dynamic-size constructor
+ */
+ inline VectorBlock(VectorType& vector, Index start, Index size)
+ : Base(vector,
+ IsColVector ? start : 0, IsColVector ? 0 : start,
+ IsColVector ? size : 1, IsColVector ? 1 : size)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
+ }
+
+ /** Fixed-size constructor
+ */
+ inline VectorBlock(VectorType& vector, Index start)
+ : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
+ }
+};
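The class documentation above mentions writing functions that return sub-vector expressions; a minimal sketch of that pattern (the helper name firstHalf is hypothetical):

#include <Eigen/Dense>

// Returns a writable view of the first half of any dense vector.
// Naming VectorBlock explicitly is what makes the return type expressible.
template<typename Derived>
Eigen::VectorBlock<Derived>
firstHalf(Eigen::MatrixBase<Derived>& v)
{
  return Eigen::VectorBlock<Derived>(v.derived(), 0, v.size() / 2);
}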
+
+
+/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
+ *
+ * \only_for_vectors
+ *
+ * \param start the first coefficient in the segment
+ * \param size the number of coefficients in the segment
+ *
+ * Example: \include MatrixBase_segment_int_int.cpp
+ * Output: \verbinclude MatrixBase_segment_int_int.out
+ *
+ * \note Even though the returned expression has dynamic size, in the case
+ * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+ * which means that evaluating it does not cause a dynamic memory allocation.
+ *
+ * \sa class Block, segment(Index)
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::SegmentReturnType
+DenseBase<Derived>::segment(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return SegmentReturnType(derived(), start, size);
+}
+
+/** This is the const version of segment(Index,Index).*/
+template<typename Derived>
+inline typename DenseBase<Derived>::ConstSegmentReturnType
+DenseBase<Derived>::segment(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return ConstSegmentReturnType(derived(), start, size);
+}
+
+/** \returns a dynamic-size expression of the first coefficients of *this.
+ *
+ * \only_for_vectors
+ *
+ * \param size the number of coefficients in the block
+ *
+ * Example: \include MatrixBase_start_int.cpp
+ * Output: \verbinclude MatrixBase_start_int.out
+ *
+ * \note Even though the returned expression has dynamic size, in the case
+ * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+ * which means that evaluating it does not cause a dynamic memory allocation.
+ *
+ * \sa class Block, block(Index,Index)
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::SegmentReturnType
+DenseBase<Derived>::head(Index size)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return SegmentReturnType(derived(), 0, size);
+}
+
+/** This is the const version of head(Index).*/
+template<typename Derived>
+inline typename DenseBase<Derived>::ConstSegmentReturnType
+DenseBase<Derived>::head(Index size) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return ConstSegmentReturnType(derived(), 0, size);
+}
+
+/** \returns a dynamic-size expression of the last coefficients of *this.
+ *
+ * \only_for_vectors
+ *
+ * \param size the number of coefficients in the block
+ *
+ * Example: \include MatrixBase_end_int.cpp
+ * Output: \verbinclude MatrixBase_end_int.out
+ *
+ * \note Even though the returned expression has dynamic size, in the case
+ * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+ * which means that evaluating it does not cause a dynamic memory allocation.
+ *
+ * \sa class Block, block(Index,Index)
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::SegmentReturnType
+DenseBase<Derived>::tail(Index size)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return SegmentReturnType(derived(), this->size() - size, size);
+}
+
+/** This is the const version of tail(Index).*/
+template<typename Derived>
+inline typename DenseBase<Derived>::ConstSegmentReturnType
+DenseBase<Derived>::tail(Index size) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return ConstSegmentReturnType(derived(), this->size() - size, size);
+}
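Taken together, the dynamic-size accessors above slice a vector like this (illustrative sketch):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd v(6);
  v << 0, 1, 2, 3, 4, 5;
  std::cout << v.head(2).transpose()       << "\n"; // 0 1
  std::cout << v.segment(2, 3).transpose() << "\n"; // 2 3 4
  std::cout << v.tail(2).transpose()       << "\n"; // 4 5
  v.tail(2).setZero();                              // the expressions are writable views
  std::cout << v.transpose() << "\n";               // 0 1 2 3 0 0
}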
+
+/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this
+ *
+ * \only_for_vectors
+ *
+ * The template parameter \a Size is the number of coefficients in the block
+ *
+ * \param start the index of the first element of the sub-vector
+ *
+ * Example: \include MatrixBase_template_int_segment.cpp
+ * Output: \verbinclude MatrixBase_template_int_segment.out
+ *
+ * \sa class Block
+ */
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::segment(Index start)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename FixedSegmentReturnType<Size>::Type(derived(), start);
+}
+
+/** This is the const version of segment<int>(Index).*/
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::segment(Index start) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename ConstFixedSegmentReturnType<Size>::Type(derived(), start);
+}
+
+/** \returns a fixed-size expression of the first coefficients of *this.
+ *
+ * \only_for_vectors
+ *
+ * The template parameter \a Size is the number of coefficients in the block
+ *
+ * Example: \include MatrixBase_template_int_start.cpp
+ * Output: \verbinclude MatrixBase_template_int_start.out
+ *
+ * \sa class Block
+ */
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::head()
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename FixedSegmentReturnType<Size>::Type(derived(), 0);
+}
+
+/** This is the const version of head<int>().*/
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::head() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename ConstFixedSegmentReturnType<Size>::Type(derived(), 0);
+}
+
+/** \returns a fixed-size expression of the last coefficients of *this.
+ *
+ * \only_for_vectors
+ *
+ * The template parameter \a Size is the number of coefficients in the block
+ *
+ * Example: \include MatrixBase_template_int_end.cpp
+ * Output: \verbinclude MatrixBase_template_int_end.out
+ *
+ * \sa class Block
+ */
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template FixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::tail()
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename FixedSegmentReturnType<Size>::Type(derived(), size() - Size);
+}
+
+/** This is the const version of tail<int>().*/
+template<typename Derived>
+template<int Size>
+inline typename DenseBase<Derived>::template ConstFixedSegmentReturnType<Size>::Type
+DenseBase<Derived>::tail() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return typename ConstFixedSegmentReturnType<Size>::Type(derived(), size() - Size);
+}
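The fixed-size variants carry the segment length in the type, so no runtime size needs to be stored; a sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix<float, 6, 1> v;
  v << 0, 1, 2, 3, 4, 5;
  std::cout << v.head<3>().transpose()     << "\n"; // 0 1 2
  std::cout << v.segment<2>(1).transpose() << "\n"; // 1 2
  std::cout << v.tail<3>().transpose()     << "\n"; // 3 4 5
}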
+
+
+#endif // EIGEN_VECTORBLOCK_H
diff --git a/extern/Eigen3/Eigen/src/Core/VectorwiseOp.h b/extern/Eigen3/Eigen/src/Core/VectorwiseOp.h
new file mode 100644
index 00000000000..20f6881575b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/VectorwiseOp.h
@@ -0,0 +1,557 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PARTIAL_REDUX_H
+#define EIGEN_PARTIAL_REDUX_H
+
+/** \class PartialReduxExpr
+ * \ingroup Core_Module
+ *
+ * \brief Generic expression of a partially reduxed matrix
+ *
+ * \tparam MatrixType the type of the matrix we are applying the redux operation
+ * \tparam MemberOp type of the member functor
+ * \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal)
+ *
+ * This class represents an expression of a partial redux operator of a matrix.
+ * It is the return type of some VectorwiseOp functions,
+ * and most of the time this is the only way it is used.
+ *
+ * \sa class VectorwiseOp
+ */
+
+template< typename MatrixType, typename MemberOp, int Direction>
+class PartialReduxExpr;
+
+namespace internal {
+template<typename MatrixType, typename MemberOp, int Direction>
+struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
+ : traits<MatrixType>
+{
+ typedef typename MemberOp::result_type Scalar;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef typename traits<MatrixType>::XprKind XprKind;
+ typedef typename MatrixType::Scalar InputScalar;
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+ enum {
+ RowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime,
+ Flags0 = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits,
+ Flags = (Flags0 & ~RowMajorBit) | (RowsAtCompileTime == 1 ? RowMajorBit : 0),
+ TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime
+ };
+ #if EIGEN_GNUC_AT_LEAST(3,4)
+ typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
+ #else
+ typedef typename MemberOp::template Cost<InputScalar,TraversalSize> CostOpType;
+ #endif
+ enum {
+ CoeffReadCost = TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
+ };
+};
+}
+
+template< typename MatrixType, typename MemberOp, int Direction>
+class PartialReduxExpr : internal::no_assignment_operator,
+ public internal::dense_xpr_base< PartialReduxExpr<MatrixType, MemberOp, Direction> >::type
+{
+ public:
+
+ typedef typename internal::dense_xpr_base<PartialReduxExpr>::type Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(PartialReduxExpr)
+ typedef typename internal::traits<PartialReduxExpr>::MatrixTypeNested MatrixTypeNested;
+ typedef typename internal::traits<PartialReduxExpr>::_MatrixTypeNested _MatrixTypeNested;
+
+ PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
+ : m_matrix(mat), m_functor(func) {}
+
+ Index rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); }
+ Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
+
+ EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const
+ {
+ if (Direction==Vertical)
+ return m_functor(m_matrix.col(j));
+ else
+ return m_functor(m_matrix.row(i));
+ }
+
+ const Scalar coeff(Index index) const
+ {
+ if (Direction==Vertical)
+ return m_functor(m_matrix.col(index));
+ else
+ return m_functor(m_matrix.row(index));
+ }
+
+ protected:
+ const MatrixTypeNested m_matrix;
+ const MemberOp m_functor;
+};
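To see a PartialReduxExpr in the wild: the colwise() wrapper defined later in this file produces one, with a single coefficient per column (illustrative sketch):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  // A Vertical partial redux: a 1x3 row vector holding the column sums.
  std::cout << m.colwise().sum() << "\n"; // 5 7 9
}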
+
+#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \
+ template <typename ResultType> \
+ struct member_##MEMBER { \
+ EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER) \
+ typedef ResultType result_type; \
+ template<typename Scalar, int Size> struct Cost \
+ { enum { value = COST }; }; \
+ template<typename XprType> \
+ EIGEN_STRONG_INLINE ResultType operator()(const XprType& mat) const \
+ { return mat.MEMBER(); } \
+ }
+
+namespace internal {
+
+EIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(stableNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(blueNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(hypotNorm, (Size-1) * functor_traits<scalar_hypot_op<Scalar> >::Cost );
+EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(mean, (Size-1)*NumTraits<Scalar>::AddCost + NumTraits<Scalar>::MulCost);
+EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(prod, (Size-1)*NumTraits<Scalar>::MulCost);
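For reference, the sum invocation above expands to roughly the following functor inside namespace internal (hand-expanded sketch):

// Hand-expanded sketch of EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost):
template <typename ResultType>
struct member_sum {
  EIGEN_EMPTY_STRUCT_CTOR(member_sum)
  typedef ResultType result_type;
  template<typename Scalar, int Size> struct Cost
  { enum { value = (Size-1)*NumTraits<Scalar>::AddCost }; };
  template<typename XprType>
  EIGEN_STRONG_INLINE ResultType operator()(const XprType& mat) const
  { return mat.sum(); }
};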
+
+
+template <typename BinaryOp, typename Scalar>
+struct member_redux {
+ typedef typename result_of<
+ BinaryOp(Scalar)
+ >::type result_type;
+ template<typename _Scalar, int Size> struct Cost
+ { enum { value = (Size-1) * functor_traits<BinaryOp>::Cost }; };
+ member_redux(const BinaryOp func) : m_functor(func) {}
+ template<typename Derived>
+ inline result_type operator()(const DenseBase<Derived>& mat) const
+ { return mat.redux(m_functor); }
+ const BinaryOp m_functor;
+};
+}
+
+/** \class VectorwiseOp
+ * \ingroup Core_Module
+ *
+ * \brief Pseudo expression providing partial reduction operations
+ *
+ * \param ExpressionType the type of the object on which to do partial reductions
+ * \param Direction indicates the direction of the redux (#Vertical or #Horizontal)
+ *
+ * This class represents a pseudo expression with partial reduction features.
+ * It is the return type of DenseBase::colwise() and DenseBase::rowwise()
+ * and most of the time this is the only way it is used.
+ *
+ * Example: \include MatrixBase_colwise.cpp
+ * Output: \verbinclude MatrixBase_colwise.out
+ *
+ * \sa DenseBase::colwise(), DenseBase::rowwise(), class PartialReduxExpr
+ */
+template<typename ExpressionType, int Direction> class VectorwiseOp
+{
+ public:
+
+ typedef typename ExpressionType::Scalar Scalar;
+ typedef typename ExpressionType::RealScalar RealScalar;
+ typedef typename ExpressionType::Index Index;
+ typedef typename internal::conditional<internal::must_nest_by_value<ExpressionType>::ret,
+ ExpressionType, ExpressionType&>::type ExpressionTypeNested;
+ typedef typename internal::remove_all<ExpressionTypeNested>::type ExpressionTypeNestedCleaned;
+
+ template<template<typename _Scalar> class Functor,
+ typename Scalar=typename internal::traits<ExpressionType>::Scalar> struct ReturnType
+ {
+ typedef PartialReduxExpr<ExpressionType,
+ Functor<Scalar>,
+ Direction
+ > Type;
+ };
+
+ template<typename BinaryOp> struct ReduxReturnType
+ {
+ typedef PartialReduxExpr<ExpressionType,
+ internal::member_redux<BinaryOp,typename internal::traits<ExpressionType>::Scalar>,
+ Direction
+ > Type;
+ };
+
+ enum {
+ IsVertical = (Direction==Vertical) ? 1 : 0,
+ IsHorizontal = (Direction==Horizontal) ? 1 : 0
+ };
+
+ protected:
+
+ /** \internal
+ * \returns the i-th subvector according to the \c Direction */
+ typedef typename internal::conditional<Direction==Vertical,
+ typename ExpressionType::ColXpr,
+ typename ExpressionType::RowXpr>::type SubVector;
+ SubVector subVector(Index i)
+ {
+ return SubVector(m_matrix.derived(),i);
+ }
+
+ /** \internal
+ * \returns the number of subvectors in the direction \c Direction */
+ Index subVectors() const
+ { return Direction==Vertical?m_matrix.cols():m_matrix.rows(); }
+
+ template<typename OtherDerived> struct ExtendedType {
+ typedef Replicate<OtherDerived,
+ Direction==Vertical ? 1 : ExpressionType::RowsAtCompileTime,
+ Direction==Horizontal ? 1 : ExpressionType::ColsAtCompileTime> Type;
+ };
+
+ /** \internal
+ * Replicates a vector to match the size of \c *this */
+ template<typename OtherDerived>
+ typename ExtendedType<OtherDerived>::Type
+ extendedTo(const DenseBase<OtherDerived>& other) const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived);
+ return typename ExtendedType<OtherDerived>::Type
+ (other.derived(),
+ Direction==Vertical ? 1 : m_matrix.rows(),
+ Direction==Horizontal ? 1 : m_matrix.cols());
+ }
+
+ public:
+
+ inline VectorwiseOp(ExpressionType& matrix) : m_matrix(matrix) {}
+
+ /** \internal */
+ inline const ExpressionType& _expression() const { return m_matrix; }
+
+ /** \returns a row or column vector expression of \c *this reduxed by \a func
+ *
+ * The template parameter \a BinaryOp is the type of the functor
+ * of the custom redux operator. Note that \a func must be an associative operator.
+ *
+ * \sa class VectorwiseOp, DenseBase::colwise(), DenseBase::rowwise()
+ */
+ template<typename BinaryOp>
+ const typename ReduxReturnType<BinaryOp>::Type
+ redux(const BinaryOp& func = BinaryOp()) const
+ { return typename ReduxReturnType<BinaryOp>::Type(_expression(), func); }
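A custom redux through this entry point could look like the following (the Max functor is hypothetical; it only needs to be binary and associative, and its result_type typedef is what the result_of machinery above relies on):

#include <Eigen/Dense>
#include <iostream>

struct Max {
  typedef float result_type;
  float operator()(float a, float b) const { return a > b ? a : b; }
};

int main()
{
  Eigen::MatrixXf m(2, 2);
  m << 1, 5,
       3, 4;
  std::cout << m.colwise().redux(Max()) << "\n"; // 3 5
}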
+
+ /** \returns a row (or column) vector expression of the smallest coefficient
+ * of each column (or row) of the referenced expression.
+ *
+ * Example: \include PartialRedux_minCoeff.cpp
+ * Output: \verbinclude PartialRedux_minCoeff.out
+ *
+ * \sa DenseBase::minCoeff() */
+ const typename ReturnType<internal::member_minCoeff>::Type minCoeff() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression of the largest coefficient
+ * of each column (or row) of the referenced expression.
+ *
+ * Example: \include PartialRedux_maxCoeff.cpp
+ * Output: \verbinclude PartialRedux_maxCoeff.out
+ *
+ * \sa DenseBase::maxCoeff() */
+ const typename ReturnType<internal::member_maxCoeff>::Type maxCoeff() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression of the squared norm
+ * of each column (or row) of the referenced expression.
+ *
+ * Example: \include PartialRedux_squaredNorm.cpp
+ * Output: \verbinclude PartialRedux_squaredNorm.out
+ *
+ * \sa DenseBase::squaredNorm() */
+ const typename ReturnType<internal::member_squaredNorm,RealScalar>::Type squaredNorm() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression of the norm
+ * of each column (or row) of the referenced expression.
+ *
+ * Example: \include PartialRedux_norm.cpp
+ * Output: \verbinclude PartialRedux_norm.out
+ *
+ * \sa DenseBase::norm() */
+ const typename ReturnType<internal::member_norm,RealScalar>::Type norm() const
+ { return _expression(); }
+
+
+ /** \returns a row (or column) vector expression of the norm
+ * of each column (or row) of the referenced expression, using
+ * Blue's algorithm.
+ *
+ * \sa DenseBase::blueNorm() */
+ const typename ReturnType<internal::member_blueNorm,RealScalar>::Type blueNorm() const
+ { return _expression(); }
+
+
+ /** \returns a row (or column) vector expression of the norm
+ * of each column (or row) of the referenced expression, avoiding
+ * underflow and overflow.
+ *
+ * \sa DenseBase::stableNorm() */
+ const typename ReturnType<internal::member_stableNorm,RealScalar>::Type stableNorm() const
+ { return _expression(); }
+
+
+ /** \returns a row (or column) vector expression of the norm
+ * of each column (or row) of the referenced expression, avoiding
+ * underflow and overflow using a concatenation of hypot() calls.
+ *
+ * \sa DenseBase::hypotNorm() */
+ const typename ReturnType<internal::member_hypotNorm,RealScalar>::Type hypotNorm() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression of the sum
+ * of each column (or row) of the referenced expression.
+ *
+ * Example: \include PartialRedux_sum.cpp
+ * Output: \verbinclude PartialRedux_sum.out
+ *
+ * \sa DenseBase::sum() */
+ const typename ReturnType<internal::member_sum>::Type sum() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression of the mean
+ * of each column (or row) of the referenced expression.
+ *
+ * \sa DenseBase::mean() */
+ const typename ReturnType<internal::member_mean>::Type mean() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression representing
+ * whether \b all coefficients of each respective column (or row) are \c true.
+ *
+ * \sa DenseBase::all() */
+ const typename ReturnType<internal::member_all>::Type all() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression representing
+ * whether \b at \b least one coefficient of each respective column (or row) is \c true.
+ *
+ * \sa DenseBase::any() */
+ const typename ReturnType<internal::member_any>::Type any() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression representing
+ * the number of \c true coefficients of each respective column (or row).
+ *
+ * Example: \include PartialRedux_count.cpp
+ * Output: \verbinclude PartialRedux_count.out
+ *
+ * \sa DenseBase::count() */
+ const PartialReduxExpr<ExpressionType, internal::member_count<Index>, Direction> count() const
+ { return _expression(); }
+
+ /** \returns a row (or column) vector expression of the product
+ * of each column (or row) of the referenced expression.
+ *
+ * Example: \include PartialRedux_prod.cpp
+ * Output: \verbinclude PartialRedux_prod.out
+ *
+ * \sa DenseBase::prod() */
+ const typename ReturnType<internal::member_prod>::Type prod() const
+ { return _expression(); }
+
+
+ /** \returns a matrix expression
+ * where each column (or row) is reversed.
+ *
+ * Example: \include Vectorwise_reverse.cpp
+ * Output: \verbinclude Vectorwise_reverse.out
+ *
+ * \sa DenseBase::reverse() */
+ const Reverse<ExpressionType, Direction> reverse() const
+ { return Reverse<ExpressionType, Direction>( _expression() ); }
+
+ typedef Replicate<ExpressionType,Direction==Vertical?Dynamic:1,Direction==Horizontal?Dynamic:1> ReplicateReturnType;
+ const ReplicateReturnType replicate(Index factor) const;
+
+ /**
+ * \return an expression of the replication of each column (or row) of \c *this
+ *
+ * Example: \include DirectionWise_replicate.cpp
+ * Output: \verbinclude DirectionWise_replicate.out
+ *
+ * \sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate
+ */
+ // NOTE implemented here because of Sun Studio's compilation errors
+ template<int Factor> const Replicate<ExpressionType,(IsVertical?Factor:1),(IsHorizontal?Factor:1)>
+ replicate(Index factor = Factor) const
+ {
+ return Replicate<ExpressionType,Direction==Vertical?Factor:1,Direction==Horizontal?Factor:1>
+ (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
+ }
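Replication stacks copies of each subvector in the reduced direction; a sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::RowVector3i v(1, 2, 3);
  // Replicate each column 3 times vertically: three identical rows.
  std::cout << v.colwise().replicate(3) << "\n";
  // 1 2 3
  // 1 2 3
  // 1 2 3
}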
+
+/////////// Arithmetic operators ///////////
+
+ /** Copies the vector \a other to each subvector of \c *this */
+ template<typename OtherDerived>
+ ExpressionType& operator=(const DenseBase<OtherDerived>& other)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ //eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME
+ for(Index j=0; j<subVectors(); ++j)
+ subVector(j) = other;
+ return const_cast<ExpressionType&>(m_matrix);
+ }
+
+ /** Adds the vector \a other to each subvector of \c *this */
+ template<typename OtherDerived>
+ ExpressionType& operator+=(const DenseBase<OtherDerived>& other)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ for(Index j=0; j<subVectors(); ++j)
+ subVector(j) += other.derived();
+ return const_cast<ExpressionType&>(m_matrix);
+ }
+
+ /** Subtracts the vector \a other from each subvector of \c *this */
+ template<typename OtherDerived>
+ ExpressionType& operator-=(const DenseBase<OtherDerived>& other)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ for(Index j=0; j<subVectors(); ++j)
+ subVector(j) -= other.derived();
+ return const_cast<ExpressionType&>(m_matrix);
+ }
+
+ /** Returns the expression of the vector \a other added to each subvector of \c *this */
+ template<typename OtherDerived> EIGEN_STRONG_INLINE
+ CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
+ const ExpressionTypeNestedCleaned,
+ const typename ExtendedType<OtherDerived>::Type>
+ operator+(const DenseBase<OtherDerived>& other) const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived);
+ return m_matrix + extendedTo(other.derived());
+ }
+
+ /** Returns the expression of the difference between each subvector of \c *this and the vector \a other */
+ template<typename OtherDerived>
+ CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
+ const ExpressionTypeNestedCleaned,
+ const typename ExtendedType<OtherDerived>::Type>
+ operator-(const DenseBase<OtherDerived>& other) const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived);
+ return m_matrix - extendedTo(other.derived());
+ }
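These operators broadcast one vector across every column (or row) of the matrix; for instance (illustrative sketch):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  Eigen::VectorXd v(2);
  v << 10, 20;
  m.colwise() += v;   // add v to every column of m
  std::cout << m << "\n";
  // 11 12 13
  // 24 25 26
}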
+
+/////////// Geometry module ///////////
+
+ #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+ Homogeneous<ExpressionType,Direction> homogeneous() const;
+ #endif
+
+ typedef typename ExpressionType::PlainObject CrossReturnType;
+ template<typename OtherDerived>
+ const CrossReturnType cross(const MatrixBase<OtherDerived>& other) const;
+
+ enum {
+ HNormalized_Size = Direction==Vertical ? internal::traits<ExpressionType>::RowsAtCompileTime
+ : internal::traits<ExpressionType>::ColsAtCompileTime,
+ HNormalized_SizeMinusOne = HNormalized_Size==Dynamic ? Dynamic : HNormalized_Size-1
+ };
+ typedef Block<const ExpressionType,
+ Direction==Vertical ? int(HNormalized_SizeMinusOne)
+ : int(internal::traits<ExpressionType>::RowsAtCompileTime),
+ Direction==Horizontal ? int(HNormalized_SizeMinusOne)
+ : int(internal::traits<ExpressionType>::ColsAtCompileTime)>
+ HNormalized_Block;
+ typedef Block<const ExpressionType,
+ Direction==Vertical ? 1 : int(internal::traits<ExpressionType>::RowsAtCompileTime),
+ Direction==Horizontal ? 1 : int(internal::traits<ExpressionType>::ColsAtCompileTime)>
+ HNormalized_Factors;
+ typedef CwiseBinaryOp<internal::scalar_quotient_op<typename internal::traits<ExpressionType>::Scalar>,
+ const HNormalized_Block,
+ const Replicate<HNormalized_Factors,
+ Direction==Vertical ? HNormalized_SizeMinusOne : 1,
+ Direction==Horizontal ? HNormalized_SizeMinusOne : 1> >
+ HNormalizedReturnType;
+
+ const HNormalizedReturnType hnormalized() const;
+
+ protected:
+ ExpressionTypeNested m_matrix;
+};
+
+/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
+ *
+ * Example: \include MatrixBase_colwise.cpp
+ * Output: \verbinclude MatrixBase_colwise.out
+ *
+ * \sa rowwise(), class VectorwiseOp
+ */
+template<typename Derived>
+inline const typename DenseBase<Derived>::ConstColwiseReturnType
+DenseBase<Derived>::colwise() const
+{
+ return derived();
+}
+
+/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations
+ *
+ * \sa rowwise(), class VectorwiseOp
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::ColwiseReturnType
+DenseBase<Derived>::colwise()
+{
+ return derived();
+}
+
+/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
+ *
+ * Example: \include MatrixBase_rowwise.cpp
+ * Output: \verbinclude MatrixBase_rowwise.out
+ *
+ * \sa colwise(), class VectorwiseOp
+ */
+template<typename Derived>
+inline const typename DenseBase<Derived>::ConstRowwiseReturnType
+DenseBase<Derived>::rowwise() const
+{
+ return derived();
+}
+
+/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations
+ *
+ * \sa colwise(), class VectorwiseOp
+ */
+template<typename Derived>
+inline typename DenseBase<Derived>::RowwiseReturnType
+DenseBase<Derived>::rowwise()
+{
+ return derived();
+}
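Putting the two wrappers to work (illustrative sketch):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  std::cout << m.colwise().maxCoeff() << "\n"; // 4 5 6    (one maximum per column)
  std::cout << m.rowwise().sum()      << "\n"; // 6 and 15 (one sum per row)
}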
+
+#endif // EIGEN_PARTIAL_REDUX_H
diff --git a/extern/Eigen2/Eigen/src/Core/Visitor.h b/extern/Eigen3/Eigen/src/Core/Visitor.h
index 7569114e90d..378ebcba174 100644
--- a/extern/Eigen2/Eigen/src/Core/Visitor.h
+++ b/extern/Eigen3/Eigen/src/Core/Visitor.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -25,8 +25,10 @@
#ifndef EIGEN_VISITOR_H
#define EIGEN_VISITOR_H
+namespace internal {
+
template<typename Visitor, typename Derived, int UnrollCount>
-struct ei_visitor_impl
+struct visitor_impl
{
enum {
col = (UnrollCount-1) / Derived::RowsAtCompileTime,
@@ -35,13 +37,13 @@ struct ei_visitor_impl
inline static void run(const Derived &mat, Visitor& visitor)
{
- ei_visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor);
+ visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor);
visitor(mat.coeff(row, col), row, col);
}
};
template<typename Visitor, typename Derived>
-struct ei_visitor_impl<Visitor, Derived, 1>
+struct visitor_impl<Visitor, Derived, 1>
{
inline static void run(const Derived &mat, Visitor& visitor)
{
@@ -50,19 +52,21 @@ struct ei_visitor_impl<Visitor, Derived, 1>
};
template<typename Visitor, typename Derived>
-struct ei_visitor_impl<Visitor, Derived, Dynamic>
+struct visitor_impl<Visitor, Derived, Dynamic>
{
+ typedef typename Derived::Index Index;
inline static void run(const Derived& mat, Visitor& visitor)
{
visitor.init(mat.coeff(0,0), 0, 0);
- for(int i = 1; i < mat.rows(); ++i)
+ for(Index i = 1; i < mat.rows(); ++i)
visitor(mat.coeff(i, 0), i, 0);
- for(int j = 1; j < mat.cols(); ++j)
- for(int i = 0; i < mat.rows(); ++i)
+ for(Index j = 1; j < mat.cols(); ++j)
+ for(Index i = 0; i < mat.rows(); ++i)
visitor(mat.coeff(i, j), i, j);
}
};
+} // end namespace internal
/** Applies the visitor \a visitor to all the coefficients of the matrix or vector.
*
@@ -70,38 +74,44 @@ struct ei_visitor_impl<Visitor, Derived, Dynamic>
* \code
* struct MyVisitor {
* // called for the first coefficient
- * void init(const Scalar& value, int i, int j);
+ * void init(const Scalar& value, Index i, Index j);
* // called for all other coefficients
- * void operator() (const Scalar& value, int i, int j);
+ * void operator() (const Scalar& value, Index i, Index j);
* };
* \endcode
*
* \note compared to one or two \em for \em loops, visitors offer automatic
* unrolling for small fixed-size matrices.
*
- * \sa minCoeff(int*,int*), maxCoeff(int*,int*), MatrixBase::redux()
+ * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()
*/
template<typename Derived>
template<typename Visitor>
-void MatrixBase<Derived>::visit(Visitor& visitor) const
+void DenseBase<Derived>::visit(Visitor& visitor) const
{
- const bool unroll = SizeAtCompileTime * CoeffReadCost
- + (SizeAtCompileTime-1) * ei_functor_traits<Visitor>::Cost
- <= EIGEN_UNROLLING_LIMIT;
- return ei_visitor_impl<Visitor, Derived,
+ enum { unroll = SizeAtCompileTime != Dynamic
+ && CoeffReadCost != Dynamic
+ && (SizeAtCompileTime == 1 || internal::functor_traits<Visitor>::Cost != Dynamic)
+ && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost
+ <= EIGEN_UNROLLING_LIMIT };
+ return internal::visitor_impl<Visitor, Derived,
unroll ? int(SizeAtCompileTime) : Dynamic
>::run(derived(), visitor);
}
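A complete visitor in the style of the MyVisitor sketch above (hypothetical example; the default internal::functor_traits supplies a cost for it):

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// Tracks the coefficient of largest absolute value and its location.
struct AbsMaxVisitor {
  typedef Eigen::Matrix2d::Index Index;
  double res; Index row, col;
  void init(const double& value, Index i, Index j)
  { res = std::abs(value); row = i; col = j; }
  void operator()(const double& value, Index i, Index j)
  { if (std::abs(value) > res) { res = std::abs(value); row = i; col = j; } }
};

int main()
{
  Eigen::Matrix2d m;
  m << 1, -7,
       3,  2;
  AbsMaxVisitor v;
  m.visit(v);
  std::cout << v.res << " at (" << v.row << "," << v.col << ")\n"; // 7 at (0,1)
}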
+namespace internal {
+
/** \internal
* \brief Base class to implement min and max visitors
*/
-template <typename Scalar>
-struct ei_coeff_visitor
+template <typename Derived>
+struct coeff_visitor
{
- int row, col;
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ Index row, col;
Scalar res;
- inline void init(const Scalar& value, int i, int j)
+ inline void init(const Scalar& value, Index i, Index j)
{
res = value;
row = i;
@@ -112,12 +122,14 @@ struct ei_coeff_visitor
/** \internal
* \brief Visitor computing the min coefficient with its value and coordinates
*
- * \sa MatrixBase::minCoeff(int*, int*)
+ * \sa DenseBase::minCoeff(Index*, Index*)
*/
-template <typename Scalar>
-struct ei_min_coeff_visitor : ei_coeff_visitor<Scalar>
+template <typename Derived>
+struct min_coeff_visitor : coeff_visitor<Derived>
{
- void operator() (const Scalar& value, int i, int j)
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ void operator() (const Scalar& value, Index i, Index j)
{
if(value < this->res)
{
@@ -129,7 +141,7 @@ struct ei_min_coeff_visitor : ei_coeff_visitor<Scalar>
};
template<typename Scalar>
-struct ei_functor_traits<ei_min_coeff_visitor<Scalar> > {
+struct functor_traits<min_coeff_visitor<Scalar> > {
enum {
Cost = NumTraits<Scalar>::AddCost
};
@@ -138,12 +150,14 @@ struct ei_functor_traits<ei_min_coeff_visitor<Scalar> > {
/** \internal
* \brief Visitor computing the max coefficient with its value and coordinates
*
- * \sa MatrixBase::maxCoeff(int*, int*)
+ * \sa DenseBase::maxCoeff(Index*, Index*)
*/
-template <typename Scalar>
-struct ei_max_coeff_visitor : ei_coeff_visitor<Scalar>
+template <typename Derived>
+struct max_coeff_visitor : coeff_visitor<Derived>
{
- void operator() (const Scalar& value, int i, int j)
+ typedef typename Derived::Index Index;
+ typedef typename Derived::Scalar Scalar;
+ void operator() (const Scalar& value, Index i, Index j)
{
if(value > this->res)
{
@@ -155,22 +169,25 @@ struct ei_max_coeff_visitor : ei_coeff_visitor<Scalar>
};
template<typename Scalar>
-struct ei_functor_traits<ei_max_coeff_visitor<Scalar> > {
+struct functor_traits<max_coeff_visitor<Scalar> > {
enum {
Cost = NumTraits<Scalar>::AddCost
};
};
+} // end namespace internal
+
/** \returns the minimum of all coefficients of *this
* and puts in *row and *col its location.
*
- * \sa MatrixBase::minCoeff(int*), MatrixBase::maxCoeff(int*,int*), MatrixBase::visitor(), MatrixBase::minCoeff()
+ * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff()
*/
template<typename Derived>
-typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::minCoeff(int* row, int* col) const
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::minCoeff(IndexType* row, IndexType* col) const
{
- ei_min_coeff_visitor<Scalar> minVisitor;
+ internal::min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
*row = minVisitor.row;
if (col) *col = minVisitor.col;
@@ -180,14 +197,15 @@ MatrixBase<Derived>::minCoeff(int* row, int* col) const
/** \returns the minimum of all coefficients of *this
* and puts in *index its location.
*
- * \sa MatrixBase::minCoeff(int*,int*), MatrixBase::maxCoeff(int*,int*), MatrixBase::visitor(), MatrixBase::minCoeff()
+ * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::minCoeff()
*/
template<typename Derived>
-typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::minCoeff(int* index) const
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::minCoeff(IndexType* index) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- ei_min_coeff_visitor<Scalar> minVisitor;
+ internal::min_coeff_visitor<Derived> minVisitor;
this->visit(minVisitor);
*index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row;
return minVisitor.res;
@@ -196,13 +214,14 @@ MatrixBase<Derived>::minCoeff(int* index) const
/** \returns the maximum of all coefficients of *this
* and puts in *row and *col its location.
*
- * \sa MatrixBase::minCoeff(int*,int*), MatrixBase::visitor(), MatrixBase::maxCoeff()
+ * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
*/
template<typename Derived>
-typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::maxCoeff(int* row, int* col) const
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::maxCoeff(IndexType* row, IndexType* col) const
{
- ei_max_coeff_visitor<Scalar> maxVisitor;
+ internal::max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor);
*row = maxVisitor.row;
if (col) *col = maxVisitor.col;
@@ -212,14 +231,15 @@ MatrixBase<Derived>::maxCoeff(int* row, int* col) const
/** \returns the maximum of all coefficients of *this
* and puts in *index its location.
*
- * \sa MatrixBase::maxCoeff(int*,int*), MatrixBase::minCoeff(int*,int*), MatrixBase::visitor(), MatrixBase::maxCoeff()
+ * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
*/
template<typename Derived>
-typename ei_traits<Derived>::Scalar
-MatrixBase<Derived>::maxCoeff(int* index) const
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::maxCoeff(IndexType* index) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
- ei_max_coeff_visitor<Scalar> maxVisitor;
+ internal::max_coeff_visitor<Derived> maxVisitor;
this->visit(maxVisitor);
*index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row;
return maxVisitor.res;
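Usage of the relocated minCoeff/maxCoeff variants (illustrative sketch):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf m(2, 2);
  m << 3, 1,
       2, 9;
  Eigen::MatrixXf::Index i, j;
  float mx = m.maxCoeff(&i, &j);
  std::cout << mx << " at (" << i << "," << j << ")\n"; // 9 at (1,1)
}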
diff --git a/extern/Eigen3/Eigen/src/Core/arch/AltiVec/Complex.h b/extern/Eigen3/Eigen/src/Core/arch/AltiVec/Complex.h
new file mode 100644
index 00000000000..f8adf1b6385
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/AltiVec/Complex.h
@@ -0,0 +1,228 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPLEX_ALTIVEC_H
+#define EIGEN_COMPLEX_ALTIVEC_H
+
+namespace internal {
+
+static Packet4ui p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_ZERO_);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+static Packet16uc p16uc_COMPLEX_RE = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
+static Packet16uc p16uc_COMPLEX_IM = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
+static Packet16uc p16uc_COMPLEX_REV = vec_sld(p16uc_REVERSE, p16uc_REVERSE, 8);//{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
+static Packet16uc p16uc_COMPLEX_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8);//{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
+static Packet16uc p16uc_PSET_HI = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 1));//{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };
+static Packet16uc p16uc_PSET_LO = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 2), (Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 3));//{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 };
+
+//---------- float ----------
+struct Packet2cf
+{
+ EIGEN_STRONG_INLINE Packet2cf() {}
+ EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+ Packet4f v;
+};
+
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
+{
+ typedef Packet2cf type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; };
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+ Packet2cf res;
+ /* On AltiVec we cannot load 64-bit registers, so we have to take care of alignment */
+ if((ptrdiff_t(&from) % 16) == 0)
+ res.v = pload<Packet4f>((const float *)&from);
+ else
+ res.v = ploadu<Packet4f>((const float *)&from);
+ res.v = vec_perm(res.v, res.v, p16uc_PSET_HI);
+ return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_add(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_sub(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf((Packet4f)vec_xor((Packet4ui)a.v, p4ui_CONJ_XOR)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ Packet4f v1, v2;
+
+ // Get the real parts of a
+ v1 = vec_perm(a.v, a.v, p16uc_COMPLEX_RE);
+ // Get the imaginary parts of a
+ v2 = vec_perm(a.v, a.v, p16uc_COMPLEX_IM);
+ // multiply a_re * b
+ v1 = vec_madd(v1, b.v, p4f_ZERO);
+ // multiply a_im * b and get the conjugate result
+ v2 = vec_madd(v2, b.v, p4f_ZERO);
+ v2 = (Packet4f) vec_xor((Packet4ui)v2, p4ui_CONJ_XOR);
+ // permute back to a proper order
+ v2 = vec_perm(v2, v2, p16uc_COMPLEX_REV);
+
+ return Packet2cf(vec_add(v1, v2));
+}
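In scalar terms, the permute/XOR sequence above is the ordinary complex product, rearranged so both halves are plain packet multiplies (worked out here for one pair (ar + i*ai) * (br + i*bi)):

// v1 = (ar, ar) * (br, bi)                 = (ar*br, ar*bi)
// v2 = (ai, ai) * (br, bi)                 = (ai*br, ai*bi)
//      after CONJ_XOR:                       (ai*br, -ai*bi)
//      after the COMPLEX_REV permute:        (-ai*bi, ai*br)
// v1 + v2 = (ar*br - ai*bi, ar*bi + ai*br)  // the complex product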
+
+template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_and(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_or(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_xor(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_and(a.v, vec_nor(b.v,b.v))); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from)
+{
+ return pset1<Packet2cf>(*from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { vec_dstt((float *)addr, DST_CTRL(2,2,32), DST_CHAN); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+{
+ std::complex<float> EIGEN_ALIGN16 res[2];
+ pstore((float *)&res, a.v);
+
+ return res[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+ Packet4f rev_a;
+ rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX_REV2);
+ return Packet2cf(rev_a);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+ Packet4f b;
+ b = (Packet4f) vec_sld(a.v, a.v, 8);
+ b = padd(a.v, b);
+ return pfirst(Packet2cf(b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+ Packet4f b1, b2;
+
+ b1 = (Packet4f) vec_sld(vecs[0].v, vecs[1].v, 8);
+ b2 = (Packet4f) vec_sld(vecs[1].v, vecs[0].v, 8);
+ b2 = (Packet4f) vec_sld(b2, b2, 8);
+ b2 = padd(b1, b2);
+
+ return Packet2cf(b2);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+ Packet4f b;
+ Packet2cf prod;
+ b = (Packet4f) vec_sld(a.v, a.v, 8);
+ prod = pmul(a, Packet2cf(b));
+
+ return pfirst(prod);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+ EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second)
+ {
+ if (Offset==1)
+ {
+ first.v = vec_sld(first.v, second.v, 8);
+ }
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ return internal::pmul(a, pconj(b));
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ return internal::pmul(pconj(a), b);
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ return pconj(internal::pmul(a, b));
+ }
+};
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ // TODO optimize it for AltiVec
+ Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+ Packet4f s = vec_madd(b.v, b.v, p4f_ZERO);
+ return Packet2cf(pdiv(res.v, vec_add(s,vec_perm(s, s, p16uc_COMPLEX_REV))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& x)
+{
+ return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX_REV));
+}
+
+} // end namespace internal
+
+#endif // EIGEN_COMPLEX_ALTIVEC_H
diff --git a/extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h b/extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h
new file mode 100644
index 00000000000..dc34ebbd660
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -0,0 +1,509 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Konstantinos Margaritis <markos@codex.gr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PACKET_MATH_ALTIVEC_H
+#define EIGEN_PACKET_MATH_ALTIVEC_H
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
+#endif
+
+#ifndef EIGEN_HAS_FUSE_CJMADD
+#define EIGEN_HAS_FUSE_CJMADD 1
+#endif
+
+// NOTE AltiVec has 32 registers, but Eigen only accepts a value of 8 or 16
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
+#endif
+
+typedef __vector float Packet4f;
+typedef __vector int Packet4i;
+typedef __vector unsigned int Packet4ui;
+typedef __vector __bool int Packet4bi;
+typedef __vector short int Packet8i;
+typedef __vector unsigned char Packet16uc;
+
+// We don't want to write the same code all the time, but we need to reuse the constants
+// and it doesn't really work to declare them global, so we define macros instead
+
+#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \
+ Packet4f p4f_##NAME = (Packet4f) vec_splat_s32(X)
+
+#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \
+ Packet4i p4i_##NAME = vec_splat_s32(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+ Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+ Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int>(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+ Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+#define DST_CHAN 1
+#define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride))
+
+// Define global static constants:
+static Packet4f p4f_COUNTDOWN = { 3.0, 2.0, 1.0, 0.0 };
+static Packet4i p4i_COUNTDOWN = { 3, 2, 1, 0 };
+static Packet16uc p16uc_REVERSE = {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3};
+static Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0);
+static Packet16uc p16uc_DUPLICATE = {0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7};
+
+static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0);
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0);
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1);
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16);
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1);
+static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0);
+static Packet4f p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1);
+
+template<> struct packet_traits<float> : default_packet_traits
+{
+ typedef Packet4f type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=4,
+
+ // FIXME check the Has*
+ HasSin = 0,
+ HasCos = 0,
+ HasLog = 0,
+ HasExp = 0,
+ HasSqrt = 0
+ };
+};
+template<> struct packet_traits<int> : default_packet_traits
+{
+ typedef Packet4i type;
+ enum {
+ // FIXME check the Has*
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=4
+ };
+};
+
+template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; };
+/*
+inline std::ostream & operator <<(std::ostream & s, const Packet4f & v)
+{
+ union {
+ Packet4f v;
+ float n[4];
+ } vt;
+ vt.v = v;
+ s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
+ return s;
+}
+
+inline std::ostream & operator <<(std::ostream & s, const Packet4i & v)
+{
+ union {
+ Packet4i v;
+ int n[4];
+ } vt;
+ vt.v = v;
+ s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
+ return s;
+}
+
+inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v)
+{
+ union {
+ Packet4ui v;
+ unsigned int n[4];
+ } vt;
+ vt.v = v;
+ s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
+ return s;
+}
+
+inline std::ostream & operator <<(std::ostream & s, const Packetbi & v)
+{
+ union {
+ Packet4bi v;
+ unsigned int n[4];
+ } vt;
+ vt.v = v;
+ s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
+ return s;
+}
+*/
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) {
+ // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+ float EIGEN_ALIGN16 af[4];
+ af[0] = from;
+ Packet4f vc = vec_ld(0, af);
+ vc = vec_splat(vc, 0);
+ return vc;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) {
+ int EIGEN_ALIGN16 ai[4];
+ ai[0] = from;
+ Packet4i vc = vec_ld(0, ai);
+ vc = vec_splat(vc, 0);
+ return vc;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return vec_add(pset1<Packet4f>(a), p4f_COUNTDOWN); }
+template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return vec_add(pset1<Packet4i>(a), p4i_COUNTDOWN); }
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_add(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_add(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_sub(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_sub(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return psub<Packet4f>(p4f_ZERO, a); }
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return psub<Packet4i>(p4i_ZERO, a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b,p4f_ZERO); }
+/* Commented out: it's actually slower than scalar processing
+ *
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+ // Detailed in: http://freevec.org/content/32bit_signed_integer_multiplication_altivec
+ //Set up constants, variables
+ Packet4i a1, b1, bswap, low_prod, high_prod, prod, prod_, v1sel;
+
+ // Get the absolute values
+ a1 = vec_abs(a);
+ b1 = vec_abs(b);
+
+ // Get the signs using xor
+ Packet4bi sgn = (Packet4bi) vec_cmplt(vec_xor(a, b), p4i_ZERO);
+
+ // Do the multiplication for the absolute values.
+ bswap = (Packet4i) vec_rl((Packet4ui) b1, (Packet4ui) p4i_MINUS16 );
+ low_prod = vec_mulo((Packet8i) a1, (Packet8i)b1);
+ high_prod = vec_msum((Packet8i) a1, (Packet8i) bswap, p4i_ZERO);
+ high_prod = (Packet4i) vec_sl((Packet4ui) high_prod, (Packet4ui) p4i_MINUS16);
+ prod = vec_add( low_prod, high_prod );
+
+ // NOR the product and select only the negative elements according to the sign mask
+ prod_ = vec_nor(prod, prod);
+ prod_ = vec_sel(p4i_ZERO, prod_, sgn);
+
+ // Add 1 to the result to get the negative numbers
+ v1sel = vec_sel(p4i_ZERO, p4i_ONE, sgn);
+ prod_ = vec_add(prod_, v1sel);
+
+ // Merge the results back to the final vector.
+ prod = vec_sel(prod, prod_, sgn);
+
+ return prod;
+}
+*/
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f t, y_0, y_1, res;
+
+ // AltiVec does not offer a divide instruction, so we have to use a reciprocal approximation
+ y_0 = vec_re(b);
+
+ // Do one Newton-Raphson iteration to get the needed accuracy
+ t = vec_nmsub(y_0, b, p4f_ONE);
+ y_1 = vec_madd(y_0, t, y_0);
+
+ res = vec_madd(a, y_1, p4f_ZERO);
+ return res;
+}
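+// A scalar sketch of the refinement above (illustrative only):
+//   y0 = estimate(1/b);   // vec_re: ~12 accurate bits
+//   t  = 1 - b*y0;        // vec_nmsub: the residual
+//   y1 = y0 + y0*t;       // vec_madd: roughly doubles the accurate bits
+//   a/b ~= a*y1           // final vec_madd with p4f_ZERO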
+
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by AltiVec");
+ return pset1<Packet4i>(0);
+}
+
+// For some weird reason, pmadd has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); }
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
+
+// Logical operations: AltiVec's vec_and/vec_or/vec_xor accept float vectors directly, so no reinterpret casts are needed here
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+{
+ EIGEN_DEBUG_ALIGNED_LOAD
+ // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+ Packet16uc MSQ, LSQ;
+ Packet16uc mask;
+ MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword
+ LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword
+ mask = vec_lvsl(0, from); // create the permute mask
+ return (Packet4f) vec_perm(MSQ, LSQ, mask); // align the data
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
+{
+ EIGEN_DEBUG_ALIGNED_LOAD
+ // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+ Packet16uc MSQ, LSQ;
+ Packet16uc mask;
+ MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword
+ LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword
+ mask = vec_lvsl(0, from); // create the permute mask
+ return (Packet4i) vec_perm(MSQ, LSQ, mask); // align the data
+}
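+// How the unaligned loads above work (sketch): an unaligned 16-byte value
+// always spans at most two aligned quadwords. vec_ld silently rounds the
+// address down, so MSQ and LSQ are the two aligned blocks covering bytes
+// [from, from+15], and vec_lvsl builds the permute pattern that selects the
+// 16 wanted bytes from the 32-byte concatenation MSQ:LSQ. Scalar equivalent:
+//   offset = uintptr_t(from) & 15;
+//   for (i = 0; i < 16; ++i) out[i] = (MSQ:LSQ)[offset + i];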
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+ Packet4f p;
+ if((ptrdiff_t(from) % 16) == 0)  p = pload<Packet4f>(from); // test the data pointer itself, not the address of the argument
+ else p = ploadu<Packet4f>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE);
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
+{
+ Packet4i p;
+ if((ptrdiff_t(from) % 16) == 0)  p = pload<Packet4i>(from); // test the data pointer itself, not the address of the argument
+ else p = ploadu<Packet4i>(from);
+ return vec_perm(p, p, p16uc_DUPLICATE);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from)
+{
+ EIGEN_DEBUG_UNALIGNED_STORE
+ // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+ // Warning: not thread safe!
+ Packet16uc MSQ, LSQ, edges;
+ Packet16uc edgeAlign, align;
+
+ MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword
+ LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword
+ edgeAlign = vec_lvsl(0, to); // permute map to extract edges
+ edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges
+ align = vec_lvsr( 0, to ); // permute map to misalign data
+ MSQ = vec_perm(edges,(Packet16uc)from,align); // misalign the data (MSQ)
+ LSQ = vec_perm((Packet16uc)from,edges,align); // misalign the data (LSQ)
+ vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first
+ vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)
+{
+ EIGEN_DEBUG_UNALIGNED_STORE
+ // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+ // Warning: not thread safe!
+ Packet16uc MSQ, LSQ, edges;
+ Packet16uc edgeAlign, align;
+
+ MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword
+ LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword
+ edgeAlign = vec_lvsl(0, to); // permute map to extract edges
+ edges=vec_perm(LSQ, MSQ, edgeAlign); // extract the edges
+ align = vec_lvsr( 0, to ); // permute map to misalign data
+ MSQ = vec_perm(edges, (Packet16uc) from, align); // misalign the data (MSQ)
+ LSQ = vec_perm((Packet16uc) from, edges, align); // misalign the data (LSQ)
+ vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first
+ vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
+}
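+// Why the "not thread safe" warning: the unaligned store is a
+// read-modify-write of the two aligned quadwords overlapping [to, to+15].
+// If another thread (or a signal handler) writes to the bytes of those
+// quadwords that lie outside the stored range between the vec_ld and the
+// vec_st, its update is silently lost.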
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); }
+
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return (Packet4f)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return (Packet4i)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, sum;
+ b = (Packet4f) vec_sld(a, a, 8);
+ sum = vec_add(a, b);
+ b = (Packet4f) vec_sld(sum, sum, 4);
+ sum = vec_add(sum, b);
+ return pfirst(sum);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+ Packet4f v[4], sum[4];
+
+ // It's easier and faster to transpose first and then add column-wise
+ // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
+ // Do the transpose, first set of moves
+ v[0] = vec_mergeh(vecs[0], vecs[2]);
+ v[1] = vec_mergel(vecs[0], vecs[2]);
+ v[2] = vec_mergeh(vecs[1], vecs[3]);
+ v[3] = vec_mergel(vecs[1], vecs[3]);
+ // Get the resulting vectors
+ sum[0] = vec_mergeh(v[0], v[2]);
+ sum[1] = vec_mergel(v[0], v[2]);
+ sum[2] = vec_mergeh(v[1], v[3]);
+ sum[3] = vec_mergel(v[1], v[3]);
+
+ // Now do the summation:
+ // Lines 0+1
+ sum[0] = vec_add(sum[0], sum[1]);
+ // Lines 2+3
+ sum[1] = vec_add(sum[2], sum[3]);
+ // Add the results
+ sum[0] = vec_add(sum[0], sum[1]);
+
+ return sum[0];
+}
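+// Sketch of the idea (illustrative): the merges above build the transpose,
+// so summing the transposed rows yields all four horizontal sums at once:
+//   | a0 a1 a2 a3 |             | a0 b0 c0 d0 |
+//   | b0 b1 b2 b3 |  transpose  | a1 b1 c1 d1 |   three vector adds give
+//   | c0 c1 c2 c3 |   ------>   | a2 b2 c2 d2 |   (sum a, sum b, sum c, sum d)
+//   | d0 d1 d2 d3 |             | a3 b3 c3 d3 |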
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+ Packet4i sum;
+ sum = vec_sums(a, p4i_ZERO);      // saturated sum across a; the total lands in element 3
+ sum = vec_sld(sum, p4i_ZERO, 12); // shift left by 12 bytes to move it into element 0
+ return pfirst(sum);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+ Packet4i v[4], sum[4];
+
+ // It's easier and faster to transpose first and then add column-wise
+ // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
+ // Do the transpose, first set of moves
+ v[0] = vec_mergeh(vecs[0], vecs[2]);
+ v[1] = vec_mergel(vecs[0], vecs[2]);
+ v[2] = vec_mergeh(vecs[1], vecs[3]);
+ v[3] = vec_mergel(vecs[1], vecs[3]);
+ // Get the resulting vectors
+ sum[0] = vec_mergeh(v[0], v[2]);
+ sum[1] = vec_mergel(v[0], v[2]);
+ sum[2] = vec_mergeh(v[1], v[3]);
+ sum[3] = vec_mergel(v[1], v[3]);
+
+ // Now do the summation:
+ // Lines 0+1
+ sum[0] = vec_add(sum[0], sum[1]);
+ // Lines 2+3
+ sum[1] = vec_add(sum[2], sum[3]);
+ // Add the results
+ sum[0] = vec_add(sum[0], sum[1]);
+
+ return sum[0];
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+ Packet4f prod;
+ prod = pmul(a, (Packet4f)vec_sld(a, a, 8));
+ return pfirst(pmul(prod, (Packet4f)vec_sld(prod, prod, 4)));
+}
+
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+ EIGEN_ALIGN16 int aux[4];
+ pstore(aux, a);
+ return aux[0] * aux[1] * aux[2] * aux[3];
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, res;
+ b = vec_min(a, vec_sld(a, a, 8));
+ res = vec_min(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+ Packet4i b, res;
+ b = vec_min(a, vec_sld(a, a, 8));
+ res = vec_min(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+ Packet4f b, res;
+ b = vec_max(a, vec_sld(a, a, 8));
+ res = vec_max(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+ Packet4i b, res;
+ b = vec_max(a, vec_sld(a, a, 8));
+ res = vec_max(b, vec_sld(b, b, 4));
+ return pfirst(res);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+ EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second)
+ {
+ if (Offset!=0)
+ first = vec_sld(first, second, Offset*4);
+ }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+ EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second)
+ {
+ if (Offset!=0)
+ first = vec_sld(first, second, Offset*4);
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_PACKET_MATH_ALTIVEC_H
diff --git a/extern/Eigen3/Eigen/src/Core/arch/Default/Settings.h b/extern/Eigen3/Eigen/src/Core/arch/Default/Settings.h
new file mode 100644
index 00000000000..957adc8fe42
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/Default/Settings.h
@@ -0,0 +1,64 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+
+/* All the parameters defined in this file can be specialized in the
+ * architecture specific files, and/or by the user.
+ * More to come... */
+
+#ifndef EIGEN_DEFAULT_SETTINGS_H
+#define EIGEN_DEFAULT_SETTINGS_H
+
+/** Defines the maximal loop size to enable meta unrolling of loops.
+ * Note that the value here is expressed in Eigen's own notion of "number of FLOPS",
+ * it does not correspond to the number of iterations or the number of instructions
+ */
+#ifndef EIGEN_UNROLLING_LIMIT
+#define EIGEN_UNROLLING_LIMIT 100
+#endif
+
+/** Defines the threshold between a "small" and a "large" matrix.
+ * This threshold is mainly used to select the proper product implementation.
+ */
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+/** Defines the maximal width of the blocks used in the triangular product and solver
+ * for vectors (level 2 blas xTRMV and xTRSV). The default is 8.
+ */
+#ifndef EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH
+#define EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH 8
+#endif
+
+
+/** Defines the default number of registers available for that architecture.
+ * Currently it must be 8 or 16. Other values will fail.
+ */
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
+#endif
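+
+/* Usage sketch: since every macro above is #ifndef-guarded, it can be
+ * overridden by defining it before including any Eigen header; the values
+ * below are only an example, not a recommendation:
+ *
+ *   #define EIGEN_UNROLLING_LIMIT 50
+ *   #define EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH 16
+ *   #include <Eigen/Core>
+ */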
+
+#endif // EIGEN_DEFAULT_SETTINGS_H
diff --git a/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h b/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h
new file mode 100644
index 00000000000..8e55548c946
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h
@@ -0,0 +1,270 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPLEX_NEON_H
+#define EIGEN_COMPLEX_NEON_H
+
+namespace internal {
+
+static uint32x4_t p4ui_CONJ_XOR = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+static uint32x2_t p2ui_CONJ_XOR = { 0x00000000, 0x80000000 };
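+// XORing a packet with these masks flips the sign bit of the lanes holding
+// imaginary parts, which is exactly complex conjugation.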
+
+//---------- float ----------
+struct Packet2cf
+{
+ EIGEN_STRONG_INLINE Packet2cf() {}
+ EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+ Packet4f v;
+};
+
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
+{
+ typedef Packet2cf type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; };
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+ float32x2_t r64;
+ r64 = vld1_f32((float *)&from);
+
+ return Packet2cf(vcombine_f32(r64, r64));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate<Packet4f>(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
+{
+ Packet4ui b = vreinterpretq_u32_f32(a.v);
+ return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ Packet4f v1, v2;
+ float32x2_t a_lo, a_hi;
+
+ // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
+ v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0));
+ // Get the imaginary values of a | a1_im | a1_im | a2_im | a2_im |
+ v2 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 1), vdup_lane_f32(vget_high_f32(a.v), 1));
+ // Multiply the real a with b
+ v1 = vmulq_f32(v1, b.v);
+ // Multiply the imag a with b
+ v2 = vmulq_f32(v2, b.v);
+ // Conjugate v2
+ v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR));
+ // Swap real/imag elements in v2.
+ a_lo = vrev64_f32(vget_low_f32(v2));
+ a_hi = vrev64_f32(vget_high_f32(v2));
+ v2 = vcombine_f32(a_lo, a_hi);
+ // Add and return the result
+ return Packet2cf(vaddq_f32(v1, v2));
+}
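+// The algebra behind pmul (sketch): per complex lane,
+//   (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br)
+// v1 = (ar,ar)*(br,bi) = (ar*br, ar*bi) and v2 = (ai,ai)*(br,bi) = (ai*br, ai*bi);
+// conjugating v2 gives (ai*br, -ai*bi) and the swap turns it into (-ai*bi, ai*br),
+// so v1 + v2 is exactly the complex product.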
+
+template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { __pld((float *)addr); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+{
+ std::complex<float> EIGEN_ALIGN16 x[2];
+ vst1q_f32((float *)x, a.v);
+ return x[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+ float32x2_t a_lo, a_hi;
+ Packet4f a_r128;
+
+ a_lo = vget_low_f32(a.v);
+ a_hi = vget_high_f32(a.v);
+ a_r128 = vcombine_f32(a_hi, a_lo);
+
+ return Packet2cf(a_r128);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a)
+{
+ return Packet2cf(vrev64q_f32(a.v));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+ float32x2_t a1, a2;
+ std::complex<float> s;
+
+ a1 = vget_low_f32(a.v);
+ a2 = vget_high_f32(a.v);
+ a2 = vadd_f32(a1, a2);
+ vst1_f32((float *)&s, a2);
+
+ return s;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+ Packet4f sum1, sum2, sum;
+
+ // Combine the low halves of vecs[0] and vecs[1], then their high halves, and add the two
+ sum1 = vcombine_f32(vget_low_f32(vecs[0].v), vget_low_f32(vecs[1].v));
+ sum2 = vcombine_f32(vget_high_f32(vecs[0].v), vget_high_f32(vecs[1].v));
+ sum = vaddq_f32(sum1, sum2);
+
+ return Packet2cf(sum);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+ float32x2_t a1, a2, v1, v2, prod;
+ std::complex<float> s;
+
+ a1 = vget_low_f32(a.v);
+ a2 = vget_high_f32(a.v);
+ // Broadcast the real part of a1: | a1_re | a1_re |
+ v1 = vdup_lane_f32(a1, 0);
+ // Broadcast the imaginary part of a1: | a1_im | a1_im |
+ v2 = vdup_lane_f32(a1, 1);
+ // Multiply the real part of a1 with a2
+ v1 = vmul_f32(v1, a2);
+ // Multiply the imaginary part of a1 with a2
+ v2 = vmul_f32(v2, a2);
+ // Conjugate v2
+ v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR));
+ // Swap real/imag elements in v2.
+ v2 = vrev64_f32(v2);
+ // Add v1, v2
+ prod = vadd_f32(v1, v2);
+
+ vst1_f32((float *)&s, prod);
+
+ return s;
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+ EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second)
+ {
+ if (Offset==1)
+ {
+ first.v = vextq_f32(first.v, second.v, 2);
+ }
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ return internal::pmul(a, pconj(b));
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ return internal::pmul(pconj(a), b);
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ return pconj(internal::pmul(a, b));
+ }
+};
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ // TODO optimize it for AltiVec
+ Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+ Packet4f s, rev_s;
+ float32x2_t a_lo, a_hi;
+
+ // this computes the squared magnitudes: s + rev_s will hold |b|^2 in both lanes of each complex
+ s = vmulq_f32(b.v, b.v);
+ a_lo = vrev64_f32(vget_low_f32(s));
+ a_hi = vrev64_f32(vget_high_f32(s));
+ rev_s = vcombine_f32(a_lo, a_hi);
+
+ return Packet2cf(pdiv(res.v, vaddq_f32(s,rev_s)));
+}
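+// The identity used here (sketch): a/b = a*conj(b) / |b|^2. conj_helper
+// computes the conjugated product, while s + rev_s duplicates
+// re(b)^2 + im(b)^2 into both lanes of each complex, so one packet division
+// finishes the job.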
+
+} // end namespace internal
+
+#endif // EIGEN_COMPLEX_NEON_H
diff --git a/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h b/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
new file mode 100644
index 00000000000..478ef8038c0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -0,0 +1,420 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Konstantinos Margaritis <markos@codex.gr>
+// Heavily based on Gael's SSE version.
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PACKET_MATH_NEON_H
+#define EIGEN_PACKET_MATH_NEON_H
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+// FIXME NEON has 16 quad registers, but since the current register allocator
+// is so bad, it is much better to reduce it to 8
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
+#endif
+
+typedef float32x4_t Packet4f;
+typedef int32x4_t Packet4i;
+typedef uint32x4_t Packet4ui;
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+ const Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+ const Packet4f p4f_##NAME = vreinterpretq_f32_s32(pset1<Packet4i>(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+ const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+#ifndef __pld
+#define __pld(x) asm volatile ( " pld [%[addr]]\n" :: [addr] "r" (x) : "cc" );
+#endif
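+// `pld` is the ARM cache-preload hint instruction; it is advisory only and
+// never faults, so issuing it on an address near the end of a buffer is safe.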
+
+template<> struct packet_traits<float> : default_packet_traits
+{
+ typedef Packet4f type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 4,
+
+ HasDiv = 1,
+ // FIXME check the Has*
+ HasSin = 0,
+ HasCos = 0,
+ HasLog = 0,
+ HasExp = 0,
+ HasSqrt = 0
+ };
+};
+template<> struct packet_traits<int> : default_packet_traits
+{
+ typedef Packet4i type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=4
+ // FIXME check the Has*
+ };
+};
+
+#if EIGEN_GNUC_AT_MOST(4,4)
+// work around a gcc 4.2, 4.3 and 4.4 compilation issue
+EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
+EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
+#endif
+
+template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; };
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return vdupq_n_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a)
+{
+ Packet4f countdown = { 0, 1, 2, 3 };
+ return vaddq_f32(pset1<Packet4f>(a), countdown);
+}
+template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)
+{
+ Packet4i countdown = { 0, 1, 2, 3 };
+ return vaddq_s32(pset1<Packet4i>(a), countdown);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ Packet4f inv, restep, div;
+
+ // NEON does not offer a divide instruction, so we have to use a reciprocal approximation.
+ // However, unlike the other SIMD engines (AltiVec/SSE), NEON offers both a reciprocal
+ // estimate AND a reciprocal step, which saves a few instructions:
+ // vrecpeq_f32() returns an estimate of 1/b, which we then fine-tune with
+ // Newton-Raphson using vrecpsq_f32().
+ inv = vrecpeq_f32(b);
+
+ // This returns a correction factor by which we multiply inv to get a better
+ // approximation of 1/b.
+ restep = vrecpsq_f32(b, inv);
+ inv = vmulq_f32(restep, inv);
+
+ // Finally, multiply a by 1/b and get the wanted result of the division.
+ div = vmulq_f32(a, inv);
+
+ return div;
+}
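+// For reference (sketch): vrecpsq_f32(b, inv) evaluates 2 - b*inv, the
+// Newton-Raphson correction factor, and each inv *= (2 - b*inv) step roughly
+// doubles the number of accurate bits. Extra accuracy, if ever needed, would
+// cost one more pair of instructions:
+//   inv = vmulq_f32(vrecpsq_f32(b, inv), inv);  // optional second iteration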
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by NEON");
+ return pset1<Packet4i>(0);
+}
+
+// For some weird reason, pmadd has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
+
+// Logical operations are not supported for float, so we have to go through reinterpret casts using NEON intrinsics
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+ return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+ float32x2_t lo, hi;
+ lo = vdup_n_f32(*from);
+ hi = vdup_n_f32(*(from+1));
+ return vcombine_f32(lo, hi);
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
+{
+ int32x2_t lo, hi;
+ lo = vdup_n_s32(*from);
+ hi = vdup_n_s32(*(from+1));
+ return vcombine_s32(lo, hi);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { __pld(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { __pld(addr); }
+
+// FIXME only store the first 2 elements?
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
+ float32x2_t a_lo, a_hi;
+ Packet4f a_r64;
+
+ a_r64 = vrev64q_f32(a);
+ a_lo = vget_low_f32(a_r64);
+ a_hi = vget_high_f32(a_r64);
+ return vcombine_f32(a_hi, a_lo);
+}
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
+ int32x2_t a_lo, a_hi;
+ Packet4i a_r64;
+
+ a_r64 = vrev64q_s32(a);
+ a_lo = vget_low_s32(a_r64);
+ a_hi = vget_high_s32(a_r64);
+ return vcombine_s32(a_hi, a_lo);
+}
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+ float32x2_t a_lo, a_hi, sum;
+ float s[2];
+
+ a_lo = vget_low_f32(a);
+ a_hi = vget_high_f32(a);
+ sum = vpadd_f32(a_lo, a_hi);
+ sum = vpadd_f32(sum, sum);
+ vst1_f32(s, sum);
+
+ return s[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+ float32x4x2_t vtrn1, vtrn2, res1, res2;
+ Packet4f sum1, sum2, sum;
+
+ // NEON zip performs interleaving of the supplied vectors.
+ // We perform two interleaves in a row to acquire the transposed vectors
+ vtrn1 = vzipq_f32(vecs[0], vecs[2]);
+ vtrn2 = vzipq_f32(vecs[1], vecs[3]);
+ res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
+ res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
+
+ // Do the addition of the resulting vectors
+ sum1 = vaddq_f32(res1.val[0], res1.val[1]);
+ sum2 = vaddq_f32(res2.val[0], res2.val[1]);
+ sum = vaddq_f32(sum1, sum2);
+
+ return sum;
+}
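+// Sketch of the zip-based transpose (illustrative): vzipq_f32 interleaves,
+// e.g. zip(|a0 a1 a2 a3|, |c0 c1 c2 c3|) = { |a0 c0 a1 c1|, |a2 c2 a3 c3| }.
+// Zipping (vecs[0],vecs[2]) and (vecs[1],vecs[3]) and then zipping those
+// results places element k of every input in the same vector - a 4x4
+// transpose - so three vertical adds yield the four horizontal sums.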
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+ int32x2_t a_lo, a_hi, sum;
+ int32_t s[2];
+
+ a_lo = vget_low_s32(a);
+ a_hi = vget_high_s32(a);
+ sum = vpadd_s32(a_lo, a_hi);
+ sum = vpadd_s32(sum, sum);
+ vst1_s32(s, sum);
+
+ return s[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+ int32x4x2_t vtrn1, vtrn2, res1, res2;
+ Packet4i sum1, sum2, sum;
+
+ // NEON zip performs interleaving of the supplied vectors.
+ // We perform two interleaves in a row to acquire the transposed vectors
+ vtrn1 = vzipq_s32(vecs[0], vecs[2]);
+ vtrn2 = vzipq_s32(vecs[1], vecs[3]);
+ res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
+ res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
+
+ // Do the addition of the resulting vectors
+ sum1 = vaddq_s32(res1.val[0], res1.val[1]);
+ sum2 = vaddq_s32(res2.val[0], res2.val[1]);
+ sum = vaddq_s32(sum1, sum2);
+
+ return sum;
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+ float32x2_t a_lo, a_hi, prod;
+ float s[2];
+
+ // Get a_lo = |a1|a2| and a_hi = |a3|a4|
+ a_lo = vget_low_f32(a);
+ a_hi = vget_high_f32(a);
+ // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
+ prod = vmul_f32(a_lo, a_hi);
+ // Multiply prod with its swapped value |a2*a4|a1*a3|
+ prod = vmul_f32(prod, vrev64_f32(prod));
+ vst1_f32(s, prod);
+
+ return s[0];
+}
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+ int32x2_t a_lo, a_hi, prod;
+ int32_t s[2];
+
+ // Get a_lo = |a1|a2| and a_hi = |a3|a4|
+ a_lo = vget_low_s32(a);
+ a_hi = vget_high_s32(a);
+ // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
+ prod = vmul_s32(a_lo, a_hi);
+ // Multiply prod with its swapped value |a2*a4|a1*a3|
+ prod = vmul_s32(prod, vrev64_s32(prod));
+ vst1_s32(s, prod);
+
+ return s[0];
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+ float32x2_t a_lo, a_hi, min;
+ float s[2];
+
+ a_lo = vget_low_f32(a);
+ a_hi = vget_high_f32(a);
+ min = vpmin_f32(a_lo, a_hi);
+ min = vpmin_f32(min, min);
+ vst1_f32(s, min);
+
+ return s[0];
+}
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+ int32x2_t a_lo, a_hi, min;
+ int32_t s[2];
+
+ a_lo = vget_low_s32(a);
+ a_hi = vget_high_s32(a);
+ min = vpmin_s32(a_lo, a_hi);
+ min = vpmin_s32(min, min);
+ vst1_s32(s, min);
+
+ return s[0];
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+ float32x2_t a_lo, a_hi, max;
+ float s[2];
+
+ a_lo = vget_low_f32(a);
+ a_hi = vget_high_f32(a);
+ max = vpmax_f32(a_lo, a_hi);
+ max = vpmax_f32(max, max);
+ vst1_f32(s, max);
+
+ return s[0];
+}
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+ int32x2_t a_lo, a_hi, max;
+ int32_t s[2];
+
+ a_lo = vget_low_s32(a);
+ a_hi = vget_high_s32(a);
+ max = vpmax_s32(a_lo, a_hi);
+ max = vpmax_s32(max, max);
+ vst1_s32(s, max);
+
+ return s[0];
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+ EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second)
+ {
+ if (Offset!=0)
+ first = vextq_f32(first, second, Offset);
+ }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+ EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second)
+ {
+ if (Offset!=0)
+ first = vextq_s32(first, second, Offset);
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_PACKET_MATH_NEON_H
diff --git a/extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h b/extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h
new file mode 100644
index 00000000000..c352bb3e6cf
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/SSE/Complex.h
@@ -0,0 +1,447 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPLEX_SSE_H
+#define EIGEN_COMPLEX_SSE_H
+
+namespace internal {
+
+//---------- float ----------
+struct Packet2cf
+{
+ EIGEN_STRONG_INLINE Packet2cf() {}
+ EIGEN_STRONG_INLINE explicit Packet2cf(const __m128& a) : v(a) {}
+ __m128 v;
+};
+
+template<> struct packet_traits<std::complex<float> > : default_packet_traits
+{
+ typedef Packet2cf type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; };
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a)
+{
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
+ return Packet2cf(_mm_xor_ps(a.v,mask));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
+{
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
+ return Packet2cf(_mm_xor_ps(a.v,mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ // TODO optimize it for SSE3 and 4
+ #ifdef EIGEN_VECTORIZE_SSE3
+ return Packet2cf(_mm_addsub_ps(_mm_mul_ps(_mm_moveldup_ps(a.v), b.v),
+ _mm_mul_ps(_mm_movehdup_ps(a.v),
+ vec4f_swizzle1(b.v, 1, 0, 3, 2))));
+// return Packet2cf(_mm_addsub_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
+// _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+// vec4f_swizzle1(b.v, 1, 0, 3, 2))));
+ #else
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x00000000,0x80000000,0x00000000));
+ return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
+ _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+ vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));
+ #endif
+}
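+// Sketch of the two paths above: per lane a = (ar, ai),
+// _mm_moveldup_ps(a) = (ar, ar), _mm_movehdup_ps(a) = (ai, ai), and the
+// swizzle turns b = (br, bi) into (bi, br); _mm_addsub_ps subtracts in even
+// lanes and adds in odd ones, producing (ar*br - ai*bi, ar*bi + ai*br)
+// directly. The SSE2 fallback emulates addsub by XOR-flipping the sign of
+// the even lanes of the second product before a plain add.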
+
+template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&real_ref(*from))); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&real_ref(*from))); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+{
+ Packet2cf res;
+ #if EIGEN_GNUC_AT_MOST(4,2)
+ // workaround annoying "may be used uninitialized in this function" warning with gcc 4.2
+ res.v = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)&from);
+ #else
+ res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
+ #endif
+ return Packet2cf(_mm_movelh_ps(res.v,res.v));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&real_ref(*to), from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&real_ref(*to), from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+{
+ #if EIGEN_GNUC_AT_MOST(4,3)
+ // Workaround for a gcc 4.2 ICE - this is not ideal performance-wise, but who cares...
+ // This workaround also fixes invalid code generation with gcc 4.3
+ EIGEN_ALIGN16 std::complex<float> res[2];
+ _mm_store_ps((float*)res, a.v);
+ return res[0];
+ #else
+ std::complex<float> res;
+ _mm_storel_pi((__m64*)&res, a.v);
+ return res;
+ #endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(preverse(_mm_castps_pd(a.v)))); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+ return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+ return Packet2cf(_mm_add_ps(_mm_movelh_ps(vecs[0].v,vecs[1].v), _mm_movehl_ps(vecs[1].v,vecs[0].v)));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+ return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v))));
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+ EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second)
+ {
+ if (Offset==1)
+ {
+ first.v = _mm_movehl_ps(first.v, first.v);
+ first.v = _mm_movelh_ps(first.v, second.v);
+ }
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ #ifdef EIGEN_VECTORIZE_SSE3
+ return internal::pmul(a, pconj(b));
+ #else
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
+ return Packet2cf(_mm_add_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask),
+ _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+ vec4f_swizzle1(b.v, 1, 0, 3, 2))));
+ #endif
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ #ifdef EIGEN_VECTORIZE_SSE3
+ return internal::pmul(pconj(a), b);
+ #else
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
+ return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
+ _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+ vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));
+ #endif
+ }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+ {
+ #ifdef EIGEN_VECTORIZE_SSE3
+ return pconj(internal::pmul(a, b));
+ #else
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
+ return Packet2cf(_mm_sub_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask),
+ _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+ vec4f_swizzle1(b.v, 1, 0, 3, 2))));
+ #endif
+ }
+};
+
+template<> struct conj_helper<Packet4f, Packet2cf, false,false>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet4f& x, const Packet2cf& y, const Packet2cf& c) const
+ { return padd(c, pmul(x,y)); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet4f& x, const Packet2cf& y) const
+ { return Packet2cf(Eigen::internal::pmul(x, y.v)); }
+};
+
+template<> struct conj_helper<Packet2cf, Packet4f, false,false>
+{
+ EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet4f& y, const Packet2cf& c) const
+ { return padd(c, pmul(x,y)); }
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& x, const Packet4f& y) const
+ { return Packet2cf(Eigen::internal::pmul(x.v, y)); }
+};
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+ // TODO optimize it for SSE3 and 4
+ Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+ __m128 s = _mm_mul_ps(b.v,b.v);
+ return Packet2cf(_mm_div_ps(res.v,_mm_add_ps(s,_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(s), 0xb1)))));
+}
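+// Same identity as the NEON port (sketch): a/b = a*conj(b) / |b|^2. Here
+// s = (br^2, bi^2, ...) and the shuffle with 0xb1 swaps the two 32-bit words
+// inside each 64-bit complex, so s plus its swapped copy holds |b|^2 in every
+// lane before the final division.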
+
+EIGEN_STRONG_INLINE Packet2cf pcplxflip/*<Packet2cf>*/(const Packet2cf& x)
+{
+ return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2));
+}
+
+
+//---------- double ----------
+struct Packet1cd
+{
+ EIGEN_STRONG_INLINE Packet1cd() {}
+ EIGEN_STRONG_INLINE explicit Packet1cd(const __m128d& a) : v(a) {}
+ __m128d v;
+};
+
+template<> struct packet_traits<std::complex<double> > : default_packet_traits
+{
+ typedef Packet1cd type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 0,
+ size = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+};
+
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1}; };
+
+template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a)
+{
+ const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+ return Packet1cd(_mm_xor_pd(a.v,mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+ // TODO optimize it for SSE3 and 4
+ #ifdef EIGEN_VECTORIZE_SSE3
+ return Packet1cd(_mm_addsub_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
+ _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+ vec2d_swizzle1(b.v, 1, 0))));
+ #else
+ const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
+ return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
+ _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+ vec2d_swizzle1(b.v, 1, 0)), mask)));
+ #endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(a.v,b.v)); }
+
+// FIXME force unaligned load, this is a temporary fix
+template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
+{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
+
+// FIXME force unaligned store, this is a temporary fix
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
+{
+ EIGEN_ALIGN16 double res[2];
+ _mm_store_pd(res, a.v);
+ return std::complex<double>(res[0],res[1]);
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)
+{
+ return pfirst(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)
+{
+ return vecs[0];
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
+{
+ return pfirst(a);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet1cd>
+{
+ EIGEN_STRONG_INLINE static void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
+ {
+ // FIXME is it sure we never have to align a Packet1cd?
+ // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...
+ }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
+{
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ {
+ #ifdef EIGEN_VECTORIZE_SSE3
+ return internal::pmul(a, pconj(b));
+ #else
+ const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+ return Packet1cd(_mm_add_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask),
+ _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+ vec2d_swizzle1(b.v, 1, 0))));
+ #endif
+ }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
+{
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ {
+ #ifdef EIGEN_VECTORIZE_SSE3
+ return internal::pmul(pconj(a), b);
+ #else
+ const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+ return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
+ _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+ vec2d_swizzle1(b.v, 1, 0)), mask)));
+ #endif
+ }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
+{
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+ { return padd(pmul(x,y),c); }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+ {
+ #ifdef EIGEN_VECTORIZE_SSE3
+ return pconj(internal::pmul(a, b));
+ #else
+ const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+ return Packet1cd(_mm_sub_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask),
+ _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+ vec2d_swizzle1(b.v, 1, 0))));
+ #endif
+ }
+};
+
+template<> struct conj_helper<Packet2d, Packet1cd, false,false>
+{
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet2d& x, const Packet1cd& y, const Packet1cd& c) const
+ { return padd(c, pmul(x,y)); }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet2d& x, const Packet1cd& y) const
+ { return Packet1cd(Eigen::internal::pmul(x, y.v)); }
+};
+
+template<> struct conj_helper<Packet1cd, Packet2d, false,false>
+{
+ EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet2d& y, const Packet1cd& c) const
+ { return padd(c, pmul(x,y)); }
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& x, const Packet2d& y) const
+ { return Packet1cd(Eigen::internal::pmul(x.v, y)); }
+};
+
+template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+ // TODO optimize it for SSE3 and 4
+ Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
+ __m128d s = _mm_mul_pd(b.v,b.v);
+ return Packet1cd(_mm_div_pd(res.v, _mm_add_pd(s,_mm_shuffle_pd(s, s, 0x1))));
+}
+
+EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
+{
+ return Packet1cd(preverse(x.v));
+}
+
+} // end namespace internal
+
+#endif // EIGEN_COMPLEX_SSE_H
diff --git a/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h b/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
new file mode 100644
index 00000000000..9d56d82180b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -0,0 +1,395 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Julien Pommier
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+/* The sin, cos, exp, and log functions of this file come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+#ifndef EIGEN_MATH_FUNCTIONS_SSE_H
+#define EIGEN_MATH_FUNCTIONS_SSE_H
+
+namespace internal {
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f plog<Packet4f>(const Packet4f& _x)
+{
+ Packet4f x = _x;
+ _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+ _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
+
+ /* the smallest non-denormalized float number */
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
+
+ /* natural logarithm computed for 4 simultaneous floats;
+ returns NaN for x <= 0
+ */
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+
+
+ Packet4i emm0;
+
+ Packet4f invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());
+
+ x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
+ emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
+
+ /* keep only the mantissa */
+ x = _mm_and_ps(x, p4f_inv_mant_mask);
+ x = _mm_or_ps(x, p4f_half);
+
+ emm0 = _mm_sub_epi32(emm0, p4i_0x7f);
+ Packet4f e = padd(_mm_cvtepi32_ps(emm0), p4f_1);
+
+ /* part2:
+ if( x < SQRTHF ) {
+ e -= 1;
+ x = x + x - 1.0;
+ } else { x = x - 1.0; }
+ */
+ Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF);
+ Packet4f tmp = _mm_and_ps(x, mask);
+ x = psub(x, p4f_1);
+ e = psub(e, _mm_and_ps(p4f_1, mask));
+ x = padd(x, tmp);
+
+ Packet4f x2 = pmul(x,x);
+ Packet4f x3 = pmul(x2,x);
+
+ Packet4f y, y1, y2;
+ y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
+ y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
+ y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
+ y = pmadd(y , x, p4f_cephes_log_p2);
+ y1 = pmadd(y1, x, p4f_cephes_log_p5);
+ y2 = pmadd(y2, x, p4f_cephes_log_p8);
+ y = pmadd(y, x3, y1);
+ y = pmadd(y, x3, y2);
+ y = pmul(y, x3);
+
+ y1 = pmul(e, p4f_cephes_log_q1);
+ tmp = pmul(x2, p4f_half);
+ y = padd(y, y1);
+ x = psub(x, tmp);
+ y2 = pmul(e, p4f_cephes_log_q2);
+ x = padd(x, y);
+ x = padd(x, y2);
+ return _mm_or_ps(x, invalid_mask); // negative arg will be NAN
+}
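
The kernel above follows the classic cephes scheme: split x into mantissa and exponent, evaluate a degree-8 minimax polynomial on the shifted mantissa, then add e*ln(2) back in two pieces (q2 + q1) for extra precision. A scalar sketch of just the range reduction (plain C++, illustrative only; std::log stands in for the polynomial):

    #include <cmath>

    // log(x) = e*ln(2) + log(m) with m kept in [sqrt(0.5), sqrt(2)),
    // mirroring the reduction in plog<Packet4f>.
    static float log_reduction_sketch(float x)
    {
      int e;
      float m = std::frexp(x, &e);            // x = m * 2^e, m in [0.5, 1)
      if (m < 0.707106781f) { m += m; --e; }  // the SQRTHF branch: keep m near 1
      return (float)e * 0.693147181f + std::log(m); // SIMD code: polynomial in (m - 1)
    }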
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pexp<Packet4f>(const Packet4f& _x)
+{
+ Packet4f x = _x;
+ _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+ _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+
+
+ _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647949f);
+ _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+
+ Packet4f tmp = _mm_setzero_ps(), fx;
+ Packet4i emm0;
+
+ // clamp x
+ x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
+
+ /* express exp(x) as exp(g + n*log(2)) */
+ fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
+
+ /* how to perform a floorf with SSE: just below */
+ emm0 = _mm_cvttps_epi32(fx);
+ tmp = _mm_cvtepi32_ps(emm0);
+ /* if greater, subtract 1 */
+ Packet4f mask = _mm_cmpgt_ps(tmp, fx);
+ mask = _mm_and_ps(mask, p4f_1);
+ fx = psub(tmp, mask);
+
+ tmp = pmul(fx, p4f_cephes_exp_C1);
+ Packet4f z = pmul(fx, p4f_cephes_exp_C2);
+ x = psub(x, tmp);
+ x = psub(x, z);
+
+ z = pmul(x,x);
+
+ Packet4f y = p4f_cephes_exp_p0;
+ y = pmadd(y, x, p4f_cephes_exp_p1);
+ y = pmadd(y, x, p4f_cephes_exp_p2);
+ y = pmadd(y, x, p4f_cephes_exp_p3);
+ y = pmadd(y, x, p4f_cephes_exp_p4);
+ y = pmadd(y, x, p4f_cephes_exp_p5);
+ y = pmadd(y, z, x);
+ y = padd(y, p4f_1);
+
+ /* build 2^n */
+ emm0 = _mm_cvttps_epi32(fx);
+ emm0 = _mm_add_epi32(emm0, p4i_0x7f);
+ emm0 = _mm_slli_epi32(emm0, 23);
+ return pmul(y, _mm_castsi128_ps(emm0));
+}
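
The reduction above writes exp(x) = 2^n * exp(g) with n = round(x/ln(2)) and |g| <= ln(2)/2; ln(2) is split into C1 + C2 so that g stays accurate. A scalar sketch (plain C++, illustrative only; std::exp stands in for the degree-5 polynomial):

    #include <cmath>

    static float exp_reduction_sketch(float x)
    {
      float n = std::floor(x * 1.44269504f + 0.5f);             // round(x / ln(2))
      float g = (x - n * 0.693359375f) - n * (-2.12194440e-4f); // ln(2) = C1 + C2
      return std::ldexp(std::exp(g), (int)n);                   // scale by 2^n
    }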
+
+/* evaluation of 4 sines at once, using SSE2 intrinsics.
+
+ The code is an exact rewriting of the cephes sinf function.
+ Precision is excellent as long as x < 8192 (the special handling
+ cephes has for greater values is not reproduced here -- it does not
+ return garbage for arguments over 8192, but the extra precision is missing).
+
+ Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
+ surprising but correct result.
+*/
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psin<Packet4f>(const Packet4f& _x)
+{
+ Packet4f x = _x;
+ _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+ _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+
+ _EIGEN_DECLARE_CONST_Packet4i(1, 1);
+ _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
+ _EIGEN_DECLARE_CONST_Packet4i(2, 2);
+ _EIGEN_DECLARE_CONST_Packet4i(4, 4);
+
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
+
+ _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
+ _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+ _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f);
+ _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f);
+ _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
+ _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+ Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
+
+ Packet4i emm0, emm2;
+ sign_bit = x;
+ /* take the absolute value */
+ x = pabs(x);
+
+ /* take the modulo */
+
+ /* extract the sign bit (upper one) */
+ sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);
+
+ /* scale by 4/Pi */
+ y = pmul(x, p4f_cephes_FOPI);
+
+ /* store the integer part of y in mm0 */
+ emm2 = _mm_cvttps_epi32(y);
+ /* j=(j+1) & (~1) (see the cephes sources) */
+ emm2 = _mm_add_epi32(emm2, p4i_1);
+ emm2 = _mm_and_si128(emm2, p4i_not1);
+ y = _mm_cvtepi32_ps(emm2);
+ /* get the swap sign flag */
+ emm0 = _mm_and_si128(emm2, p4i_4);
+ emm0 = _mm_slli_epi32(emm0, 29);
+ /* get the polynomial selection mask
+ there is one polynomial for 0 <= x <= Pi/4
+ and another one for Pi/4 < x <= Pi/2
+
+ Both branches will be computed.
+ */
+ emm2 = _mm_and_si128(emm2, p4i_2);
+ emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+ Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
+ Packet4f poly_mask = _mm_castsi128_ps(emm2);
+ sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
+
+ /* The magic pass: "Extended precision modular arithmetic"
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
+ xmm1 = pmul(y, p4f_minus_cephes_DP1);
+ xmm2 = pmul(y, p4f_minus_cephes_DP2);
+ xmm3 = pmul(y, p4f_minus_cephes_DP3);
+ x = padd(x, xmm1);
+ x = padd(x, xmm2);
+ x = padd(x, xmm3);
+
+ /* Evaluate the first polynomial (0 <= x <= Pi/4) */
+ y = p4f_coscof_p0;
+ Packet4f z = _mm_mul_ps(x,x);
+
+ y = pmadd(y, z, p4f_coscof_p1);
+ y = pmadd(y, z, p4f_coscof_p2);
+ y = pmul(y, z);
+ y = pmul(y, z);
+ Packet4f tmp = pmul(z, p4f_half);
+ y = psub(y, tmp);
+ y = padd(y, p4f_1);
+
+ /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
+
+ Packet4f y2 = p4f_sincof_p0;
+ y2 = pmadd(y2, z, p4f_sincof_p1);
+ y2 = pmadd(y2, z, p4f_sincof_p2);
+ y2 = pmul(y2, z);
+ y2 = pmul(y2, x);
+ y2 = padd(y2, x);
+
+ /* select the correct result from the two polynomials */
+ y2 = _mm_and_ps(poly_mask, y2);
+ y = _mm_andnot_ps(poly_mask, y);
+ y = _mm_or_ps(y,y2);
+ /* update the sign */
+ return _mm_xor_ps(y, sign_bit);
+}
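
In scalar form, the reduction performed by psin amounts to the following (plain C++, illustrative only; std::sin/std::cos stand in for the two minimax polynomials selected by poly_mask):

    #include <cmath>

    static float sin_octant_sketch(float x)
    {
      float sign = (x < 0.0f) ? -1.0f : 1.0f;
      x = std::fabs(x);
      int j = (int)(x * 1.27323954473516f);  // x / (pi/4)
      j = (j + 1) & ~1;                      // round up to an even octant
      float y = (float)j;
      // extended precision modular arithmetic: pi/4 split into DP1 + DP2 + DP3
      x = ((x - y*0.78515625f) - y*2.4187564849853515625e-4f)
            - y*3.77489497744594108e-8f;
      if (j & 4) sign = -sign;               // octants 4..7 flip the sign
      float r = ((j & 2) == 0) ? std::sin(x) : std::cos(x);
      return sign * r;
    }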
+
+/* almost the same as psin */
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pcos<Packet4f>(const Packet4f& _x)
+{
+ Packet4f x = _x;
+ _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+ _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+
+ _EIGEN_DECLARE_CONST_Packet4i(1, 1);
+ _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
+ _EIGEN_DECLARE_CONST_Packet4i(2, 2);
+ _EIGEN_DECLARE_CONST_Packet4i(4, 4);
+
+ _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
+ _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+ _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
+ _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f);
+ _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
+ _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f);
+ _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
+ _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
+ _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+ Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
+ Packet4i emm0, emm2;
+
+ x = pabs(x);
+
+ /* scale by 4/Pi */
+ y = pmul(x, p4f_cephes_FOPI);
+
+ /* get the integer part of y */
+ emm2 = _mm_cvttps_epi32(y);
+ /* j=(j+1) & (~1) (see the cephes sources) */
+ emm2 = _mm_add_epi32(emm2, p4i_1);
+ emm2 = _mm_and_si128(emm2, p4i_not1);
+ y = _mm_cvtepi32_ps(emm2);
+
+ emm2 = _mm_sub_epi32(emm2, p4i_2);
+
+ /* get the swap sign flag */
+ emm0 = _mm_andnot_si128(emm2, p4i_4);
+ emm0 = _mm_slli_epi32(emm0, 29);
+ /* get the polynomial selection mask */
+ emm2 = _mm_and_si128(emm2, p4i_2);
+ emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+ Packet4f sign_bit = _mm_castsi128_ps(emm0);
+ Packet4f poly_mask = _mm_castsi128_ps(emm2);
+
+ /* The magic pass: "Extended precision modular arithmetic"
+ x = ((x - y * DP1) - y * DP2) - y * DP3; */
+ xmm1 = pmul(y, p4f_minus_cephes_DP1);
+ xmm2 = pmul(y, p4f_minus_cephes_DP2);
+ xmm3 = pmul(y, p4f_minus_cephes_DP3);
+ x = padd(x, xmm1);
+ x = padd(x, xmm2);
+ x = padd(x, xmm3);
+
+ /* Evaluate the first polynomial (0 <= x <= Pi/4) */
+ y = p4f_coscof_p0;
+ Packet4f z = pmul(x,x);
+
+ y = pmadd(y,z,p4f_coscof_p1);
+ y = pmadd(y,z,p4f_coscof_p2);
+ y = pmul(y, z);
+ y = pmul(y, z);
+ Packet4f tmp = _mm_mul_ps(z, p4f_half);
+ y = psub(y, tmp);
+ y = padd(y, p4f_1);
+
+ /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
+ Packet4f y2 = p4f_sincof_p0;
+ y2 = pmadd(y2, z, p4f_sincof_p1);
+ y2 = pmadd(y2, z, p4f_sincof_p2);
+ y2 = pmul(y2, z);
+ y2 = pmadd(y2, x, x);
+
+ /* select the correct result from the two polynomials */
+ y2 = _mm_and_ps(poly_mask, y2);
+ y = _mm_andnot_ps(poly_mask, y);
+ y = _mm_or_ps(y,y2);
+
+ /* update the sign */
+ return _mm_xor_ps(y, sign_bit);
+}
+
+// This is based on Quake3's fast inverse square root.
+// For details, see: http://www.beyond3d.com/content/articles/8/
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psqrt<Packet4f>(const Packet4f& _x)
+{
+ Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
+
+ /* select only the inverse sqrt of non-zero inputs */
+ Packet4f non_zero_mask = _mm_cmpgt_ps(_x, pset1<Packet4f>(std::numeric_limits<float>::epsilon()));
+ Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));
+
+ x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
+ return pmul(_x,x);
+}
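
The refinement above is one Newton-Raphson step on the ~12-bit _mm_rsqrt_ps estimate, y' = y*(1.5 - 0.5*a*y*y), followed by a*y' == sqrt(a). Scalar sketch (plain C++, illustrative only):

    static float sqrt_refine_sketch(float a, float rsqrt_estimate)
    {
      float y = rsqrt_estimate;          // ~12 significant bits from the hardware
      y = y * (1.5f - 0.5f * a * y * y); // one NR step for 1/sqrt(a), ~23 bits
      return a * y;                      // a * 1/sqrt(a) == sqrt(a)
    }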
+
+} // end namespace internal
+
+#endif // EIGEN_MATH_FUNCTIONS_SSE_H
diff --git a/extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h b/extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h
new file mode 100644
index 00000000000..908e27368e8
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -0,0 +1,634 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PACKET_MATH_SSE_H
+#define EIGEN_PACKET_MATH_SSE_H
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
+#endif
+
+typedef __m128 Packet4f;
+typedef __m128i Packet4i;
+typedef __m128d Packet2d;
+
+template<> struct is_arithmetic<__m128> { enum { value = true }; };
+template<> struct is_arithmetic<__m128i> { enum { value = true }; };
+template<> struct is_arithmetic<__m128d> { enum { value = true }; };
+
+#define vec4f_swizzle1(v,p,q,r,s) \
+ (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
+
+#define vec4i_swizzle1(v,p,q,r,s) \
+ (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))
+
+#define vec2d_swizzle1(v,p,q) \
+ (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))
+
+#define vec4f_swizzle2(a,b,p,q,r,s) \
+ (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))
+
+#define vec4i_swizzle2(a,b,p,q,r,s) \
+ (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+ const Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+ const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+ const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+
+template<> struct packet_traits<float> : default_packet_traits
+{
+ typedef Packet4f type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=4,
+
+ HasDiv = 1,
+ HasSin = EIGEN_FAST_MATH,
+ HasCos = EIGEN_FAST_MATH,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1
+ };
+};
+template<> struct packet_traits<double> : default_packet_traits
+{
+ typedef Packet2d type;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=2,
+
+ HasDiv = 1
+ };
+};
+template<> struct packet_traits<int> : default_packet_traits
+{
+ typedef Packet4i type;
+ enum {
+ // FIXME check the Has*
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=4
+ };
+};
+
+template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
+template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; };
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set1_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
+template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
+template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
+{
+ const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
+ return _mm_xor_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
+{
+ const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
+ return _mm_xor_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
+{
+ return psub(_mm_setr_epi32(0,0,0,0), a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+ return _mm_mullo_epi32(a,b);
+#else
+ // this version is slightly faster than 4 scalar products
+ return vec4i_swizzle1(
+ vec4i_swizzle2(
+ _mm_mul_epu32(a,b),
+ _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
+ vec4i_swizzle1(b,1,0,3,2)),
+ 0,2,0,2),
+ 0,2,1,3);
+#endif
+}
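
The SSE2 fallback above works because _mm_mul_epu32 multiplies lanes 0 and 2 into 64-bit results; swizzling both operands by (1,0,3,2) exposes lanes 1 and 3 the same way, and the final swizzles keep only the low 32 bits of each product, back in lane order. A scalar model of the lane bookkeeping (plain C++, illustrative only):

    #include <cstdint>

    static void mul_epi32_sketch(const int32_t a[4], const int32_t b[4], int32_t out[4])
    {
      uint64_t e0 = (uint64_t)(uint32_t)a[0] * (uint32_t)b[0]; // _mm_mul_epu32(a,b)
      uint64_t e2 = (uint64_t)(uint32_t)a[2] * (uint32_t)b[2];
      uint64_t o1 = (uint64_t)(uint32_t)a[1] * (uint32_t)b[1]; // after the (1,0,3,2) swizzle
      uint64_t o3 = (uint64_t)(uint32_t)a[3] * (uint32_t)b[3];
      out[0] = (int32_t)e0; out[1] = (int32_t)o1; // low 32 bits of each product,
      out[2] = (int32_t)e2; out[3] = (int32_t)o3; // interleaved back into lane order
    }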
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by SSE");
+ return pset1<Packet4i>(0);
+}
+
+// for some weird reason, this has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+ // after some benchmarking, this version *is* faster than a scalar implementation
+ Packet4i mask = _mm_cmplt_epi32(a,b);
+ return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+ // after some benchmarking, this version *is* faster than a scalar implementation
+ Packet4i mask = _mm_cmpgt_epi32(a,b);
+ return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+}
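
SSE2 has no packed 32-bit integer min/max instruction, hence the compare-and-blend above: the comparison yields an all-ones/all-zeros mask per lane, and (mask & a) | (~mask & b) selects without branching. Per-lane scalar model (plain C++, illustrative only):

    #include <cstdint>

    static int32_t select_sketch(int32_t a, int32_t b, bool a_wins)
    {
      uint32_t mask = a_wins ? 0xFFFFFFFFu : 0u; // _mm_cmplt/_mm_cmpgt lane result
      return (int32_t)((mask & (uint32_t)a) | (~mask & (uint32_t)b));
    }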
+
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }
+
+#if defined(_MSC_VER)
+ template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
+ EIGEN_DEBUG_UNALIGNED_LOAD
+ #if (_MSC_VER==1600)
+ // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
+ // (i.e., they do not emit an unaligned load!!)
+ // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
+ // so we could also enable it for MSVC08, but first we have to make sure the latter does not generate bad code when doing so...
+ __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
+ res = _mm_loadh_pi(res, (const __m64*)(from+2));
+ return res;
+ #else
+ return _mm_loadu_ps(from);
+ #endif
+ }
+ template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
+ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
+#else
+// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
+// require casting to incompatible pointer types and would lead to invalid code
+// because of the strict aliasing rule. The "dummy" loads are required to enforce
+// a correct instruction dependency.
+// TODO: do the same for MSVC (ICC is compatible)
+// NOTE: with the code below, MSVC's compiler crashes!
+
+#if defined(__GNUC__) && defined(__i386__)
+ // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
+ #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
+#elif defined(__clang__)
+ // bug 201: Segfaults in __mm_loadh_pd with clang 2.8
+ #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
+#else
+ #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+{
+ EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+ return _mm_loadu_ps(from);
+#else
+ __m128d res;
+ res = _mm_load_sd((const double*)(from)) ;
+ res = _mm_loadh_pd(res, (const double*)(from+2)) ;
+ return _mm_castpd_ps(res);
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
+{
+ EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+ return _mm_loadu_pd(from);
+#else
+ __m128d res;
+ res = _mm_load_sd(from) ;
+ res = _mm_loadh_pd(res,from+1);
+ return res;
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
+{
+ EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+ return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
+#else
+ __m128d res;
+ res = _mm_load_sd((const double*)(from)) ;
+ res = _mm_loadh_pd(res, (const double*)(from+2)) ;
+ return _mm_castpd_si128(res);
+#endif
+}
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+{
+ return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd((const double*)from)), 0, 0, 1, 1);
+}
+template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
+{ return pset1<Packet2d>(from[0]); }
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
+{
+ Packet4i tmp;
+ tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
+ return vec4i_swizzle1(tmp, 0, 0, 1, 1);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
+ EIGEN_DEBUG_UNALIGNED_STORE
+ _mm_storel_pd((to), from);
+ _mm_storeh_pd((to+1), from);
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, _mm_castps_pd(from)); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, _mm_castsi128_pd(from)); }
+
+// some compilers might be tempted to perform multiple moves instead of using a vector path.
+template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
+{
+ Packet4f pa = _mm_set_ss(a);
+ pstore(to, vec4f_swizzle1(pa,0,0,0,0));
+}
+// some compilers might be tempted to perform multiple moves instead of using a vector path.
+template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
+{
+ Packet2d pa = _mm_set_sd(a);
+ pstore(to, vec2d_swizzle1(pa,0,0));
+}
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+
+#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
+// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
+// Direct access to the struct members fixed bug #62.
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+#else
+template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
+template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{ return _mm_shuffle_ps(a,a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+{ return _mm_shuffle_pd(a,a,0x1); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+{ return _mm_shuffle_epi32(a,0x1B); }
+
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
+{
+ const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
+ return _mm_and_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
+{
+ const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
+ return _mm_and_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
+{
+ #ifdef EIGEN_VECTORIZE_SSSE3
+ return _mm_abs_epi32(a);
+ #else
+ Packet4i aux = _mm_srai_epi32(a,31);
+ return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
+ #endif
+}
+
+EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
+{
+ vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
+ vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
+ vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
+ vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
+}
+
+#ifdef EIGEN_VECTORIZE_SSE3
+// TODO implement SSE2 versions as well as integer versions
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+ return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
+}
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+ return _mm_hadd_pd(vecs[0], vecs[1]);
+}
+// SSSE3 version:
+// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
+// {
+// return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
+// }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+ Packet4f tmp0 = _mm_hadd_ps(a,a);
+ return pfirst(_mm_hadd_ps(tmp0, tmp0));
+}
+
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }
+
+// SSSE3 version:
+// EIGEN_STRONG_INLINE float predux(const Packet4i& a)
+// {
+// Packet4i tmp0 = _mm_hadd_epi32(a,a);
+// return pfirst(_mm_hadd_epi32(tmp0, tmp0));
+// }
+#else
+// SSE2 versions
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+ Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
+ return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{
+ return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+ Packet4f tmp0, tmp1, tmp2;
+ tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
+ tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
+ tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
+ tmp0 = _mm_add_ps(tmp0, tmp1);
+ tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
+ tmp1 = _mm_add_ps(tmp1, tmp2);
+ tmp2 = _mm_movehl_ps(tmp1, tmp0);
+ tmp0 = _mm_movelh_ps(tmp0, tmp1);
+ return _mm_add_ps(tmp0, tmp2);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+ return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
+}
+#endif // SSE3
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+ Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
+ return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+ Packet4i tmp0, tmp1, tmp2;
+ tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
+ tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
+ tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
+ tmp0 = _mm_add_epi32(tmp0, tmp1);
+ tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
+ tmp1 = _mm_add_epi32(tmp1, tmp2);
+ tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
+ tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
+ return _mm_add_epi32(tmp0, tmp2);
+}
+
+// Other reduction functions:
+
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+ Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
+ return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{
+ return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+ // after some experiments, it seems this is the fastest way to implement it
+ // for GCC (e.g., reusing pmul is very slow!)
+ // TODO try to call _mm_mul_epu32 directly
+ EIGEN_ALIGN16 int aux[4];
+ pstore(aux, a);
+ return (aux[0] * aux[1]) * (aux[2] * aux[3]);
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+ Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
+ return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+{
+ return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+ // after some experiments, it seems this is the fastest way to implement it
+ // for GCC (e.g., it does not like using std::min after the pstore!)
+ EIGEN_ALIGN16 int aux[4];
+ pstore(aux, a);
+ register int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
+ register int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
+ return aux0<aux2 ? aux0 : aux2;
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+ Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
+ return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+{
+ return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+ // after some experiments, it seems this is the fastest way to implement it
+ // for GCC (e.g., it does not like using std::max after the pstore!)
+ EIGEN_ALIGN16 int aux[4];
+ pstore(aux, a);
+ register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
+ register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
+ return aux0>aux2 ? aux0 : aux2;
+}
+
+#if (defined __GNUC__)
+// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
+// {
+// Packet4f res = b;
+// asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
+// return res;
+// }
+// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
+// {
+// Packet4i res = a;
+// asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
+// return res;
+// }
+#endif
+
+#ifdef EIGEN_VECTORIZE_SSSE3
+// SSSE3 versions
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+ EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second)
+ {
+ if (Offset!=0)
+ first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
+ }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+ EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second)
+ {
+ if (Offset!=0)
+ first = _mm_alignr_epi8(second,first, Offset*4);
+ }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet2d>
+{
+ EIGEN_STRONG_INLINE static void run(Packet2d& first, const Packet2d& second)
+ {
+ if (Offset==1)
+ first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
+ }
+};
+#else
+// SSE2 versions
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+ EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second)
+ {
+ if (Offset==1)
+ {
+ first = _mm_move_ss(first,second);
+ first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
+ }
+ else if (Offset==2)
+ {
+ first = _mm_movehl_ps(first,first);
+ first = _mm_movelh_ps(first,second);
+ }
+ else if (Offset==3)
+ {
+ first = _mm_move_ss(first,second);
+ first = _mm_shuffle_ps(first,second,0x93);
+ }
+ }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+ EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second)
+ {
+ if (Offset==1)
+ {
+ first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+ first = _mm_shuffle_epi32(first,0x39);
+ }
+ else if (Offset==2)
+ {
+ first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
+ first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+ }
+ else if (Offset==3)
+ {
+ first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+ first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
+ }
+ }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet2d>
+{
+ EIGEN_STRONG_INLINE static void run(Packet2d& first, const Packet2d& second)
+ {
+ if (Offset==1)
+ {
+ first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
+ first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
+ }
+ }
+};
+#endif
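
Whatever the instruction set, palign_impl<Offset>::run(first, second) leaves first holding elements Offset..Offset+3 of the concatenation [first, second] -- a sliding window used by unaligned-friendly kernels. A scalar model (plain C++, illustrative only; shown for a 4-lane packet):

    template<int Offset>
    void palign_sketch(float first[4], const float second[4])
    {
      float concat[8];
      for (int i = 0; i < 4; ++i) { concat[i] = first[i]; concat[4 + i] = second[i]; }
      for (int i = 0; i < 4; ++i) first[i] = concat[i + Offset]; // shifted window
    }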
+
+} // end namespace internal
+
+#endif // EIGEN_PACKET_MATH_SSE_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h b/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h
new file mode 100644
index 00000000000..dc20f7e1e29
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h
@@ -0,0 +1,452 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COEFFBASED_PRODUCT_H
+#define EIGEN_COEFFBASED_PRODUCT_H
+
+namespace internal {
+
+/*********************************************************************************
+* Coefficient based product implementation.
+* It is designed for the following use cases:
+* - small fixed sizes
+* - lazy products
+*********************************************************************************/
+
+/* Since all the dimensions of the product are small, we can rely here
+ * on the generic Assign mechanism to evaluate the product per coeff (or packet).
+ *
+ * Note that here the inner-loops should always be unrolled.
+ */
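
Concretely, each destination coefficient is an independent dot product, so the Assign loop can request coeff(row, col) (or packet(row, col)) without ever materializing the full product. A sketch of what the unrolled implementations below compute (plain C++, illustrative only):

    // What product_coeff_impl expands to: res = sum_k lhs(row,k) * rhs(k,col),
    // with the loop fully unrolled at compile time for small fixed sizes.
    template<typename Lhs, typename Rhs, typename Scalar>
    Scalar coeff_sketch(const Lhs& lhs, const Rhs& rhs, int row, int col)
    {
      Scalar res = lhs.coeff(row, 0) * rhs.coeff(0, col);
      for (int k = 1; k < lhs.cols(); ++k)
        res += lhs.coeff(row, k) * rhs.coeff(k, col);
      return res;
    }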
+
+template<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl;
+
+template<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl;
+
+template<typename LhsNested, typename RhsNested, int NestingFlags>
+struct traits<CoeffBasedProduct<LhsNested,RhsNested,NestingFlags> >
+{
+ typedef MatrixXpr XprKind;
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+ typedef typename scalar_product_traits<typename _LhsNested::Scalar, typename _RhsNested::Scalar>::ReturnType Scalar;
+ typedef typename promote_storage_type<typename traits<_LhsNested>::StorageKind,
+ typename traits<_RhsNested>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+ typename traits<_RhsNested>::Index>::type Index;
+
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+
+ RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+ ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+ InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+ MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+ LhsRowMajor = LhsFlags & RowMajorBit,
+ RhsRowMajor = RhsFlags & RowMajorBit,
+
+ SameType = is_same<typename _LhsNested::Scalar,typename _RhsNested::Scalar>::value,
+
+ CanVectorizeRhs = RhsRowMajor && (RhsFlags & PacketAccessBit)
+ && (ColsAtCompileTime == Dynamic
+ || ( (ColsAtCompileTime % packet_traits<Scalar>::size) == 0
+ && (RhsFlags&AlignedBit)
+ )
+ ),
+
+ CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit)
+ && (RowsAtCompileTime == Dynamic
+ || ( (RowsAtCompileTime % packet_traits<Scalar>::size) == 0
+ && (LhsFlags&AlignedBit)
+ )
+ ),
+
+ EvalToRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+ : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+ : (RhsRowMajor && !CanVectorizeLhs),
+
+ Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & ~RowMajorBit)
+ | (EvalToRowMajor ? RowMajorBit : 0)
+ | NestingFlags
+ | (LhsFlags & RhsFlags & AlignedBit)
+ // TODO enable vectorization for mixed types
+ | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0),
+
+ CoeffReadCost = InnerSize == Dynamic ? Dynamic
+ : InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)
+ + (InnerSize - 1) * NumTraits<Scalar>::AddCost,
+
+ /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside
+ * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner
+ * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect
+ * the Flags, it is safe to make this value depend on ActualPacketAccessBit, which doesn't affect the ABI.
+ */
+ CanVectorizeInner = SameType
+ && LhsRowMajor
+ && (!RhsRowMajor)
+ && (LhsFlags & RhsFlags & ActualPacketAccessBit)
+ && (LhsFlags & RhsFlags & AlignedBit)
+ && (InnerSize % packet_traits<Scalar>::size == 0)
+ };
+};
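
As a worked example of the cost model (assuming plain-matrix operands with CoeffReadCost == 1 and NumTraits<float>::MulCost == AddCost == 1): a 4x4 float product has InnerSize == 4, so CoeffReadCost = 4*(1 + 1 + 1) + 3*1 = 15, and the coefficient loop is unrolled whenever that cost stays within EIGEN_UNROLLING_LIMIT (see the Unroll enum in CoeffBasedProduct below).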
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested, int NestingFlags>
+class CoeffBasedProduct
+ : internal::no_assignment_operator,
+ public MatrixBase<CoeffBasedProduct<LhsNested, RhsNested, NestingFlags> >
+{
+ public:
+
+ typedef MatrixBase<CoeffBasedProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(CoeffBasedProduct)
+ typedef typename Base::PlainObject PlainObject;
+
+ private:
+
+ typedef typename internal::traits<CoeffBasedProduct>::_LhsNested _LhsNested;
+ typedef typename internal::traits<CoeffBasedProduct>::_RhsNested _RhsNested;
+
+ enum {
+ PacketSize = internal::packet_traits<Scalar>::size,
+ InnerSize = internal::traits<CoeffBasedProduct>::InnerSize,
+ Unroll = CoeffReadCost != Dynamic && CoeffReadCost <= EIGEN_UNROLLING_LIMIT,
+ CanVectorizeInner = internal::traits<CoeffBasedProduct>::CanVectorizeInner
+ };
+
+ typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
+ Unroll ? InnerSize-1 : Dynamic,
+ _LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
+
+ typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
+
+ public:
+
+ inline CoeffBasedProduct(const CoeffBasedProduct& other)
+ : Base(), m_lhs(other.m_lhs), m_rhs(other.m_rhs)
+ {}
+
+ template<typename Lhs, typename Rhs>
+ inline CoeffBasedProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ // we don't allow taking products of matrices of different real types, as that wouldn't be vectorizable.
+ // We still allow mixing T and complex<T>.
+ EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ eigen_assert(lhs.cols() == rhs.rows()
+ && "invalid matrix product"
+ && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+ {
+ Scalar res;
+ ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
+ return res;
+ }
+
+ /* Allow index-based non-packet access. It is impossible though to allow index-based packet access,
+ * which is why we don't set the LinearAccessBit.
+ */
+ EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+ {
+ Scalar res;
+ const Index row = RowsAtCompileTime == 1 ? 0 : index;
+ const Index col = RowsAtCompileTime == 1 ? index : 0;
+ ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
+ return res;
+ }
+
+ template<int LoadMode>
+ EIGEN_STRONG_INLINE const PacketScalar packet(Index row, Index col) const
+ {
+ PacketScalar res;
+ internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
+ Unroll ? InnerSize-1 : Dynamic,
+ _LhsNested, _RhsNested, PacketScalar, LoadMode>
+ ::run(row, col, m_lhs, m_rhs, res);
+ return res;
+ }
+
+ // Implicit conversion to the nested type (trigger the evaluation of the product)
+ EIGEN_STRONG_INLINE operator const PlainObject& () const
+ {
+ m_result.lazyAssign(*this);
+ return m_result;
+ }
+
+ const _LhsNested& lhs() const { return m_lhs; }
+ const _RhsNested& rhs() const { return m_rhs; }
+
+ const Diagonal<const LazyCoeffBasedProductType,0> diagonal() const
+ { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
+
+ template<int DiagonalIndex>
+ const Diagonal<const LazyCoeffBasedProductType,DiagonalIndex> diagonal() const
+ { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
+
+ const Diagonal<const LazyCoeffBasedProductType,Dynamic> diagonal(Index index) const
+ { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this).diagonal(index); }
+
+ protected:
+ const LhsNested m_lhs;
+ const RhsNested m_rhs;
+
+ mutable PlainObject m_result;
+};
+
+namespace internal {
+
+// here we need to overload the nested rule for products
+// such that the nested type is a const reference to a plain matrix
+template<typename Lhs, typename Rhs, int N, typename PlainObject>
+struct nested<CoeffBasedProduct<Lhs,Rhs,EvalBeforeNestingBit|EvalBeforeAssigningBit>, N, PlainObject>
+{
+ typedef PlainObject const& type;
+};
+
+/***************************************************************************
+* Normal product .coeff() implementation (with meta-unrolling)
+***************************************************************************/
+
+/**************************************
+*** Scalar path - no vectorization ***
+**************************************/
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+ {
+ product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
+ res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+ {
+ res = lhs.coeff(row, 0) * rhs.coeff(0, col);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
+ {
+ eigen_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
+ res = lhs.coeff(row, 0) * rhs.coeff(0, col);
+ for(Index i = 1; i < lhs.cols(); ++i)
+ res += lhs.coeff(row, i) * rhs.coeff(i, col);
+ }
+};
+
+/*******************************************
+*** Scalar path with inner vectorization ***
+*******************************************/
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet>
+struct product_coeff_vectorized_unroller
+{
+ typedef typename Lhs::Index Index;
+ enum { PacketSize = packet_traits<typename Lhs::Scalar>::size };
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
+ {
+ product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
+ pres = padd(pres, pmul( lhs.template packet<Aligned>(row, UnrollingIndex) , rhs.template packet<Aligned>(UnrollingIndex, col) ));
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet>
+struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
+ {
+ pres = pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col));
+ }
+};
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::PacketScalar Packet;
+ typedef typename Lhs::Index Index;
+ enum { PacketSize = packet_traits<typename Lhs::Scalar>::size };
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+ {
+ Packet pres;
+ product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
+ product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
+ res = predux(pres);
+ }
+};
+
+template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime>
+struct product_coeff_vectorized_dyn_selector
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ {
+ res = lhs.row(row).transpose().cwiseProduct(rhs.col(col)).sum();
+ }
+};
+
+// NOTE the 3 following specializations exist because taking .col(0) on a vector is a bit slower
+// NOTE they may now be useless since we have a specialization for Block<Matrix>
+template<typename Lhs, typename Rhs, int RhsCols>
+struct product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ {
+ res = lhs.transpose().cwiseProduct(rhs.col(col)).sum();
+ }
+};
+
+template<typename Lhs, typename Rhs, int LhsRows>
+struct product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ {
+ res = lhs.row(row).transpose().cwiseProduct(rhs).sum();
+ }
+};
+
+template<typename Lhs, typename Rhs>
+struct product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ {
+ res = lhs.transpose().cwiseProduct(rhs).sum();
+ }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<InnerVectorizedTraversal, Dynamic, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+ {
+ product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, res);
+ }
+};
+
+/*******************
+*** Packet path ***
+*******************/
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
+ {
+ product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
+ res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
+ }
+};
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
+ {
+ product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
+ res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
+ {
+ res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
+ {
+ res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
+ {
+ eigen_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
+ res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
+ for(Index i = 1; i < lhs.cols(); ++i)
+ res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ EIGEN_STRONG_INLINE static void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
+ {
+ eigen_assert(lhs.cols()>0 && "you are using an uninitialized matrix");
+ res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
+ for(Index i = 1; i < lhs.cols(); ++i)
+ res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_COEFFBASED_PRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
new file mode 100644
index 00000000000..6f3f2717007
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -0,0 +1,1285 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
+#define EIGEN_GENERAL_BLOCK_PANEL_H
+
+namespace internal {
+
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
+class gebp_traits;
+
+/** \internal */
+inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0)
+{
+ static std::ptrdiff_t m_l1CacheSize = 0;
+ static std::ptrdiff_t m_l2CacheSize = 0;
+ if(m_l1CacheSize==0)
+ {
+ m_l1CacheSize = queryL1CacheSize();
+ m_l2CacheSize = queryTopLevelCacheSize();
+
+ if(m_l1CacheSize<=0) m_l1CacheSize = 8 * 1024;
+ if(m_l2CacheSize<=0) m_l2CacheSize = 1 * 1024 * 1024;
+ }
+
+ if(action==SetAction)
+ {
+ // set the CPU cache sizes; all block sizes are derived from these values (in bytes)
+ eigen_internal_assert(l1!=0 && l2!=0);
+ m_l1CacheSize = *l1;
+ m_l2CacheSize = *l2;
+ }
+ else if(action==GetAction)
+ {
+ eigen_internal_assert(l1!=0 && l2!=0);
+ *l1 = m_l1CacheSize;
+ *l2 = m_l2CacheSize;
+ }
+ else
+ {
+ eigen_internal_assert(false);
+ }
+}
+
+/** \brief Computes the blocking parameters for a m x k times k x n matrix product
+ *
+ * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
+ * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
+ * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
+ *
+ * Given an m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
+ * this function computes the blocking size parameters along the respective dimensions
+ * for matrix products and related algorithms. The blocking sizes depend on various
+ * parameters:
+ * - the L1 and L2 cache sizes,
+ * - the register level blocking sizes defined by gebp_traits,
+ * - the number of scalars that fit into a packet (when vectorization is enabled).
+ *
+ * \sa setCpuCacheSizes */
+template<typename LhsScalar, typename RhsScalar, int KcFactor>
+void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
+{
+ EIGEN_UNUSED_VARIABLE(n);
+ // Explanations:
+ // Recall that the product algorithm forms kc x nc horizontal panels B' on the rhs and
+ // mc x kc blocks A' on the lhs. A' has to fit into the L2 cache. Moreover, B' is processed
+ // in kc x nr vertical small panels, where nr is the blocking size along the n dimension
+ // at the register level. For vectorization purposes, these small vertical panels are unpacked,
+ // i.e., each coefficient is replicated to fill a packet. Each such small vertical panel has to
+ // stay in the L1 cache.
+ std::ptrdiff_t l1, l2;
+
+ typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+ enum {
+ kdiv = KcFactor * 2 * Traits::nr
+ * Traits::RhsProgress * sizeof(RhsScalar),
+ mr = gebp_traits<LhsScalar,RhsScalar>::mr,
+ mr_mask = (0xffffffff/mr)*mr
+ };
+
+ manage_caching_sizes(GetAction, &l1, &l2);
+ k = std::min<std::ptrdiff_t>(k, l1/kdiv);
+ std::ptrdiff_t _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
+ if(_m<m) m = _m & mr_mask;
+}
+
+template<typename LhsScalar, typename RhsScalar>
+inline void computeProductBlockingSizes(std::ptrdiff_t& k, std::ptrdiff_t& m, std::ptrdiff_t& n)
+{
+ computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n);
+}
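+
+// Illustrative usage sketch (added for exposition, not part of the original
+// file): how a caller might query cache-aware blocking sizes for a
+// double-precision product. The resulting values depend on the detected
+// cache sizes, so the outputs are machine-dependent.
+//
+// std::ptrdiff_t k = 1024, m = 1024, n = 1024; // problem dimensions
+// internal::computeProductBlockingSizes<double,double>(k, m, n);
+// // k and m now hold the block sizes along the K and M dimensions;
+// // this implementation leaves n untouched.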
+
+#ifdef EIGEN_HAS_FUSE_CJMADD
+ #define MADD(CJ,A,B,C,T) C = CJ.pmadd(A,B,C);
+#else
+
+ // FIXME (a bit overkill maybe ?)
+
+ template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
+ EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE_ATTRIB static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
+ {
+ c = cj.pmadd(a,b,c);
+ }
+ };
+
+ template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
+ EIGEN_STRONG_INLINE EIGEN_ALWAYS_INLINE_ATTRIB static void run(const CJ& cj, T& a, T& b, T& c, T& t)
+ {
+ t = b; t = cj.pmul(a,t); c = padd(c,t);
+ }
+ };
+
+ template<typename CJ, typename A, typename B, typename C, typename T>
+ EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
+ {
+ gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
+ }
+
+ #define MADD(CJ,A,B,C,T) gebp_madd(CJ,A,B,C,T);
+// #define MADD(CJ,A,B,C,T) T = B; T = CJ.pmul(A,T); C = padd(C,T);
+#endif
+
+/* Vectorization logic
+ * real*real: unpack rhs to constant packets, ...
+ *
+ * cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
+ * storing each res packet into two packets (2x2),
+ * at the end combine them: swap the second and addsub them
+ * cf*cf : same but with 2x4 blocks
+ * cplx*real : unpack rhs to constant packets, ...
+ * real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
+ */
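+// Worked scalar view of the cd*cd scheme above (added for exposition):
+// with a = a_r + i*a_i (lhs packet, interleaved) and b = b_r + i*b_i,
+// broadcasting b as the two real packets [b_r,...,b_r] and [b_i,...,b_i]
+// yields the two partial accumulators
+//   c.first  += a * [b_r,...] = (a_r*b_r, a_i*b_r)
+//   c.second += a * [b_i,...] = (a_r*b_i, a_i*b_i)
+// and acc() recombines them, e.g. for the non-conjugated case:
+//   pcplxflip(pconj(c.second)) + c.first
+//     = (a_r*b_r - a_i*b_i, a_i*b_r + a_r*b_i) = a*b.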
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
+class gebp_traits
+{
+public:
+ typedef _LhsScalar LhsScalar;
+ typedef _RhsScalar RhsScalar;
+ typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+ enum {
+ ConjLhs = _ConjLhs,
+ ConjRhs = _ConjRhs,
+ Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
+ LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+ RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+ ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+
+ NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+
+ // register block size along the N direction (must be either 2 or 4)
+ nr = NumberOfRegisters/4,
+
+ // register block size along the M direction (currently, this one cannot be modified)
+ mr = 2 * LhsPacketSize,
+
+ WorkSpaceFactor = nr * RhsPacketSize,
+
+ LhsProgress = LhsPacketSize,
+ RhsProgress = RhsPacketSize
+ };
+
+ typedef typename packet_traits<LhsScalar>::type _LhsPacket;
+ typedef typename packet_traits<RhsScalar>::type _RhsPacket;
+ typedef typename packet_traits<ResScalar>::type _ResPacket;
+
+ typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+ typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+ typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+ typedef ResPacket AccPacket;
+
+ EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+ {
+ p = pset1<ResPacket>(ResScalar(0));
+ }
+
+ EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+ {
+ for(DenseIndex k=0; k<n; k++)
+ pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = pload<RhsPacket>(b);
+ }
+
+ EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+ {
+ dest = pload<LhsPacket>(a);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const
+ {
+ tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
+ }
+
+ EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ {
+ r = pmadd(c,alpha,r);
+ }
+
+protected:
+// conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
+// conj_helper<LhsPacket,RhsPacket,ConjLhs,ConjRhs> pcj;
+};
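+
+// Summary of the gebp_traits contract used by the kernel further below
+// (added for exposition): initAcc() zeroes an accumulator, loadLhs() and
+// loadRhs() fetch one packet from the packed blocks, madd() performs
+// c += a*b (possibly routed through the tmp register), and acc() folds the
+// accumulator into the result as r += alpha*c. The complex specializations
+// that follow override these steps to handle real/imaginary interleaving.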
+
+template<typename RealScalar, bool _ConjLhs>
+class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
+{
+public:
+ typedef std::complex<RealScalar> LhsScalar;
+ typedef RealScalar RhsScalar;
+ typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+ enum {
+ ConjLhs = _ConjLhs,
+ ConjRhs = false,
+ Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
+ LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+ RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+ ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+
+ NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+ nr = NumberOfRegisters/4,
+ mr = 2 * LhsPacketSize,
+ WorkSpaceFactor = nr*RhsPacketSize,
+
+ LhsProgress = LhsPacketSize,
+ RhsProgress = RhsPacketSize
+ };
+
+ typedef typename packet_traits<LhsScalar>::type _LhsPacket;
+ typedef typename packet_traits<RhsScalar>::type _RhsPacket;
+ typedef typename packet_traits<ResScalar>::type _ResPacket;
+
+ typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+ typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+ typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+ typedef ResPacket AccPacket;
+
+ EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+ {
+ p = pset1<ResPacket>(ResScalar(0));
+ }
+
+ EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+ {
+ for(DenseIndex k=0; k<n; k++)
+ pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = pload<RhsPacket>(b);
+ }
+
+ EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+ {
+ dest = pload<LhsPacket>(a);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+ {
+ madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
+ }
+
+ EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+ {
+ tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
+ }
+
+ EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
+ {
+ c += a * b;
+ }
+
+ EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ {
+ r = cj.pmadd(c,alpha,r);
+ }
+
+protected:
+ conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
+};
+
+template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
+class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
+{
+public:
+ typedef std::complex<RealScalar> Scalar;
+ typedef std::complex<RealScalar> LhsScalar;
+ typedef std::complex<RealScalar> RhsScalar;
+ typedef std::complex<RealScalar> ResScalar;
+
+ enum {
+ ConjLhs = _ConjLhs,
+ ConjRhs = _ConjRhs,
+ Vectorizable = packet_traits<RealScalar>::Vectorizable
+ && packet_traits<Scalar>::Vectorizable,
+ RealPacketSize = Vectorizable ? packet_traits<RealScalar>::size : 1,
+ ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+
+ nr = 2,
+ mr = 2 * ResPacketSize,
+ WorkSpaceFactor = Vectorizable ? 2*nr*RealPacketSize : nr,
+
+ LhsProgress = ResPacketSize,
+ RhsProgress = Vectorizable ? 2*ResPacketSize : 1
+ };
+
+ typedef typename packet_traits<RealScalar>::type RealPacket;
+ typedef typename packet_traits<Scalar>::type ScalarPacket;
+ struct DoublePacket
+ {
+ RealPacket first;
+ RealPacket second;
+ };
+
+ typedef typename conditional<Vectorizable,RealPacket, Scalar>::type LhsPacket;
+ typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type RhsPacket;
+ typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
+ typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type AccPacket;
+
+ EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }
+
+ EIGEN_STRONG_INLINE void initAcc(DoublePacket& p)
+ {
+ p.first = pset1<RealPacket>(RealScalar(0));
+ p.second = pset1<RealPacket>(RealScalar(0));
+ }
+
+ /* Unpack the rhs coefficients such that each complex coefficient is spread into
+ * two packets containing respectively the real and the imaginary coefficient,
+ * duplicated as many times as needed: (x+iy) => [x, ..., x] [y, ..., y]
+ */
+ EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const Scalar* rhs, Scalar* b)
+ {
+ for(DenseIndex k=0; k<n; k++)
+ {
+ if(Vectorizable)
+ {
+ pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+0], real(rhs[k]));
+ pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], imag(rhs[k]));
+ }
+ else
+ b[k] = rhs[k];
+ }
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const { dest = *b; }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket& dest) const
+ {
+ dest.first = pload<RealPacket>((const RealScalar*)b);
+ dest.second = pload<RealPacket>((const RealScalar*)(b+ResPacketSize));
+ }
+
+ // nothing special here
+ EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+ {
+ dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacket& c, RhsPacket& /*tmp*/) const
+ {
+ c.first = padd(pmul(a,b.first), c.first);
+ c.second = padd(pmul(a,b.second),c.second);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
+ {
+ c = cj.pmadd(a,b,c);
+ }
+
+ EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
+
+ EIGEN_STRONG_INLINE void acc(const DoublePacket& c, const ResPacket& alpha, ResPacket& r) const
+ {
+ // assemble c
+ ResPacket tmp;
+ if((!ConjLhs)&&(!ConjRhs))
+ {
+ tmp = pcplxflip(pconj(ResPacket(c.second)));
+ tmp = padd(ResPacket(c.first),tmp);
+ }
+ else if((!ConjLhs)&&(ConjRhs))
+ {
+ tmp = pconj(pcplxflip(ResPacket(c.second)));
+ tmp = padd(ResPacket(c.first),tmp);
+ }
+ else if((ConjLhs)&&(!ConjRhs))
+ {
+ tmp = pcplxflip(ResPacket(c.second));
+ tmp = padd(pconj(ResPacket(c.first)),tmp);
+ }
+ else if((ConjLhs)&&(ConjRhs))
+ {
+ tmp = pcplxflip(ResPacket(c.second));
+ tmp = psub(pconj(ResPacket(c.first)),tmp);
+ }
+
+ r = pmadd(tmp,alpha,r);
+ }
+
+protected:
+ conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
+};
+
+template<typename RealScalar, bool _ConjRhs>
+class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
+{
+public:
+ typedef std::complex<RealScalar> Scalar;
+ typedef RealScalar LhsScalar;
+ typedef Scalar RhsScalar;
+ typedef Scalar ResScalar;
+
+ enum {
+ ConjLhs = false,
+ ConjRhs = _ConjRhs,
+ Vectorizable = packet_traits<RealScalar>::Vectorizable
+ && packet_traits<Scalar>::Vectorizable,
+ LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+ RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+ ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+
+ NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+ nr = 4,
+ mr = 2*ResPacketSize,
+ WorkSpaceFactor = nr*RhsPacketSize,
+
+ LhsProgress = ResPacketSize,
+ RhsProgress = ResPacketSize
+ };
+
+ typedef typename packet_traits<LhsScalar>::type _LhsPacket;
+ typedef typename packet_traits<RhsScalar>::type _RhsPacket;
+ typedef typename packet_traits<ResScalar>::type _ResPacket;
+
+ typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+ typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+ typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+ typedef ResPacket AccPacket;
+
+ EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+ {
+ p = pset1<ResPacket>(ResScalar(0));
+ }
+
+ EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+ {
+ for(DenseIndex k=0; k<n; k++)
+ pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = pload<RhsPacket>(b);
+ }
+
+ EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+ {
+ dest = ploaddup<LhsPacket>(a);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+ {
+ madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
+ }
+
+ EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+ {
+ tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
+ }
+
+ EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
+ {
+ c += a * b;
+ }
+
+ EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ {
+ r = cj.pmadd(alpha,c,r);
+ }
+
+protected:
+ conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
+};
+
+/* optimized GEneral packed Block * packed Panel product kernel
+ *
+ * Mixing type logic: C += A * B
+ * | A | B | comments
+ * |real |cplx | no vectorization yet, would require packing A with duplication
+ * |cplx |real | easy vectorization
+ */
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel
+{
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
+ typedef typename Traits::ResScalar ResScalar;
+ typedef typename Traits::LhsPacket LhsPacket;
+ typedef typename Traits::RhsPacket RhsPacket;
+ typedef typename Traits::ResPacket ResPacket;
+ typedef typename Traits::AccPacket AccPacket;
+
+ enum {
+ Vectorizable = Traits::Vectorizable,
+ LhsProgress = Traits::LhsProgress,
+ RhsProgress = Traits::RhsProgress,
+ ResPacketSize = Traits::ResPacketSize
+ };
+
+ EIGEN_FLATTEN_ATTRIB
+ void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB = 0)
+ {
+ Traits traits;
+
+ if(strideA==-1) strideA = depth;
+ if(strideB==-1) strideB = depth;
+ conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+// conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+ Index packet_cols = (cols/nr) * nr;
+ const Index peeled_mc = (rows/mr)*mr;
+ // FIXME:
+ const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= LhsProgress ? LhsProgress : 0);
+ const Index peeled_kc = (depth/4)*4;
+
+ if(unpackedB==0)
+ unpackedB = const_cast<RhsScalar*>(blockB - strideB * nr * RhsProgress);
+
+ // loops on each micro vertical panel of rhs (depth x nr)
+ for(Index j2=0; j2<packet_cols; j2+=nr)
+ {
+ traits.unpackRhs(depth*nr,&blockB[j2*strideB+offsetB*nr],unpackedB);
+
+ // loops on each largest micro horizontal panel of lhs (mr x depth)
+ // => we select an mr x nr micro block of res which is entirely
+ // stored into mr/packet_size x nr registers.
+ for(Index i=0; i<peeled_mc; i+=mr)
+ {
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
+ traits.initAcc(C0);
+ traits.initAcc(C1);
+ if(nr==4) traits.initAcc(C2);
+ if(nr==4) traits.initAcc(C3);
+ traits.initAcc(C4);
+ traits.initAcc(C5);
+ if(nr==4) traits.initAcc(C6);
+ if(nr==4) traits.initAcc(C7);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
+ ResScalar* r1 = r0 + resStride;
+ ResScalar* r2 = r1 + resStride;
+ ResScalar* r3 = r2 + resStride;
+
+ prefetch(r0+16);
+ prefetch(r1+16);
+ prefetch(r2+16);
+ prefetch(r3+16);
+
+ // performs "inner" product
+ // TODO let's check whether the following peeled loop could not be
+ // optimized via optimal prefetching from one loop to the other
+ const RhsScalar* blB = unpackedB;
+ for(Index k=0; k<peeled_kc; k+=4)
+ {
+ if(nr==2)
+ {
+ LhsPacket A0, A1;
+ RhsPacket B0;
+ RhsPacket T0;
+
+EIGEN_ASM_COMMENT("mybegin2");
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadLhs(&blA[1*LhsProgress], A1);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[1*RhsProgress], B0);
+ traits.madd(A0,B0,C1,T0);
+ traits.madd(A1,B0,C5,B0);
+
+ traits.loadLhs(&blA[2*LhsProgress], A0);
+ traits.loadLhs(&blA[3*LhsProgress], A1);
+ traits.loadRhs(&blB[2*RhsProgress], B0);
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[3*RhsProgress], B0);
+ traits.madd(A0,B0,C1,T0);
+ traits.madd(A1,B0,C5,B0);
+
+ traits.loadLhs(&blA[4*LhsProgress], A0);
+ traits.loadLhs(&blA[5*LhsProgress], A1);
+ traits.loadRhs(&blB[4*RhsProgress], B0);
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[5*RhsProgress], B0);
+ traits.madd(A0,B0,C1,T0);
+ traits.madd(A1,B0,C5,B0);
+
+ traits.loadLhs(&blA[6*LhsProgress], A0);
+ traits.loadLhs(&blA[7*LhsProgress], A1);
+ traits.loadRhs(&blB[6*RhsProgress], B0);
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[7*RhsProgress], B0);
+ traits.madd(A0,B0,C1,T0);
+ traits.madd(A1,B0,C5,B0);
+EIGEN_ASM_COMMENT("myend");
+ }
+ else
+ {
+EIGEN_ASM_COMMENT("mybegin4");
+ LhsPacket A0, A1;
+ RhsPacket B0, B1, B2, B3;
+ RhsPacket T0;
+
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadLhs(&blA[1*LhsProgress], A1);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.loadRhs(&blB[1*RhsProgress], B1);
+
+ traits.madd(A0,B0,C0,T0);
+ traits.loadRhs(&blB[2*RhsProgress], B2);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[3*RhsProgress], B3);
+ traits.loadRhs(&blB[4*RhsProgress], B0);
+ traits.madd(A0,B1,C1,T0);
+ traits.madd(A1,B1,C5,B1);
+ traits.loadRhs(&blB[5*RhsProgress], B1);
+ traits.madd(A0,B2,C2,T0);
+ traits.madd(A1,B2,C6,B2);
+ traits.loadRhs(&blB[6*RhsProgress], B2);
+ traits.madd(A0,B3,C3,T0);
+ traits.loadLhs(&blA[2*LhsProgress], A0);
+ traits.madd(A1,B3,C7,B3);
+ traits.loadLhs(&blA[3*LhsProgress], A1);
+ traits.loadRhs(&blB[7*RhsProgress], B3);
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[8*RhsProgress], B0);
+ traits.madd(A0,B1,C1,T0);
+ traits.madd(A1,B1,C5,B1);
+ traits.loadRhs(&blB[9*RhsProgress], B1);
+ traits.madd(A0,B2,C2,T0);
+ traits.madd(A1,B2,C6,B2);
+ traits.loadRhs(&blB[10*RhsProgress], B2);
+ traits.madd(A0,B3,C3,T0);
+ traits.loadLhs(&blA[4*LhsProgress], A0);
+ traits.madd(A1,B3,C7,B3);
+ traits.loadLhs(&blA[5*LhsProgress], A1);
+ traits.loadRhs(&blB[11*RhsProgress], B3);
+
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[12*RhsProgress], B0);
+ traits.madd(A0,B1,C1,T0);
+ traits.madd(A1,B1,C5,B1);
+ traits.loadRhs(&blB[13*RhsProgress], B1);
+ traits.madd(A0,B2,C2,T0);
+ traits.madd(A1,B2,C6,B2);
+ traits.loadRhs(&blB[14*RhsProgress], B2);
+ traits.madd(A0,B3,C3,T0);
+ traits.loadLhs(&blA[6*LhsProgress], A0);
+ traits.madd(A1,B3,C7,B3);
+ traits.loadLhs(&blA[7*LhsProgress], A1);
+ traits.loadRhs(&blB[15*RhsProgress], B3);
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+ traits.madd(A0,B1,C1,T0);
+ traits.madd(A1,B1,C5,B1);
+ traits.madd(A0,B2,C2,T0);
+ traits.madd(A1,B2,C6,B2);
+ traits.madd(A0,B3,C3,T0);
+ traits.madd(A1,B3,C7,B3);
+ }
+
+ blB += 4*nr*RhsProgress;
+ blA += 4*mr;
+ }
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ if(nr==2)
+ {
+ LhsPacket A0, A1;
+ RhsPacket B0;
+ RhsPacket T0;
+
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadLhs(&blA[1*LhsProgress], A1);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[1*RhsProgress], B0);
+ traits.madd(A0,B0,C1,T0);
+ traits.madd(A1,B0,C5,B0);
+ }
+ else
+ {
+ LhsPacket A0, A1;
+ RhsPacket B0, B1, B2, B3;
+ RhsPacket T0;
+
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadLhs(&blA[1*LhsProgress], A1);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.loadRhs(&blB[1*RhsProgress], B1);
+
+ traits.madd(A0,B0,C0,T0);
+ traits.loadRhs(&blB[2*RhsProgress], B2);
+ traits.madd(A1,B0,C4,B0);
+ traits.loadRhs(&blB[3*RhsProgress], B3);
+ traits.madd(A0,B1,C1,T0);
+ traits.madd(A1,B1,C5,B1);
+ traits.madd(A0,B2,C2,T0);
+ traits.madd(A1,B2,C6,B2);
+ traits.madd(A0,B3,C3,T0);
+ traits.madd(A1,B3,C7,B3);
+ }
+
+ blB += nr*RhsProgress;
+ blA += mr;
+ }
+
+ if(nr==4)
+ {
+ ResPacket R0, R1, R2, R3, R4, R5, R6;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ R0 = ploadu<ResPacket>(r0);
+ R1 = ploadu<ResPacket>(r1);
+ R2 = ploadu<ResPacket>(r2);
+ R3 = ploadu<ResPacket>(r3);
+ R4 = ploadu<ResPacket>(r0 + ResPacketSize);
+ R5 = ploadu<ResPacket>(r1 + ResPacketSize);
+ R6 = ploadu<ResPacket>(r2 + ResPacketSize);
+ traits.acc(C0, alphav, R0);
+ pstoreu(r0, R0);
+ R0 = ploadu<ResPacket>(r3 + ResPacketSize);
+
+ traits.acc(C1, alphav, R1);
+ traits.acc(C2, alphav, R2);
+ traits.acc(C3, alphav, R3);
+ traits.acc(C4, alphav, R4);
+ traits.acc(C5, alphav, R5);
+ traits.acc(C6, alphav, R6);
+ traits.acc(C7, alphav, R0);
+
+ pstoreu(r1, R1);
+ pstoreu(r2, R2);
+ pstoreu(r3, R3);
+ pstoreu(r0 + ResPacketSize, R4);
+ pstoreu(r1 + ResPacketSize, R5);
+ pstoreu(r2 + ResPacketSize, R6);
+ pstoreu(r3 + ResPacketSize, R0);
+ }
+ else
+ {
+ ResPacket R0, R1, R4;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ R0 = ploadu<ResPacket>(r0);
+ R1 = ploadu<ResPacket>(r1);
+ R4 = ploadu<ResPacket>(r0 + ResPacketSize);
+ traits.acc(C0, alphav, R0);
+ pstoreu(r0, R0);
+ R0 = ploadu<ResPacket>(r1 + ResPacketSize);
+ traits.acc(C1, alphav, R1);
+ traits.acc(C4, alphav, R4);
+ traits.acc(C5, alphav, R0);
+ pstoreu(r1, R1);
+ pstoreu(r0 + ResPacketSize, R4);
+ pstoreu(r1 + ResPacketSize, R0);
+ }
+
+ }
+
+ if(rows-peeled_mc>=LhsProgress)
+ {
+ Index i = peeled_mc;
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C1, C2, C3;
+ traits.initAcc(C0);
+ traits.initAcc(C1);
+ if(nr==4) traits.initAcc(C2);
+ if(nr==4) traits.initAcc(C3);
+
+ // performs "inner" product
+ const RhsScalar* blB = unpackedB;
+ for(Index k=0; k<peeled_kc; k+=4)
+ {
+ if(nr==2)
+ {
+ LhsPacket A0;
+ RhsPacket B0, B1;
+
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.loadRhs(&blB[1*RhsProgress], B1);
+ traits.madd(A0,B0,C0,B0);
+ traits.loadRhs(&blB[2*RhsProgress], B0);
+ traits.madd(A0,B1,C1,B1);
+ traits.loadLhs(&blA[1*LhsProgress], A0);
+ traits.loadRhs(&blB[3*RhsProgress], B1);
+ traits.madd(A0,B0,C0,B0);
+ traits.loadRhs(&blB[4*RhsProgress], B0);
+ traits.madd(A0,B1,C1,B1);
+ traits.loadLhs(&blA[2*LhsProgress], A0);
+ traits.loadRhs(&blB[5*RhsProgress], B1);
+ traits.madd(A0,B0,C0,B0);
+ traits.loadRhs(&blB[6*RhsProgress], B0);
+ traits.madd(A0,B1,C1,B1);
+ traits.loadLhs(&blA[3*LhsProgress], A0);
+ traits.loadRhs(&blB[7*RhsProgress], B1);
+ traits.madd(A0,B0,C0,B0);
+ traits.madd(A0,B1,C1,B1);
+ }
+ else
+ {
+ LhsPacket A0;
+ RhsPacket B0, B1, B2, B3;
+
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.loadRhs(&blB[1*RhsProgress], B1);
+
+ traits.madd(A0,B0,C0,B0);
+ traits.loadRhs(&blB[2*RhsProgress], B2);
+ traits.loadRhs(&blB[3*RhsProgress], B3);
+ traits.loadRhs(&blB[4*RhsProgress], B0);
+ traits.madd(A0,B1,C1,B1);
+ traits.loadRhs(&blB[5*RhsProgress], B1);
+ traits.madd(A0,B2,C2,B2);
+ traits.loadRhs(&blB[6*RhsProgress], B2);
+ traits.madd(A0,B3,C3,B3);
+ traits.loadLhs(&blA[1*LhsProgress], A0);
+ traits.loadRhs(&blB[7*RhsProgress], B3);
+ traits.madd(A0,B0,C0,B0);
+ traits.loadRhs(&blB[8*RhsProgress], B0);
+ traits.madd(A0,B1,C1,B1);
+ traits.loadRhs(&blB[9*RhsProgress], B1);
+ traits.madd(A0,B2,C2,B2);
+ traits.loadRhs(&blB[10*RhsProgress], B2);
+ traits.madd(A0,B3,C3,B3);
+ traits.loadLhs(&blA[2*LhsProgress], A0);
+ traits.loadRhs(&blB[11*RhsProgress], B3);
+
+ traits.madd(A0,B0,C0,B0);
+ traits.loadRhs(&blB[12*RhsProgress], B0);
+ traits.madd(A0,B1,C1,B1);
+ traits.loadRhs(&blB[13*RhsProgress], B1);
+ traits.madd(A0,B2,C2,B2);
+ traits.loadRhs(&blB[14*RhsProgress], B2);
+ traits.madd(A0,B3,C3,B3);
+
+ traits.loadLhs(&blA[3*LhsProgress], A0);
+ traits.loadRhs(&blB[15*RhsProgress], B3);
+ traits.madd(A0,B0,C0,B0);
+ traits.madd(A0,B1,C1,B1);
+ traits.madd(A0,B2,C2,B2);
+ traits.madd(A0,B3,C3,B3);
+ }
+
+ blB += nr*4*RhsProgress;
+ blA += 4*LhsProgress;
+ }
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ if(nr==2)
+ {
+ LhsPacket A0;
+ RhsPacket B0, B1;
+
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.loadRhs(&blB[1*RhsProgress], B1);
+ traits.madd(A0,B0,C0,B0);
+ traits.madd(A0,B1,C1,B1);
+ }
+ else
+ {
+ LhsPacket A0;
+ RhsPacket B0, B1, B2, B3;
+
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.loadRhs(&blB[1*RhsProgress], B1);
+ traits.loadRhs(&blB[2*RhsProgress], B2);
+ traits.loadRhs(&blB[3*RhsProgress], B3);
+
+ traits.madd(A0,B0,C0,B0);
+ traits.madd(A0,B1,C1,B1);
+ traits.madd(A0,B2,C2,B2);
+ traits.madd(A0,B3,C3,B3);
+ }
+
+ blB += nr*RhsProgress;
+ blA += LhsProgress;
+ }
+
+ ResPacket R0, R1, R2, R3;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
+ ResScalar* r1 = r0 + resStride;
+ ResScalar* r2 = r1 + resStride;
+ ResScalar* r3 = r2 + resStride;
+
+ R0 = ploadu<ResPacket>(r0);
+ R1 = ploadu<ResPacket>(r1);
+ if(nr==4) R2 = ploadu<ResPacket>(r2);
+ if(nr==4) R3 = ploadu<ResPacket>(r3);
+
+ traits.acc(C0, alphav, R0);
+ traits.acc(C1, alphav, R1);
+ if(nr==4) traits.acc(C2, alphav, R2);
+ if(nr==4) traits.acc(C3, alphav, R3);
+
+ pstoreu(r0, R0);
+ pstoreu(r1, R1);
+ if(nr==4) pstoreu(r2, R2);
+ if(nr==4) pstoreu(r3, R3);
+ }
+ for(Index i=peeled_mc2; i<rows; i++)
+ {
+ const LhsScalar* blA = &blockA[i*strideA+offsetA];
+ prefetch(&blA[0]);
+
+ // gets a 1 x nr res block as registers
+ ResScalar C0(0), C1(0), C2(0), C3(0);
+ // TODO directly use blockB ???
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+ for(Index k=0; k<depth; k++)
+ {
+ if(nr==2)
+ {
+ LhsScalar A0;
+ RhsScalar B0, B1;
+
+ A0 = blA[k];
+ B0 = blB[0];
+ B1 = blB[1];
+ MADD(cj,A0,B0,C0,B0);
+ MADD(cj,A0,B1,C1,B1);
+ }
+ else
+ {
+ LhsScalar A0;
+ RhsScalar B0, B1, B2, B3;
+
+ A0 = blA[k];
+ B0 = blB[0];
+ B1 = blB[1];
+ B2 = blB[2];
+ B3 = blB[3];
+
+ MADD(cj,A0,B0,C0,B0);
+ MADD(cj,A0,B1,C1,B1);
+ MADD(cj,A0,B2,C2,B2);
+ MADD(cj,A0,B3,C3,B3);
+ }
+
+ blB += nr;
+ }
+ res[(j2+0)*resStride + i] += alpha*C0;
+ res[(j2+1)*resStride + i] += alpha*C1;
+ if(nr==4) res[(j2+2)*resStride + i] += alpha*C2;
+ if(nr==4) res[(j2+3)*resStride + i] += alpha*C3;
+ }
+ }
+ // process remaining rhs/res columns one at a time
+ // => do the same but with nr==1
+ for(Index j2=packet_cols; j2<cols; j2++)
+ {
+ // unpack B
+ traits.unpackRhs(depth, &blockB[j2*strideB+offsetB], unpackedB);
+
+ for(Index i=0; i<peeled_mc; i+=mr)
+ {
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+ prefetch(&blA[0]);
+
+ // TODO move the res loads to the stores
+
+ // get res block as registers
+ AccPacket C0, C4;
+ traits.initAcc(C0);
+ traits.initAcc(C4);
+
+ const RhsScalar* blB = unpackedB;
+ for(Index k=0; k<depth; k++)
+ {
+ LhsPacket A0, A1;
+ RhsPacket B0;
+ RhsPacket T0;
+
+ traits.loadLhs(&blA[0*LhsProgress], A0);
+ traits.loadLhs(&blA[1*LhsProgress], A1);
+ traits.loadRhs(&blB[0*RhsProgress], B0);
+ traits.madd(A0,B0,C0,T0);
+ traits.madd(A1,B0,C4,B0);
+
+ blB += RhsProgress;
+ blA += 2*LhsProgress;
+ }
+ ResPacket R0, R4;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
+
+ R0 = ploadu<ResPacket>(r0);
+ R4 = ploadu<ResPacket>(r0+ResPacketSize);
+
+ traits.acc(C0, alphav, R0);
+ traits.acc(C4, alphav, R4);
+
+ pstoreu(r0, R0);
+ pstoreu(r0+ResPacketSize, R4);
+ }
+ if(rows-peeled_mc>=LhsProgress)
+ {
+ Index i = peeled_mc;
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
+ prefetch(&blA[0]);
+
+ AccPacket C0;
+ traits.initAcc(C0);
+
+ const RhsScalar* blB = unpackedB;
+ for(Index k=0; k<depth; k++)
+ {
+ LhsPacket A0;
+ RhsPacket B0;
+ traits.loadLhs(blA, A0);
+ traits.loadRhs(blB, B0);
+ traits.madd(A0, B0, C0, B0);
+ blB += RhsProgress;
+ blA += LhsProgress;
+ }
+
+ ResPacket alphav = pset1<ResPacket>(alpha);
+ ResPacket R0 = ploadu<ResPacket>(&res[(j2+0)*resStride + i]);
+ traits.acc(C0, alphav, R0);
+ pstoreu(&res[(j2+0)*resStride + i], R0);
+ }
+ for(Index i=peeled_mc2; i<rows; i++)
+ {
+ const LhsScalar* blA = &blockA[i*strideA+offsetA];
+ prefetch(&blA[0]);
+
+ // gets a 1 x 1 res block as registers
+ ResScalar C0(0);
+ // FIXME directly use blockB ??
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+ for(Index k=0; k<depth; k++)
+ {
+ LhsScalar A0 = blA[k];
+ RhsScalar B0 = blB[k];
+ MADD(cj, A0, B0, C0, B0);
+ }
+ res[(j2+0)*resStride + i] += alpha*C0;
+ }
+ }
+ }
+};
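+
+// Loop structure of the kernel above (summary added for exposition):
+//   for each nr-wide column panel j2 of the rhs:
+//     unpack the rhs panel, then sweep the rows in three tiers:
+//     - mr-wide row blocks using up to eight accumulators (C0..C7),
+//       with the depth loop peeled by four iterations;
+//     - one LhsProgress-wide block for the next partial row group;
+//     - plain scalar rows for the remainder.
+//   Trailing columns (cols % nr) are handled the same way with nr==1.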
+
+#undef MADD
+
+// pack a block of the lhs
+// The traversal is as follows (mr==4):
+// 0 4 8 12 ...
+// 1 5 9 13 ...
+// 2 6 10 14 ...
+// 3 7 11 15 ...
+//
+// 16 20 24 28 ...
+// 17 21 25 29 ...
+// 18 22 26 30 ...
+// 19 23 27 31 ...
+//
+// 32 33 34 35 ...
+// 36 37 38 39 ...
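+//
+// Concrete example (added for exposition): rows=6, depth=2, Pack1=4, Pack2=2.
+// The 4-row panel is packed first, one k-column at a time:
+//   lhs(0,0) lhs(1,0) lhs(2,0) lhs(3,0)  lhs(0,1) lhs(1,1) lhs(2,1) lhs(3,1)
+// then the remaining 2-row panel:
+//   lhs(4,0) lhs(5,0)  lhs(4,1) lhs(5,1)
+// so each k-column of a panel is contiguous and ready for packet loads.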
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs
+{
+ void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows,
+ Index stride=0, Index offset=0)
+ {
+// enum { PacketSize = packet_traits<Scalar>::size };
+ eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+ conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+ const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
+ Index count = 0;
+ Index peeled_mc = (rows/Pack1)*Pack1;
+ for(Index i=0; i<peeled_mc; i+=Pack1)
+ {
+ if(PanelMode) count += Pack1 * offset;
+ for(Index k=0; k<depth; k++)
+ for(Index w=0; w<Pack1; w++)
+ blockA[count++] = cj(lhs(i+w, k));
+ if(PanelMode) count += Pack1 * (stride-offset-depth);
+ }
+ if(rows-peeled_mc>=Pack2)
+ {
+ if(PanelMode) count += Pack2*offset;
+ for(Index k=0; k<depth; k++)
+ for(Index w=0; w<Pack2; w++)
+ blockA[count++] = cj(lhs(peeled_mc+w, k));
+ if(PanelMode) count += Pack2 * (stride-offset-depth);
+ peeled_mc += Pack2;
+ }
+ for(Index i=peeled_mc; i<rows; i++)
+ {
+ if(PanelMode) count += offset;
+ for(Index k=0; k<depth; k++)
+ blockA[count++] = cj(lhs(i, k));
+ if(PanelMode) count += (stride-offset-depth);
+ }
+ }
+};
+
+// copy a complete panel of the rhs
+// this version is optimized for column major matrices
+// The traversal order is as follows (nr==4):
+// 0 1 2 3 12 13 14 15 24 27
+// 4 5 6 7 16 17 18 19 25 28
+// 8 9 10 11 20 21 22 23 26 29
+// . . . . . . . . . .
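+//
+// Concrete example (added for exposition): depth=3, cols=5, nr=4. The first
+// four columns are interleaved row by row:
+//   b0[0] b1[0] b2[0] b3[0]  b0[1] b1[1] b2[1] b3[1]  b0[2] b1[2] b2[2] b3[2]
+// and the trailing fifth column is then copied contiguously, matching the
+// index grid above.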
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
+{
+ typedef typename packet_traits<Scalar>::type Packet;
+ enum { PacketSize = packet_traits<Scalar>::size };
+ void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
+ Index stride=0, Index offset=0)
+ {
+ eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+ conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+ Index packet_cols = (cols/nr) * nr;
+ Index count = 0;
+ for(Index j2=0; j2<packet_cols; j2+=nr)
+ {
+ // skip what we have before
+ if(PanelMode) count += nr * offset;
+ const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+ const Scalar* b1 = &rhs[(j2+1)*rhsStride];
+ const Scalar* b2 = &rhs[(j2+2)*rhsStride];
+ const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+ for(Index k=0; k<depth; k++)
+ {
+ blockB[count+0] = cj(b0[k]);
+ blockB[count+1] = cj(b1[k]);
+ if(nr==4) blockB[count+2] = cj(b2[k]);
+ if(nr==4) blockB[count+3] = cj(b3[k]);
+ count += nr;
+ }
+ // skip what we have after
+ if(PanelMode) count += nr * (stride-offset-depth);
+ }
+
+ // copy the remaining columns one at a time (nr==1)
+ for(Index j2=packet_cols; j2<cols; ++j2)
+ {
+ if(PanelMode) count += offset;
+ const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+ for(Index k=0; k<depth; k++)
+ {
+ blockB[count] = cj(b0[k]);
+ count += 1;
+ }
+ if(PanelMode) count += (stride-offset-depth);
+ }
+ }
+};
+
+// this version is optimized for row major matrices
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
+{
+ enum { PacketSize = packet_traits<Scalar>::size };
+ void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols,
+ Index stride=0, Index offset=0)
+ {
+ eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+ conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+ Index packet_cols = (cols/nr) * nr;
+ Index count = 0;
+ for(Index j2=0; j2<packet_cols; j2+=nr)
+ {
+ // skip what we have before
+ if(PanelMode) count += nr * offset;
+ for(Index k=0; k<depth; k++)
+ {
+ const Scalar* b0 = &rhs[k*rhsStride + j2];
+ blockB[count+0] = cj(b0[0]);
+ blockB[count+1] = cj(b0[1]);
+ if(nr==4) blockB[count+2] = cj(b0[2]);
+ if(nr==4) blockB[count+3] = cj(b0[3]);
+ count += nr;
+ }
+ // skip what we have after
+ if(PanelMode) count += nr * (stride-offset-depth);
+ }
+ // copy the remaining columns one at a time (nr==1)
+ for(Index j2=packet_cols; j2<cols; ++j2)
+ {
+ if(PanelMode) count += offset;
+ const Scalar* b0 = &rhs[j2];
+ for(Index k=0; k<depth; k++)
+ {
+ blockB[count] = cj(b0[k*rhsStride]);
+ count += 1;
+ }
+ if(PanelMode) count += stride-offset-depth;
+ }
+ }
+};
+
+} // end namespace internal
+
+/** \returns the currently set level 1 CPU cache size (in bytes) used to estimate the ideal blocking size parameters.
+ * \sa setCpuCacheSizes */
+inline std::ptrdiff_t l1CacheSize()
+{
+ std::ptrdiff_t l1, l2;
+ internal::manage_caching_sizes(GetAction, &l1, &l2);
+ return l1;
+}
+
+/** \returns the currently set level 2 CPU cache size (in bytes) used to estimate the ideal blocking size parameters.
+ * \sa setCpuCacheSizes */
+inline std::ptrdiff_t l2CacheSize()
+{
+ std::ptrdiff_t l1, l2;
+ internal::manage_caching_sizes(GetAction, &l1, &l2);
+ return l2;
+}
+
+/** Set the CPU L1 and L2 cache sizes (in bytes).
+ * These values are used to adjust the block sizes
+ * of the algorithms working on blocks.
+ *
+ * \sa computeProductBlockingSizes */
+inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2)
+{
+ internal::manage_caching_sizes(SetAction, &l1, &l2);
+}
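+
+// Illustrative usage sketch (added for exposition; assumes the usual Eigen
+// namespace wrapping of this header):
+//
+// std::ptrdiff_t l1 = Eigen::l1CacheSize(); // current L1 size in bytes
+// std::ptrdiff_t l2 = Eigen::l2CacheSize(); // current L2 size in bytes
+// Eigen::setCpuCacheSizes(32*1024, 2*1024*1024); // override both values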
+
+#endif // EIGEN_GENERAL_BLOCK_PANEL_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h
new file mode 100644
index 00000000000..ae94a27953b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -0,0 +1,439 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
+#define EIGEN_GENERAL_MATRIX_MATRIX_H
+
+namespace internal {
+
+template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
+
+/* Specialization for a row-major destination matrix => simple transposition of the product */
+template<
+ typename Index,
+ typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
+struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
+{
+ typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ static EIGEN_STRONG_INLINE void run(
+ Index rows, Index cols, Index depth,
+ const LhsScalar* lhs, Index lhsStride,
+ const RhsScalar* rhs, Index rhsStride,
+ ResScalar* res, Index resStride,
+ ResScalar alpha,
+ level3_blocking<RhsScalar,LhsScalar>& blocking,
+ GemmParallelInfo<Index>* info = 0)
+ {
+ // transpose the product such that the result is column major
+ general_matrix_matrix_product<Index,
+ RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
+ LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
+ ColMajor>
+ ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
+ }
+};
+
+/* Specialization for a col-major destination matrix
+ * => Blocking algorithm following Goto's paper */
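+//
+// High-level sketch of the sequential blocking scheme implemented below
+// (added for exposition, mirroring the GEMM_VAR1/GEPP_VAR1 comments):
+//
+// for each kc-slice k2 of the depth dimension:          // GEMM_VAR1
+//   pack rhs(k2:k2+kc, :) into blockB                   // L2-friendly panel
+//   for each mc-slice i2 of the rows:                   // GEPP_VAR1
+//     pack lhs(i2:i2+mc, k2:k2+kc) into blockA          // L1-friendly block
+//     res(i2:i2+mc, :) += alpha * blockA * blockB       // gebp kernel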
+template<
+ typename Index,
+ typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
+struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
+{
+typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+static void run(Index rows, Index cols, Index depth,
+ const LhsScalar* _lhs, Index lhsStride,
+ const RhsScalar* _rhs, Index rhsStride,
+ ResScalar* res, Index resStride,
+ ResScalar alpha,
+ level3_blocking<LhsScalar,RhsScalar>& blocking,
+ GemmParallelInfo<Index>* info = 0)
+{
+ const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+ typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+
+ Index kc = blocking.kc(); // cache block size along the K direction
+ Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
+ //Index nc = blocking.nc(); // cache block size along the N direction
+
+ gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
+ gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
+
+#ifdef EIGEN_HAS_OPENMP
+ if(info)
+ {
+ // this is the parallel version!
+ Index tid = omp_get_thread_num();
+ Index threads = omp_get_num_threads();
+
+ std::size_t sizeA = kc*mc;
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+ ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
+ ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
+
+ RhsScalar* blockB = blocking.blockB();
+ eigen_internal_assert(blockB!=0);
+
+ // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
+ for(Index k=0; k<depth; k+=kc)
+ {
+ const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B' and cols of A'
+
+ // In order to reduce the chance that a thread has to wait for the others,
+ // let's start by packing A'.
+ pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
+
+ // Pack B_k to B' in a parallel fashion:
+ // each thread packs the sub block B_k,j to B'_j where j is the thread id.
+
+ // However, before copying to B'_j, we have to make sure that no other thread is still using it,
+ // i.e., we test that info[tid].users equals 0.
+ // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
+ while(info[tid].users!=0) {}
+ info[tid].users += threads;
+
+ pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
+
+ // Notify the other threads that the part B'_j is ready to go.
+ info[tid].sync = k;
+
+ // Computes C_i += A' * B' per B'_j
+ for(Index shift=0; shift<threads; ++shift)
+ {
+ Index j = (tid+shift)%threads;
+
+ // At this point we have to make sure that B'_j has been updated by thread j;
+ // we busy-wait on info[j].sync to mimic a volatile read.
+ // However, there is no need to wait for the B' part which has been updated by the current thread!
+ if(shift>0)
+ while(info[j].sync!=k) {}
+
+ gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
+ }
+
+ // Then keep going as usual with the remaining A'
+ for(Index i=mc; i<rows; i+=mc)
+ {
+ const Index actual_mc = (std::min)(i+mc,rows)-i;
+
+ // pack A_i,k to A'
+ pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
+
+ // C_i += A' * B'
+ gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
+ }
+
+ // Release all the sub blocks B'_j of B' for the current thread,
+ // i.e., we simply decrement the number of users by 1
+ for(Index j=0; j<threads; ++j)
+ #pragma omp atomic
+ --(info[j].users);
+ }
+ }
+ else
+#endif // EIGEN_HAS_OPENMP
+ {
+ EIGEN_UNUSED_VARIABLE(info);
+
+ // this is the sequential version!
+ std::size_t sizeA = kc*mc;
+ std::size_t sizeB = kc*cols;
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+
+ ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
+ ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
+ ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
+
+ // For each horizontal panel of the rhs, and corresponding panel of the lhs...
+ // (==GEMM_VAR1)
+ for(Index k2=0; k2<depth; k2+=kc)
+ {
+ const Index actual_kc = (std::min)(k2+kc,depth)-k2;
+
+ // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
+ // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
+ // Note that this panel will be read as many times as the number of blocks in the lhs's
+ // vertical panel which is, in practice, a very low number.
+ pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
+
+
+ // For each mc x kc block of the lhs's vertical panel...
+ // (==GEPP_VAR1)
+ for(Index i2=0; i2<rows; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(i2+mc,rows)-i2;
+
+ // We pack the lhs's block into a sequential chunk of memory (L1 caching)
+ // Note that this block will be read a very high number of times, equal to the number of
+ // micro vertical panels of the large rhs panel (e.g., cols/4 times).
+ pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
+
+ // Everything is packed, we can now call the block * panel kernel:
+ gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
+
+ }
+ }
+ }
+}
+
+};
+
+/*********************************************************************************
+* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
+* implementation of the high level wrapper to general_matrix_matrix_product
+**********************************************************************************/
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
+{};
+
+template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
+struct gemm_functor
+{
+ gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, Scalar actualAlpha,
+ BlockingType& blocking)
+ : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
+ {}
+
+ void initParallelSession() const
+ {
+ m_blocking.allocateB();
+ }
+
+ void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
+ {
+ if(cols==-1)
+ cols = m_rhs.cols();
+
+ Gemm::run(rows, cols, m_lhs.cols(),
+ /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
+ /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
+ (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
+ m_actualAlpha, m_blocking, info);
+ }
+
+ protected:
+ const Lhs& m_lhs;
+ const Rhs& m_rhs;
+ Dest& m_dest;
+ Scalar m_actualAlpha;
+ BlockingType& m_blocking;
+};
+
+template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth,
+bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
+
+template<typename _LhsScalar, typename _RhsScalar>
+class level3_blocking
+{
+ typedef _LhsScalar LhsScalar;
+ typedef _RhsScalar RhsScalar;
+
+ protected:
+ LhsScalar* m_blockA;
+ RhsScalar* m_blockB;
+ RhsScalar* m_blockW;
+
+ DenseIndex m_mc;
+ DenseIndex m_nc;
+ DenseIndex m_kc;
+
+ public:
+
+ level3_blocking()
+ : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
+ {}
+
+ inline DenseIndex mc() const { return m_mc; }
+ inline DenseIndex nc() const { return m_nc; }
+ inline DenseIndex kc() const { return m_kc; }
+
+ inline LhsScalar* blockA() { return m_blockA; }
+ inline RhsScalar* blockB() { return m_blockB; }
+ inline RhsScalar* blockW() { return m_blockW; }
+};
+
+template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
+class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, true>
+ : public level3_blocking<
+ typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
+ typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
+{
+ enum {
+ Transpose = StorageOrder==RowMajor,
+ ActualRows = Transpose ? MaxCols : MaxRows,
+ ActualCols = Transpose ? MaxRows : MaxCols
+ };
+ typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
+ typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
+ typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+ enum {
+ SizeA = ActualRows * MaxDepth,
+ SizeB = ActualCols * MaxDepth,
+ SizeW = MaxDepth * Traits::WorkSpaceFactor
+ };
+
+ EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
+ EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
+ EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
+
+ public:
+
+ gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
+ {
+ this->m_mc = ActualRows;
+ this->m_nc = ActualCols;
+ this->m_kc = MaxDepth;
+ this->m_blockA = m_staticA;
+ this->m_blockB = m_staticB;
+ this->m_blockW = m_staticW;
+ }
+
+ inline void allocateA() {}
+ inline void allocateB() {}
+ inline void allocateW() {}
+ inline void allocateAll() {}
+};
+
+template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth>
+class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, false>
+ : public level3_blocking<
+ typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
+ typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
+{
+ enum {
+ Transpose = StorageOrder==RowMajor
+ };
+ typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
+ typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
+ typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+
+ DenseIndex m_sizeA;
+ DenseIndex m_sizeB;
+ DenseIndex m_sizeW;
+
+ public:
+
+ gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
+ {
+ this->m_mc = Transpose ? cols : rows;
+ this->m_nc = Transpose ? rows : cols;
+ this->m_kc = depth;
+
+ computeProductBlockingSizes<LhsScalar,RhsScalar>(this->m_kc, this->m_mc, this->m_nc);
+ m_sizeA = this->m_mc * this->m_kc;
+ m_sizeB = this->m_kc * this->m_nc;
+ m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
+ }
+
+ void allocateA()
+ {
+ if(this->m_blockA==0)
+ this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
+ }
+
+ void allocateB()
+ {
+ if(this->m_blockB==0)
+ this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
+ }
+
+ void allocateW()
+ {
+ if(this->m_blockW==0)
+ this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
+ }
+
+ void allocateAll()
+ {
+ allocateA();
+ allocateB();
+ allocateW();
+ }
+
+ ~gemm_blocking_space()
+ {
+ aligned_delete(this->m_blockA, m_sizeA);
+ aligned_delete(this->m_blockB, m_sizeB);
+ aligned_delete(this->m_blockW, m_sizeW);
+ }
+};
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, GemmProduct>
+ : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
+{
+ enum {
+ MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
+ };
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+ typedef typename Lhs::Scalar LhsScalar;
+ typedef typename Rhs::Scalar RhsScalar;
+ typedef Scalar ResScalar;
+
+ GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {
+ typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
+ EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
+ }
+
+ template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ {
+ eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+
+ const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
+ const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);
+
+ Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
+ * RhsBlasTraits::extractScalarFactor(m_rhs);
+
+ typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
+ Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
+
+ typedef internal::gemm_functor<
+ Scalar, Index,
+ internal::general_matrix_matrix_product<
+ Index,
+ LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
+ RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
+ (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
+ _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
+
+ BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
+
+ internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
+ }
+};
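+
+// Illustrative end-user view (added for exposition): a plain Eigen product of
+// dynamically sized matrices is dispatched to this GeneralProduct
+// specialization, and from there to the blocked GEMM implementation above.
+//
+// Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 256);
+// Eigen::MatrixXd B = Eigen::MatrixXd::Random(256, 128);
+// Eigen::MatrixXd C = A * B; // routes through GeneralProduct<...,GemmProduct>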
+
+#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
new file mode 100644
index 00000000000..5043b64fe2e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
@@ -0,0 +1,225 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H
+#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H
+
+namespace internal {
+
+/**********************************************************************
+* This file implements a general A * B product while
+* evaluating only one triangular part of the product.
+* This is more general version of self adjoint product (C += A A^T)
+* as the level 3 SYRK Blas routine.
+**********************************************************************/
+
+// forward declarations (defined at the end of this file)
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int UpLo>
+struct tribb_kernel;
+
+/* Optimized matrix-matrix product evaluating only one triangular half */
+template <typename Index,
+ typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+ int ResStorageOrder, int UpLo>
+struct general_matrix_matrix_triangular_product;
+
+// as usual, if the result is row-major we transpose the product
+template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int UpLo>
+struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,UpLo>
+{
+ typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
+ const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha)
+ {
+ general_matrix_matrix_triangular_product<Index,
+ RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
+ LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
+ ColMajor, UpLo==Lower?Upper:Lower>
+ ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha);
+ }
+};
+
+template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int UpLo>
+struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo>
+{
+ typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,
+ const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha)
+ {
+ const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+ typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+
+ Index kc = depth; // cache block size along the K direction
+ Index mc = size; // cache block size along the M direction
+ Index nc = size; // cache block size along the N direction
+ computeProductBlockingSizes<LhsScalar,RhsScalar>(kc, mc, nc);
+ // !!! mc must be a multiple of nr:
+ if(mc > Traits::nr)
+ mc = (mc/Traits::nr)*Traits::nr;
+
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+ std::size_t sizeB = sizeW + kc*size;
+ ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, kc*mc, 0);
+ ei_declare_aligned_stack_constructed_variable(RhsScalar, allocatedBlockB, sizeB, 0);
+ RhsScalar* blockB = allocatedBlockB + sizeW;
+
+ gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
+ gebp_kernel <LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
+ tribb_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs, UpLo> sybb;
+
+ for(Index k2=0; k2<depth; k2+=kc)
+ {
+ const Index actual_kc = (std::min)(k2+kc,depth)-k2;
+
+ // note that the actual rhs is the transpose/adjoint of mat
+ pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, size);
+
+ for(Index i2=0; i2<size; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(i2+mc,size)-i2;
+
+ pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
+
+ // the selected actual_mc * size panel of res is split into three different parts:
+ // 1 - before the diagonal => processed with gebp or skipped
+ // 2 - the actual_mc x actual_mc symmetric block => processed with a special kernel
+ // 3 - after the diagonal => processed with gebp or skipped
+ if (UpLo==Lower)
+ gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, (std::min)(size,i2), alpha,
+ -1, -1, 0, 0, allocatedBlockB);
+
+ sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB);
+
+ if (UpLo==Upper)
+ {
+ Index j2 = i2+actual_mc;
+ gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, (std::max)(Index(0), size-j2), alpha,
+ -1, -1, 0, 0, allocatedBlockB);
+ }
+ }
+ }
+ }
+};
+
+// Optimized packed Block * packed Block product kernel evaluating only one given triangular part
+// This kernel is built on top of the gebp kernel:
+// - the current destination block is processed per panel of actual_mc x BlockSize
+// where BlockSize is set to the minimal value allowing gebp to be as fast as possible
+// - then, as usual, each panel is split into three parts along the diagonal,
+// the sub blocks above and below the diagonal are processed as usual,
+// while the triangular block overlapping the diagonal is evaluated into a
+// small temporary buffer which is then accumulated into the result using a
+// triangular traversal.
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int UpLo>
+struct tribb_kernel
+{
+ typedef gebp_traits<LhsScalar,RhsScalar,ConjLhs,ConjRhs> Traits;
+ typedef typename Traits::ResScalar ResScalar;
+
+ enum {
+ BlockSize = EIGEN_PLAIN_ENUM_MAX(mr,nr)
+ };
+ void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, ResScalar alpha, RhsScalar* workspace)
+ {
+ gebp_kernel<LhsScalar, RhsScalar, Index, mr, nr, ConjLhs, ConjRhs> gebp_kernel;
+ Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer;
+
+ // let's process the block per panel of actual_mc x BlockSize,
+ // again, each is split into three parts, etc.
+ for (Index j=0; j<size; j+=BlockSize)
+ {
+ Index actualBlockSize = std::min<Index>(BlockSize,size - j);
+ const RhsScalar* actual_b = blockB+j*depth;
+
+ if(UpLo==Upper)
+ gebp_kernel(res+j*resStride, resStride, blockA, actual_b, j, depth, actualBlockSize, alpha,
+ -1, -1, 0, 0, workspace);
+
+ // selfadjoint micro block
+ {
+ Index i = j;
+ buffer.setZero();
+ // 1 - apply the kernel on the temporary buffer
+ gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha,
+ -1, -1, 0, 0, workspace);
+ // 2 - triangular accumulation
+ for(Index j1=0; j1<actualBlockSize; ++j1)
+ {
+ ResScalar* r = res + (j+j1)*resStride + i;
+ for(Index i1=UpLo==Lower ? j1 : 0;
+ UpLo==Lower ? i1<actualBlockSize : i1<=j1; ++i1)
+ r[i1] += buffer(i1,j1);
+ }
+ }
+
+ if(UpLo==Lower)
+ {
+ Index i = j+actualBlockSize;
+ gebp_kernel(res+j*resStride+i, resStride, blockA+depth*i, actual_b, size-i, depth, actualBlockSize, alpha,
+ -1, -1, 0, 0, workspace);
+ }
+ }
+ }
+};
+
+} // end namespace internal
+
+// high level API
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename ProductDerived, typename _Lhs, typename _Rhs>
+TriangularView<MatrixType,UpLo>& TriangularView<MatrixType,UpLo>::assignProduct(const ProductBase<ProductDerived, _Lhs,_Rhs>& prod, const Scalar& alpha)
+{
+ typedef typename internal::remove_all<typename ProductDerived::LhsNested>::type Lhs;
+ typedef internal::blas_traits<Lhs> LhsBlasTraits;
+ typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
+ typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
+ const ActualLhs actualLhs = LhsBlasTraits::extract(prod.lhs());
+
+ typedef typename internal::remove_all<typename ProductDerived::RhsNested>::type Rhs;
+ typedef internal::blas_traits<Rhs> RhsBlasTraits;
+ typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
+ typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
+ const ActualRhs actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ typename ProductDerived::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
+
+ internal::general_matrix_matrix_triangular_product<Index,
+ typename Lhs::Scalar, _ActualLhs::Flags&RowMajorBit ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
+ typename Rhs::Scalar, _ActualRhs::Flags&RowMajorBit ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
+ MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo>
+ ::run(m_matrix.cols(), actualLhs.cols(),
+ &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(),
+ const_cast<Scalar*>(m_matrix.data()), m_matrix.outerStride(), actualAlpha);
+
+ return *this;
+}
+
+#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H
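
For orientation, the UpLo==Lower specialization above computes the same result a naive SYRK-style loop would express; a reference sketch for the real, lower-triangular case (illustrative code, not part of Eigen):

// Reference (unoptimized) SYRK-like update: lower triangle of C += alpha*A*A^T.
// A is size x depth, C is size x size, both column-major; the upper triangle
// of C is never read or written, matching the triangular evaluation above.
void syrk_lower_ref(int size, int depth,
                    const double* A, int lda,
                    double* C, int ldc, double alpha)
{
  for (int j = 0; j < size; ++j)
    for (int i = j; i < size; ++i) {      // i >= j: lower triangle only
      double s = 0;
      for (int k = 0; k < depth; ++k)
        s += A[i + k * lda] * A[j + k * lda];
      C[i + j * ldc] += alpha * s;
    }
}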
diff --git a/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h
new file mode 100644
index 00000000000..e0e2cbf8f62
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -0,0 +1,559 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H
+#define EIGEN_GENERAL_MATRIX_VECTOR_H
+
+namespace internal {
+
+/* Optimized col-major matrix * vector product:
+ * This algorithm processes 4 columns at once, which allows us to both reduce
+ * the number of loads/stores of the result by a factor of 4 and to reduce
+ * the instruction dependency. Moreover, we know that all bands have the
+ * same alignment pattern.
+ *
+ * Mixing type logic: C += alpha * A * B
+ * | A | B |alpha| comments
+ * |real |cplx |cplx | no vectorization
+ * |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization
+ * |cplx |real |cplx | invalid, the caller has to do tmp = A * B; C += alpha*tmp
+ * |cplx |real |real | optimal case, vectorization possible via real-cplx mul
+ */
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
+struct general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs>
+{
+typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+enum {
+ Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
+ && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
+ LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+ RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+ ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
+};
+
+typedef typename packet_traits<LhsScalar>::type _LhsPacket;
+typedef typename packet_traits<RhsScalar>::type _RhsPacket;
+typedef typename packet_traits<ResScalar>::type _ResPacket;
+
+typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+EIGEN_DONT_INLINE static void run(
+ Index rows, Index cols,
+ const LhsScalar* lhs, Index lhsStride,
+ const RhsScalar* rhs, Index rhsIncr,
+ ResScalar* res, Index
+ #ifdef EIGEN_INTERNAL_DEBUGGING
+ resIncr
+ #endif
+ , RhsScalar alpha)
+{
+ eigen_internal_assert(resIncr==1);
+ #ifdef _EIGEN_ACCUMULATE_PACKETS
+ #error _EIGEN_ACCUMULATE_PACKETS has already been defined
+ #endif
+ #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \
+ pstore(&res[j], \
+ padd(pload<ResPacket>(&res[j]), \
+ padd( \
+ padd(pcj.pmul(EIGEN_CAT(ploa , A0)<LhsPacket>(&lhs0[j]), ptmp0), \
+ pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs1[j]), ptmp1)), \
+ padd(pcj.pmul(EIGEN_CAT(ploa , A2)<LhsPacket>(&lhs2[j]), ptmp2), \
+ pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs3[j]), ptmp3)) )))
+
+ conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+ conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+ if(ConjugateRhs)
+ alpha = conj(alpha);
+
+ enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
+ const Index columnsAtOnce = 4;
+ const Index peels = 2;
+ const Index LhsPacketAlignedMask = LhsPacketSize-1;
+ const Index ResPacketAlignedMask = ResPacketSize-1;
+ const Index PeelAlignedMask = ResPacketSize*peels-1;
+ const Index size = rows;
+
+ // How many coeffs of the result do we have to skip to be aligned.
+ // Here we assume data are at least aligned on the base scalar type.
+ Index alignedStart = first_aligned(res,size);
+ Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0;
+ const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
+
+ const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
+ Index alignmentPattern = alignmentStep==0 ? AllAligned
+ : alignmentStep==(LhsPacketSize/2) ? EvenAligned
+ : FirstAligned;
+
+ // we cannot assume the first element is aligned because of sub-matrices
+ const Index lhsAlignmentOffset = first_aligned(lhs,size);
+
+ // find how many columns we have to skip to be aligned with the result (if possible)
+ Index skipColumns = 0;
+ // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
+ if( (size_t(lhs)%sizeof(LhsScalar)) || (size_t(res)%sizeof(ResScalar)) )
+ {
+ alignedSize = 0;
+ alignedStart = 0;
+ }
+ else if (LhsPacketSize>1)
+ {
+ eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size<LhsPacketSize);
+
+ while (skipColumns<LhsPacketSize &&
+ alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%LhsPacketSize))
+ ++skipColumns;
+ if (skipColumns==LhsPacketSize)
+ {
+ // nothing can be aligned, no need to skip any column
+ alignmentPattern = NoneAligned;
+ skipColumns = 0;
+ }
+ else
+ {
+ skipColumns = (std::min)(skipColumns,cols);
+ // note that the skipped columns are processed later.
+ }
+
+ eigen_internal_assert( (alignmentPattern==NoneAligned)
+ || (skipColumns + columnsAtOnce >= cols)
+ || LhsPacketSize > size
+ || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);
+ }
+ else if(Vectorizable)
+ {
+ alignedStart = 0;
+ alignedSize = size;
+ alignmentPattern = AllAligned;
+ }
+
+ Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+ Index offset3 = (FirstAligned && alignmentStep==1?1:3);
+
+ Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
+ for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
+ {
+ RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[i*rhsIncr]),
+ ptmp1 = pset1<RhsPacket>(alpha*rhs[(i+offset1)*rhsIncr]),
+ ptmp2 = pset1<RhsPacket>(alpha*rhs[(i+2)*rhsIncr]),
+ ptmp3 = pset1<RhsPacket>(alpha*rhs[(i+offset3)*rhsIncr]);
+
+ // this helps a lot in generating better binary code
+ const LhsScalar *lhs0 = lhs + i*lhsStride, *lhs1 = lhs + (i+offset1)*lhsStride,
+ *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
+
+ if (Vectorizable)
+ {
+ /* explicit vectorization */
+ // process initial unaligned coeffs
+ for (Index j=0; j<alignedStart; ++j)
+ {
+ res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
+ res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
+ res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
+ res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
+ }
+
+ if (alignedSize>alignedStart)
+ {
+ switch(alignmentPattern)
+ {
+ case AllAligned:
+ for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+ _EIGEN_ACCUMULATE_PACKETS(d,d,d);
+ break;
+ case EvenAligned:
+ for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+ _EIGEN_ACCUMULATE_PACKETS(d,du,d);
+ break;
+ case FirstAligned:
+ if(peels>1)
+ {
+ LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;
+ ResPacket T0, T1;
+
+ A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
+ A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
+ A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
+
+ for (Index j = alignedStart; j<peeledSize; j+=peels*ResPacketSize)
+ {
+ A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]); palign<1>(A01,A11);
+ A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]); palign<2>(A02,A12);
+ A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]); palign<3>(A03,A13);
+
+ A00 = pload<LhsPacket>(&lhs0[j]);
+ A10 = pload<LhsPacket>(&lhs0[j+LhsPacketSize]);
+ T0 = pcj.pmadd(A00, ptmp0, pload<ResPacket>(&res[j]));
+ T1 = pcj.pmadd(A10, ptmp0, pload<ResPacket>(&res[j+ResPacketSize]));
+
+ T0 = pcj.pmadd(A01, ptmp1, T0);
+ A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]); palign<1>(A11,A01);
+ T0 = pcj.pmadd(A02, ptmp2, T0);
+ A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]); palign<2>(A12,A02);
+ T0 = pcj.pmadd(A03, ptmp3, T0);
+ pstore(&res[j],T0);
+ A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]); palign<3>(A13,A03);
+ T1 = pcj.pmadd(A11, ptmp1, T1);
+ T1 = pcj.pmadd(A12, ptmp2, T1);
+ T1 = pcj.pmadd(A13, ptmp3, T1);
+ pstore(&res[j+ResPacketSize],T1);
+ }
+ }
+ for (Index j = peeledSize; j<alignedSize; j+=ResPacketSize)
+ _EIGEN_ACCUMULATE_PACKETS(d,du,du);
+ break;
+ default:
+ for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+ _EIGEN_ACCUMULATE_PACKETS(du,du,du);
+ break;
+ }
+ }
+ } // end explicit vectorization
+
+ /* process remaining coeffs (or all if there is no explicit vectorization) */
+ for (Index j=alignedSize; j<size; ++j)
+ {
+ res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
+ res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
+ res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
+ res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
+ }
+ }
+
+ // process remaining first and last columns (at most columnsAtOnce-1)
+ Index end = cols;
+ Index start = columnBound;
+ do
+ {
+ for (Index k=start; k<end; ++k)
+ {
+ RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[k*rhsIncr]);
+ const LhsScalar* lhs0 = lhs + k*lhsStride;
+
+ if (Vectorizable)
+ {
+ /* explicit vectorization */
+ // process first unaligned result's coeffs
+ for (Index j=0; j<alignedStart; ++j)
+ res[j] += cj.pmul(lhs0[j], pfirst(ptmp0));
+ // process aligned result's coeffs
+ if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
+ for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
+ pstore(&res[i], pcj.pmadd(pload<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
+ else
+ for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
+ pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
+ }
+
+ // process remaining scalars (or all if no explicit vectorization)
+ for (Index i=alignedSize; i<size; ++i)
+ res[i] += cj.pmul(lhs0[i], pfirst(ptmp0));
+ }
+ if (skipColumns)
+ {
+ start = 0;
+ end = skipColumns;
+ skipColumns = 0;
+ }
+ else
+ break;
+ } while(Vectorizable);
+ #undef _EIGEN_ACCUMULATE_PACKETS
+}
+};
+
+/* Optimized row-major matrix * vector product:
+ * This algorithm processes 4 rows at once, which allows us to both reduce
+ * the number of loads/stores of the result by a factor of 4 and to reduce
+ * the instruction dependency. Moreover, we know that all bands have the
+ * same alignment pattern.
+ *
+ * Mixing type logic:
+ * - alpha is always a complex (or converted to a complex)
+ * - no vectorization
+ */
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
+struct general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs>
+{
+typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+enum {
+ Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
+ && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
+ LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+ RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+ ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
+};
+
+typedef typename packet_traits<LhsScalar>::type _LhsPacket;
+typedef typename packet_traits<RhsScalar>::type _RhsPacket;
+typedef typename packet_traits<ResScalar>::type _ResPacket;
+
+typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+EIGEN_DONT_INLINE static void run(
+ Index rows, Index cols,
+ const LhsScalar* lhs, Index lhsStride,
+ const RhsScalar* rhs, Index rhsIncr,
+ ResScalar* res, Index resIncr,
+ ResScalar alpha)
+{
+ EIGEN_UNUSED_VARIABLE(rhsIncr);
+ eigen_internal_assert(rhsIncr==1);
+ #ifdef _EIGEN_ACCUMULATE_PACKETS
+ #error _EIGEN_ACCUMULATE_PACKETS has already been defined
+ #endif
+
+ #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\
+ RhsPacket b = pload<RhsPacket>(&rhs[j]); \
+ ptmp0 = pcj.pmadd(EIGEN_CAT(ploa,A0) <LhsPacket>(&lhs0[j]), b, ptmp0); \
+ ptmp1 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs1[j]), b, ptmp1); \
+ ptmp2 = pcj.pmadd(EIGEN_CAT(ploa,A2) <LhsPacket>(&lhs2[j]), b, ptmp2); \
+ ptmp3 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs3[j]), b, ptmp3); }
+
+ conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+ conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+
+ enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
+ const Index rowsAtOnce = 4;
+ const Index peels = 2;
+ const Index RhsPacketAlignedMask = RhsPacketSize-1;
+ const Index LhsPacketAlignedMask = LhsPacketSize-1;
+ const Index PeelAlignedMask = RhsPacketSize*peels-1;
+ const Index depth = cols;
+
+ // How many coeffs of the result do we have to skip to be aligned.
+ // Here we assume data are at least aligned on the base scalar type
+ // if that's not the case then vectorization is discarded, see below.
+ Index alignedStart = first_aligned(rhs, depth);
+ Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
+ const Index peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
+
+ const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
+ Index alignmentPattern = alignmentStep==0 ? AllAligned
+ : alignmentStep==(LhsPacketSize/2) ? EvenAligned
+ : FirstAligned;
+
+ // we cannot assume the first element is aligned because of sub-matrices
+ const Index lhsAlignmentOffset = first_aligned(lhs,depth);
+
+ // find how many rows we have to skip to be aligned with the rhs (if possible)
+ Index skipRows = 0;
+ // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
+ if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) || (size_t(lhs)%sizeof(LhsScalar)) || (size_t(rhs)%sizeof(RhsScalar)) )
+ {
+ alignedSize = 0;
+ alignedStart = 0;
+ }
+ else if (LhsPacketSize>1)
+ {
+ eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || depth<LhsPacketSize);
+
+ while (skipRows<LhsPacketSize &&
+ alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%LhsPacketSize))
+ ++skipRows;
+ if (skipRows==LhsPacketSize)
+ {
+ // nothing can be aligned, no need to skip any row
+ alignmentPattern = NoneAligned;
+ skipRows = 0;
+ }
+ else
+ {
+ skipRows = (std::min)(skipRows,Index(rows));
+ // note that the skipped rows are processed later.
+ }
+ eigen_internal_assert( alignmentPattern==NoneAligned
+ || LhsPacketSize==1
+ || (skipRows + rowsAtOnce >= rows)
+ || LhsPacketSize > depth
+ || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);
+ }
+ else if(Vectorizable)
+ {
+ alignedStart = 0;
+ alignedSize = depth;
+ alignmentPattern = AllAligned;
+ }
+
+ Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+ Index offset3 = (FirstAligned && alignmentStep==1?1:3);
+
+ Index rowBound = ((rows-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
+ for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
+ {
+ EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
+ ResScalar tmp1 = ResScalar(0), tmp2 = ResScalar(0), tmp3 = ResScalar(0);
+
+ // this helps the compiler generate good binary code
+ const LhsScalar *lhs0 = lhs + i*lhsStride, *lhs1 = lhs + (i+offset1)*lhsStride,
+ *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
+
+ if (Vectorizable)
+ {
+ /* explicit vectorization */
+ ResPacket ptmp0 = pset1<ResPacket>(ResScalar(0)), ptmp1 = pset1<ResPacket>(ResScalar(0)),
+ ptmp2 = pset1<ResPacket>(ResScalar(0)), ptmp3 = pset1<ResPacket>(ResScalar(0));
+
+ // process initial unaligned coeffs
+ // FIXME this loop gets vectorized by the compiler!
+ for (Index j=0; j<alignedStart; ++j)
+ {
+ RhsScalar b = rhs[j];
+ tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
+ tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
+ }
+
+ if (alignedSize>alignedStart)
+ {
+ switch(alignmentPattern)
+ {
+ case AllAligned:
+ for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+ _EIGEN_ACCUMULATE_PACKETS(d,d,d);
+ break;
+ case EvenAligned:
+ for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+ _EIGEN_ACCUMULATE_PACKETS(d,du,d);
+ break;
+ case FirstAligned:
+ if (peels>1)
+ {
+ /* Here we process 4 rows with two peeled iterations to hide
+ * the overhead of unaligned loads. Moreover, unaligned loads are handled
+ * using special shift/move operations between the two aligned packets
+ * overlapping the desired unaligned packet. This is *much* more efficient
+ * than basic unaligned loads.
+ */
+ LhsPacket A01, A02, A03, A11, A12, A13;
+ A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
+ A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
+ A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
+
+ for (Index j = alignedStart; j<peeledSize; j+=peels*RhsPacketSize)
+ {
+ RhsPacket b = pload<RhsPacket>(&rhs[j]);
+ A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]); palign<1>(A01,A11);
+ A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]); palign<2>(A02,A12);
+ A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]); palign<3>(A03,A13);
+
+ ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), b, ptmp0);
+ ptmp1 = pcj.pmadd(A01, b, ptmp1);
+ A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]); palign<1>(A11,A01);
+ ptmp2 = pcj.pmadd(A02, b, ptmp2);
+ A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]); palign<2>(A12,A02);
+ ptmp3 = pcj.pmadd(A03, b, ptmp3);
+ A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]); palign<3>(A13,A03);
+
+ b = pload<RhsPacket>(&rhs[j+RhsPacketSize]);
+ ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j+LhsPacketSize]), b, ptmp0);
+ ptmp1 = pcj.pmadd(A11, b, ptmp1);
+ ptmp2 = pcj.pmadd(A12, b, ptmp2);
+ ptmp3 = pcj.pmadd(A13, b, ptmp3);
+ }
+ }
+ for (Index j = peeledSize; j<alignedSize; j+=RhsPacketSize)
+ _EIGEN_ACCUMULATE_PACKETS(d,du,du);
+ break;
+ default:
+ for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+ _EIGEN_ACCUMULATE_PACKETS(du,du,du);
+ break;
+ }
+ tmp0 += predux(ptmp0);
+ tmp1 += predux(ptmp1);
+ tmp2 += predux(ptmp2);
+ tmp3 += predux(ptmp3);
+ }
+ } // end explicit vectorization
+
+ // process remaining coeffs (or all if no explicit vectorization)
+ // FIXME this loop gets vectorized by the compiler!
+ for (Index j=alignedSize; j<depth; ++j)
+ {
+ RhsScalar b = rhs[j];
+ tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
+ tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
+ }
+ res[i*resIncr] += alpha*tmp0;
+ res[(i+offset1)*resIncr] += alpha*tmp1;
+ res[(i+2)*resIncr] += alpha*tmp2;
+ res[(i+offset3)*resIncr] += alpha*tmp3;
+ }
+
+ // process remaining first and last rows (at most rowsAtOnce-1)
+ Index end = rows;
+ Index start = rowBound;
+ do
+ {
+ for (Index i=start; i<end; ++i)
+ {
+ EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
+ ResPacket ptmp0 = pset1<ResPacket>(tmp0);
+ const LhsScalar* lhs0 = lhs + i*lhsStride;
+ // process first unaligned result's coeffs
+ // FIXME this loop gets vectorized by the compiler!
+ for (Index j=0; j<alignedStart; ++j)
+ tmp0 += cj.pmul(lhs0[j], rhs[j]);
+
+ if (alignedSize>alignedStart)
+ {
+ // process aligned rhs coeffs
+ if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
+ for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
+ ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
+ else
+ for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
+ ptmp0 = pcj.pmadd(ploadu<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
+ tmp0 += predux(ptmp0);
+ }
+
+ // process remaining scalars
+ // FIXME this loop gets vectorized by the compiler!
+ for (Index j=alignedSize; j<depth; ++j)
+ tmp0 += cj.pmul(lhs0[j], rhs[j]);
+ res[i*resIncr] += alpha*tmp0;
+ }
+ if (skipRows)
+ {
+ start = 0;
+ end = skipRows;
+ skipRows = 0;
+ }
+ else
+ break;
+ } while(Vectorizable);
+
+ #undef _EIGEN_ACCUMULATE_PACKETS
+}
+};
+
+} // end namespace internal
+
+#endif // EIGEN_GENERAL_MATRIX_VECTOR_H
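
The column-major kernel above earns its speed by consuming four columns per sweep, so each coefficient of res is loaded and stored once per four columns instead of once per column. A scalar sketch of that unrolling, with the alignment and packet machinery stripped out (the name and signature below are illustrative, not Eigen API):

// Illustrative 4-columns-at-once col-major GEMV: res += alpha * lhs * rhs.
// lhs is rows x cols, column-major with leading dimension lhsStride.
void gemv_col4_ref(int rows, int cols,
                   const double* lhs, int lhsStride,
                   const double* rhs, double* res, double alpha)
{
  int colBound = (cols / 4) * 4;
  for (int c = 0; c < colBound; c += 4) {
    double b0 = alpha * rhs[c],     b1 = alpha * rhs[c + 1],
           b2 = alpha * rhs[c + 2], b3 = alpha * rhs[c + 3];
    const double *l0 = lhs + c * lhsStride,       *l1 = lhs + (c + 1) * lhsStride,
                 *l2 = lhs + (c + 2) * lhsStride, *l3 = lhs + (c + 3) * lhsStride;
    for (int r = 0; r < rows; ++r)  // one load/store of res per 4 columns
      res[r] += l0[r] * b0 + l1[r] * b1 + l2[r] * b2 + l3[r] * b3;
  }
  for (int c = colBound; c < cols; ++c) { // leftover columns, one at a time
    double b = alpha * rhs[c];
    const double* l = lhs + c * lhsStride;
    for (int r = 0; r < rows; ++r)
      res[r] += l[r] * b;
  }
}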
diff --git a/extern/Eigen3/Eigen/src/Core/products/Parallelizer.h b/extern/Eigen3/Eigen/src/Core/products/Parallelizer.h
new file mode 100644
index 00000000000..ecdedc363ce
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/Parallelizer.h
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PARALLELIZER_H
+#define EIGEN_PARALLELIZER_H
+
+namespace internal {
+
+/** \internal */
+inline void manage_multi_threading(Action action, int* v)
+{
+ static EIGEN_UNUSED int m_maxThreads = -1;
+
+ if(action==SetAction)
+ {
+ eigen_internal_assert(v!=0);
+ m_maxThreads = *v;
+ }
+ else if(action==GetAction)
+ {
+ eigen_internal_assert(v!=0);
+ #ifdef EIGEN_HAS_OPENMP
+ if(m_maxThreads>0)
+ *v = m_maxThreads;
+ else
+ *v = omp_get_max_threads();
+ #else
+ *v = 1;
+ #endif
+ }
+ else
+ {
+ eigen_internal_assert(false);
+ }
+}
+
+/** \returns the max number of threads reserved for Eigen
+ * \sa setNbThreads */
+inline int nbThreads()
+{
+ int ret;
+ manage_multi_threading(GetAction, &ret);
+ return ret;
+}
+
+/** Sets the max number of threads reserved for Eigen
+ * \sa nbThreads */
+inline void setNbThreads(int v)
+{
+ manage_multi_threading(SetAction, &v);
+}
+
+template<typename Index> struct GemmParallelInfo
+{
+ GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {}
+
+ int volatile sync;
+ int volatile users;
+
+ Index rhs_start;
+ Index rhs_length;
+};
+
+template<bool Condition, typename Functor, typename Index>
+void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
+{
+#ifndef EIGEN_HAS_OPENMP
+ // FIXME the transpose variable is only needed to properly split
+ // the matrix product when multithreading is enabled. This is a temporary
+ // fix to support row-major destination matrices. This whole
+ // parallelizer mechanism has to be redesigned anyway.
+ EIGEN_UNUSED_VARIABLE(transpose);
+ func(0,rows, 0,cols);
+#else
+
+ // Dynamically check whether we should enable or disable OpenMP.
+ // The conditions are:
+ // - the max number of threads we can create is greater than 1
+ // - we are not already in a parallel code
+ // - the sizes are large enough
+
+ // 1- are we already in a parallel session?
+ // FIXME omp_get_num_threads()>1 only works with OpenMP; what if the user does not use OpenMP?
+ if((!Condition) || (omp_get_num_threads()>1))
+ return func(0,rows, 0,cols);
+
+ Index size = transpose ? cols : rows;
+
+ // 2- compute the maximal number of threads from the size of the product:
+ // FIXME this has to be fine tuned
+ Index max_threads = std::max<Index>(1,size / 32);
+
+ // 3 - compute the number of threads we are going to use
+ Index threads = std::min<Index>(nbThreads(), max_threads);
+
+ if(threads==1)
+ return func(0,rows, 0,cols);
+
+ func.initParallelSession();
+
+ if(transpose)
+ std::swap(rows,cols);
+
+ Index blockCols = (cols / threads) & ~Index(0x3);
+ Index blockRows = (rows / threads) & ~Index(0x7);
+
+ GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];
+
+ #pragma omp parallel for schedule(static,1) num_threads(threads)
+ for(Index i=0; i<threads; ++i)
+ {
+ Index r0 = i*blockRows;
+ Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
+
+ Index c0 = i*blockCols;
+ Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
+
+ info[i].rhs_start = c0;
+ info[i].rhs_length = actualBlockCols;
+
+ if(transpose)
+ func(0, cols, r0, actualBlockRows, info);
+ else
+ func(r0, actualBlockRows, 0,cols, info);
+ }
+
+ delete[] info;
+#endif
+}
+
+} // end namespace internal
+
+#endif // EIGEN_PARALLELIZER_H
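
parallelize_gemm splits the destination into per-thread bands whose sizes are rounded to multiples of 8 rows (or 4 columns), then dispatches one band per OpenMP thread. A minimal sketch of the same band split under plain OpenMP (the Worker interface is an assumption made for illustration):

#include <algorithm>
#ifdef _OPENMP
#include <omp.h>
#endif

// Illustrative band split: runs work(r0, nRows) on disjoint row bands,
// mirroring the rounding and last-band handling used above.
template <typename Worker>
void parallel_rows(const Worker& work, int rows)
{
#ifdef _OPENMP
  int threads = std::min(omp_get_max_threads(), std::max(1, rows / 32));
  int blockRows = (rows / threads) & ~0x7; // multiple of 8, as above
  #pragma omp parallel for schedule(static, 1) num_threads(threads)
  for (int i = 0; i < threads; ++i) {
    int r0 = i * blockRows;
    int n  = (i + 1 == threads) ? rows - r0 : blockRows; // last band takes the remainder
    work(r0, n);
  }
#else
  work(0, rows); // no OpenMP: single sequential band
#endif
}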
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
new file mode 100644
index 00000000000..ccd757cfaf8
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -0,0 +1,427 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H
+#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H
+
+namespace internal {
+
+// pack a selfadjoint block diagonal for use with the gebp_kernel
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder>
+struct symm_pack_lhs
+{
+ template<int BlockRows> inline
+ void pack(Scalar* blockA, const const_blas_data_mapper<Scalar,Index,StorageOrder>& lhs, Index cols, Index i, Index& count)
+ {
+ // normal copy
+ for(Index k=0; k<i; k++)
+ for(Index w=0; w<BlockRows; w++)
+ blockA[count++] = lhs(i+w,k); // normal
+ // symmetric copy
+ Index h = 0;
+ for(Index k=i; k<i+BlockRows; k++)
+ {
+ for(Index w=0; w<h; w++)
+ blockA[count++] = conj(lhs(k, i+w)); // transposed
+
+ blockA[count++] = real(lhs(k,k)); // real (diagonal)
+
+ for(Index w=h+1; w<BlockRows; w++)
+ blockA[count++] = lhs(i+w, k); // normal
+ ++h;
+ }
+ // transposed copy
+ for(Index k=i+BlockRows; k<cols; k++)
+ for(Index w=0; w<BlockRows; w++)
+ blockA[count++] = conj(lhs(k, i+w)); // transposed
+ }
+ void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
+ {
+ const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
+ Index count = 0;
+ Index peeled_mc = (rows/Pack1)*Pack1;
+ for(Index i=0; i<peeled_mc; i+=Pack1)
+ {
+ pack<Pack1>(blockA, lhs, cols, i, count);
+ }
+
+ if(rows-peeled_mc>=Pack2)
+ {
+ pack<Pack2>(blockA, lhs, cols, peeled_mc, count);
+ peeled_mc += Pack2;
+ }
+
+ // do the same with mr==1
+ for(Index i=peeled_mc; i<rows; i++)
+ {
+ for(Index k=0; k<i; k++)
+ blockA[count++] = lhs(i, k); // normal
+
+ blockA[count++] = real(lhs(i, i)); // real (diagonal)
+
+ for(Index k=i+1; k<cols; k++)
+ blockA[count++] = conj(lhs(k, i)); // transposed
+ }
+ }
+};
+
+template<typename Scalar, typename Index, int nr, int StorageOrder>
+struct symm_pack_rhs
+{
+ enum { PacketSize = packet_traits<Scalar>::size };
+ void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
+ {
+ Index end_k = k2 + rows;
+ Index count = 0;
+ const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
+ Index packet_cols = (cols/nr)*nr;
+
+ // first part: normal case
+ for(Index j2=0; j2<k2; j2+=nr)
+ {
+ for(Index k=k2; k<end_k; k++)
+ {
+ blockB[count+0] = rhs(k,j2+0);
+ blockB[count+1] = rhs(k,j2+1);
+ if (nr==4)
+ {
+ blockB[count+2] = rhs(k,j2+2);
+ blockB[count+3] = rhs(k,j2+3);
+ }
+ count += nr;
+ }
+ }
+
+ // second part: diagonal block
+ for(Index j2=k2; j2<(std::min)(k2+rows,packet_cols); j2+=nr)
+ {
+ // again we can split vertically into three different parts (transpose, symmetric, normal)
+ // transpose
+ for(Index k=k2; k<j2; k++)
+ {
+ blockB[count+0] = conj(rhs(j2+0,k));
+ blockB[count+1] = conj(rhs(j2+1,k));
+ if (nr==4)
+ {
+ blockB[count+2] = conj(rhs(j2+2,k));
+ blockB[count+3] = conj(rhs(j2+3,k));
+ }
+ count += nr;
+ }
+ // symmetric
+ Index h = 0;
+ for(Index k=j2; k<j2+nr; k++)
+ {
+ // normal
+ for (Index w=0 ; w<h; ++w)
+ blockB[count+w] = rhs(k,j2+w);
+
+ blockB[count+h] = real(rhs(k,k));
+
+ // transpose
+ for (Index w=h+1 ; w<nr; ++w)
+ blockB[count+w] = conj(rhs(j2+w,k));
+ count += nr;
+ ++h;
+ }
+ // normal
+ for(Index k=j2+nr; k<end_k; k++)
+ {
+ blockB[count+0] = rhs(k,j2+0);
+ blockB[count+1] = rhs(k,j2+1);
+ if (nr==4)
+ {
+ blockB[count+2] = rhs(k,j2+2);
+ blockB[count+3] = rhs(k,j2+3);
+ }
+ count += nr;
+ }
+ }
+
+ // third part: transposed
+ for(Index j2=k2+rows; j2<packet_cols; j2+=nr)
+ {
+ for(Index k=k2; k<end_k; k++)
+ {
+ blockB[count+0] = conj(rhs(j2+0,k));
+ blockB[count+1] = conj(rhs(j2+1,k));
+ if (nr==4)
+ {
+ blockB[count+2] = conj(rhs(j2+2,k));
+ blockB[count+3] = conj(rhs(j2+3,k));
+ }
+ count += nr;
+ }
+ }
+
+ // copy the remaining columns one at a time (=> the same with nr==1)
+ for(Index j2=packet_cols; j2<cols; ++j2)
+ {
+ // transpose
+ Index half = (std::min)(end_k,j2);
+ for(Index k=k2; k<half; k++)
+ {
+ blockB[count] = conj(rhs(j2,k));
+ count += 1;
+ }
+
+ if(half==j2 && half<k2+rows)
+ {
+ blockB[count] = real(rhs(j2,j2));
+ count += 1;
+ }
+ else
+ half--;
+
+ // normal
+ for(Index k=half+1; k<k2+rows; k++)
+ {
+ blockB[count] = rhs(k,j2);
+ count += 1;
+ }
+ }
+ }
+};
+
+/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of
+ * the general matrix matrix product.
+ */
+template <typename Scalar, typename Index,
+ int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
+ int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
+ int ResStorageOrder>
+struct product_selfadjoint_matrix;
+
+template <typename Scalar, typename Index,
+ int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
+ int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>
+{
+
+ static EIGEN_STRONG_INLINE void run(
+ Index rows, Index cols,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ Scalar alpha)
+ {
+ product_selfadjoint_matrix<Scalar, Index,
+ EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
+ RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
+ EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
+ LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs),
+ ColMajor>
+ ::run(cols, rows, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha);
+ }
+};
+
+template <typename Scalar, typename Index,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>
+{
+
+ static EIGEN_DONT_INLINE void run(
+ Index rows, Index cols,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ Scalar alpha)
+ {
+ Index size = rows;
+
+ const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+ typedef gebp_traits<Scalar,Scalar> Traits;
+
+ Index kc = size; // cache block size along the K direction
+ Index mc = rows; // cache block size along the M direction
+ Index nc = cols; // cache block size along the N direction
+ computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
+ // kc must be smaller than mc
+ kc = (std::min)(kc,mc);
+
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+ std::size_t sizeB = sizeW + kc*cols;
+ ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
+ Scalar* blockB = allocatedBlockB + sizeW;
+
+ gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
+ symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+ gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
+
+ for(Index k2=0; k2<size; k2+=kc)
+ {
+ const Index actual_kc = (std::min)(k2+kc,size)-k2;
+
+ // we have selected one row panel of rhs and one column panel of lhs
+ // pack rhs's panel into a sequential chunk of memory
+ // and expand each coeff to a constant packet for further reuse
+ pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
+
+ // the selected lhs panel has to be split into three different parts:
+ // 1 - the transposed panel above the diagonal block => transposed packed copy
+ // 2 - the diagonal block => special packed copy
+ // 3 - the panel below the diagonal block => generic packed copy
+ for(Index i2=0; i2<k2; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(i2+mc,k2)-i2;
+ // transposed packed copy
+ pack_lhs_transposed(blockA, &lhs(k2, i2), lhsStride, actual_kc, actual_mc);
+
+ gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+ }
+ // the block diagonal
+ {
+ const Index actual_mc = (std::min)(k2+kc,size)-k2;
+ // symmetric packed copy
+ pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);
+
+ gebp_kernel(res+k2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+ }
+
+ for(Index i2=k2+kc; i2<size; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(i2+mc,size)-i2;
+ gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()
+ (blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
+
+ gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+ }
+ }
+ }
+};
+
+// matrix * selfadjoint product
+template <typename Scalar, typename Index,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>
+{
+
+ static EIGEN_DONT_INLINE void run(
+ Index rows, Index cols,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ Scalar alpha)
+ {
+ Index size = cols;
+
+ const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+
+ typedef gebp_traits<Scalar,Scalar> Traits;
+
+ Index kc = size; // cache block size along the K direction
+ Index mc = rows; // cache block size along the M direction
+ Index nc = cols; // cache block size along the N direction
+ computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+ std::size_t sizeB = sizeW + kc*cols;
+ ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
+ Scalar* blockB = allocatedBlockB + sizeW;
+
+ gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
+ gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ symm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+
+ for(Index k2=0; k2<size; k2+=kc)
+ {
+ const Index actual_kc = (std::min)(k2+kc,size)-k2;
+
+ pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);
+
+ // => GEPP
+ for(Index i2=0; i2<rows; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(i2+mc,rows)-i2;
+ pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
+
+ gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+ }
+ }
+ }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Wrapper to product_selfadjoint_matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, int LhsMode, typename Rhs, int RhsMode>
+struct traits<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false> >
+ : traits<ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, int LhsMode, typename Rhs, int RhsMode>
+struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>
+ : public ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>, Lhs, Rhs >
+{
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)
+
+ SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+ enum {
+ LhsIsUpper = (LhsMode&(Upper|Lower))==Upper,
+ LhsIsSelfAdjoint = (LhsMode&SelfAdjoint)==SelfAdjoint,
+ RhsIsUpper = (RhsMode&(Upper|Lower))==Upper,
+ RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint
+ };
+
+ template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ {
+ eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+
+ const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
+ const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);
+
+ Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
+ * RhsBlasTraits::extractScalarFactor(m_rhs);
+
+ internal::product_selfadjoint_matrix<Scalar, Index,
+ EIGEN_LOGICAL_XOR(LhsIsUpper,
+ internal::traits<Lhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
+ NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),
+ EIGEN_LOGICAL_XOR(RhsIsUpper,
+ internal::traits<Rhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,
+ NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)),
+ internal::traits<Dest>::Flags&RowMajorBit ? RowMajor : ColMajor>
+ ::run(
+ lhs.rows(), rhs.cols(), // sizes
+ &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
+ &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info
+ &dst.coeffRef(0,0), dst.outerStride(), // result info
+ actualAlpha // alpha
+ );
+ }
+};
+
+#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H
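
The symm_pack_lhs/symm_pack_rhs routines above exist because only one triangle of the selfadjoint operand holds valid storage; the packed copy reconstructs the other half by (conjugate) symmetry. A reference sketch of that access rule for a real, lower-stored matrix (illustrative code, not Eigen API):

// Reference SYMM: res += alpha * S * B, where only the lower triangle of the
// selfadjoint matrix S (size x size, column-major) is valid storage; the
// upper part is reconstructed by symmetry, as symm_pack_lhs does while packing.
void symm_lower_ref(int size, int cols,
                    const double* S, int lds,
                    const double* B, int ldb,
                    double* res, int ldr, double alpha)
{
  for (int j = 0; j < cols; ++j)
    for (int i = 0; i < size; ++i) {
      double s = 0;
      for (int k = 0; k < size; ++k) {
        // read S(i,k) from whichever triangle is stored: S(i,k) == S(k,i)
        double sik = (i >= k) ? S[i + k * lds] : S[k + i * lds];
        s += sik * B[k + j * ldb];
      }
      res[i + j * ldr] += alpha * s;
    }
}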
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h
new file mode 100644
index 00000000000..d6121fc07bd
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -0,0 +1,278 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
+#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H
+
+namespace internal {
+
+/* Optimized selfadjoint matrix * vector product:
+ * This algorithm processes 2 columns at once, which allows us to both reduce
+ * the number of loads/stores of the result by a factor of 2 and to reduce
+ * the instruction dependency.
+ */
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>
+static EIGEN_DONT_INLINE void product_selfadjoint_vector(
+ Index size,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsIncr,
+ Scalar* res,
+ Scalar alpha)
+{
+ typedef typename packet_traits<Scalar>::type Packet;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
+
+ enum {
+ IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
+ IsLower = UpLo == Lower ? 1 : 0,
+ FirstTriangular = IsRowMajor == IsLower
+ };
+
+ conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> cj0;
+ conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
+ conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex, ConjugateRhs> cjd;
+
+ conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, IsRowMajor), ConjugateRhs> pcj0;
+ conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;
+
+ Scalar cjAlpha = ConjugateRhs ? conj(alpha) : alpha;
+
+ // FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed.
+ // if the rhs is not sequentially stored in memory we copy it to a temporary buffer,
+ // this is because we need to extract packets
+ ei_declare_aligned_stack_constructed_variable(Scalar,rhs,size,rhsIncr==1 ? const_cast<Scalar*>(_rhs) : 0);
+ if (rhsIncr!=1)
+ {
+ const Scalar* it = _rhs;
+ for (Index i=0; i<size; ++i, it+=rhsIncr)
+ rhs[i] = *it;
+ }
+
+ Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
+ if (FirstTriangular)
+ bound = size - bound;
+
+ for (Index j=FirstTriangular ? bound : 0;
+ j<(FirstTriangular ? size : bound);j+=2)
+ {
+ register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
+ register const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;
+
+ Scalar t0 = cjAlpha * rhs[j];
+ Packet ptmp0 = pset1<Packet>(t0);
+ Scalar t1 = cjAlpha * rhs[j+1];
+ Packet ptmp1 = pset1<Packet>(t1);
+
+ Scalar t2 = 0;
+ Packet ptmp2 = pset1<Packet>(t2);
+ Scalar t3 = 0;
+ Packet ptmp3 = pset1<Packet>(t3);
+
+ size_t starti = FirstTriangular ? 0 : j+2;
+ size_t endi = FirstTriangular ? j : size;
+ size_t alignedStart = (starti) + first_aligned(&res[starti], endi-starti);
+ size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);
+
+ // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
+ res[j] += cjd.pmul(internal::real(A0[j]), t0);
+ res[j+1] += cjd.pmul(internal::real(A1[j+1]), t1);
+ if(FirstTriangular)
+ {
+ res[j] += cj0.pmul(A1[j], t1);
+ t3 += cj1.pmul(A1[j], rhs[j]);
+ }
+ else
+ {
+ res[j+1] += cj0.pmul(A0[j+1],t0);
+ t2 += cj1.pmul(A0[j+1], rhs[j+1]);
+ }
+
+ for (size_t i=starti; i<alignedStart; ++i)
+ {
+ res[i] += t0 * A0[i] + t1 * A1[i];
+ t2 += conj(A0[i]) * rhs[i];
+ t3 += conj(A1[i]) * rhs[i];
+ }
+ // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up)
+ // gcc 4.2 does this optimization automatically.
+ const Scalar* EIGEN_RESTRICT a0It = A0 + alignedStart;
+ const Scalar* EIGEN_RESTRICT a1It = A1 + alignedStart;
+ const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
+ Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
+ for (size_t i=alignedStart; i<alignedEnd; i+=PacketSize)
+ {
+ Packet A0i = ploadu<Packet>(a0It); a0It += PacketSize;
+ Packet A1i = ploadu<Packet>(a1It); a1It += PacketSize;
+ Packet Bi = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
+ Packet Xi = pload <Packet>(resIt);
+
+ Xi = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
+ ptmp2 = pcj1.pmadd(A0i, Bi, ptmp2);
+ ptmp3 = pcj1.pmadd(A1i, Bi, ptmp3);
+ pstore(resIt,Xi); resIt += PacketSize;
+ }
+ for (size_t i=alignedEnd; i<endi; i++)
+ {
+ res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
+ t2 += cj1.pmul(A0[i], rhs[i]);
+ t3 += cj1.pmul(A1[i], rhs[i]);
+ }
+
+ res[j] += alpha * (t2 + predux(ptmp2));
+ res[j+1] += alpha * (t3 + predux(ptmp3));
+ }
+ for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
+ {
+ register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
+
+ Scalar t1 = cjAlpha * rhs[j];
+ Scalar t2 = 0;
+ // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
+ res[j] += cjd.pmul(internal::real(A0[j]), t1);
+ for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
+ {
+ res[i] += cj0.pmul(A0[i], t1);
+ t2 += cj1.pmul(A0[i], rhs[i]);
+ }
+ res[j] += alpha * t2;
+ }
+}
+
+} // end namespace internal
+
+/***************************************************************************
+* Wrapper to product_selfadjoint_vector
+***************************************************************************/
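+
+// Illustrative usage sketch (not part of the original source): the wrapper
+// defined below is what a selfadjoint matrix * vector expression ultimately
+// dispatches to. Assuming a dense matrix whose lower half holds the
+// selfadjoint coefficients:
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
+//   Eigen::VectorXd x = Eigen::VectorXd::Random(5);
+//   Eigen::VectorXd y = A.selfadjointView<Eigen::Lower>() * x; // SYMV-like product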
+
+namespace internal {
+template<typename Lhs, int LhsMode, typename Rhs>
+struct traits<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true> >
+ : traits<ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, int LhsMode, typename Rhs>
+struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
+ : public ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs >
+{
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)
+
+ enum {
+ LhsUpLo = LhsMode&(Upper|Lower)
+ };
+
+ SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ typedef typename Dest::Scalar ResScalar;
+ typedef typename Base::RhsScalar RhsScalar;
+ typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
+
+ eigen_assert(dest.rows()==m_lhs.rows() && dest.cols()==m_rhs.cols());
+
+ const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
+ const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);
+
+ Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
+ * RhsBlasTraits::extractScalarFactor(m_rhs);
+
+ enum {
+ EvalToDest = (Dest::InnerStrideAtCompileTime==1),
+ UseRhs = (_ActualRhsType::InnerStrideAtCompileTime==1)
+ };
+
+ internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
+ internal::gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!UseRhs> static_rhs;
+
+ ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
+ EvalToDest ? dest.data() : static_dest.data());
+
+ ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
+ UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());
+
+ if(!EvalToDest)
+ {
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ int size = dest.size();
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #endif
+ MappedDest(actualDestPtr, dest.size()) = dest;
+ }
+
+ if(!UseRhs)
+ {
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ int size = rhs.size();
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #endif
+ Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
+ }
+
+
+ internal::product_selfadjoint_vector<Scalar, Index, (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>
+ (
+ lhs.rows(), // size
+ &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
+ actualRhsPtr, 1, // rhs info
+ actualDestPtr, // result info
+ actualAlpha // scale factor
+ );
+
+ if(!EvalToDest)
+ dest = MappedDest(actualDestPtr, dest.size());
+ }
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int RhsMode>
+struct traits<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false> >
+ : traits<ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, typename Rhs, int RhsMode>
+struct SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>
+ : public ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs >
+{
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)
+
+ enum {
+ RhsUpLo = RhsMode&(Upper|Lower)
+ };
+
+ SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ // let's simply transpose the product
+ Transpose<Dest> destT(dest);
+ SelfadjointProductMatrix<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
+ Transpose<const Lhs>, 0, true>(m_rhs.transpose(), m_lhs.transpose()).scaleAndAddTo(destT, alpha);
+ }
+};
+
+
+#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h
new file mode 100644
index 00000000000..3a4523fa4a9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointProduct.h
@@ -0,0 +1,136 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SELFADJOINT_PRODUCT_H
+#define EIGEN_SELFADJOINT_PRODUCT_H
+
+/**********************************************************************
+* This file implements a self adjoint product: C += A A^T updating only
+* half of the selfadjoint matrix C.
+* It corresponds to the level 3 SYRK and level 2 SYR Blas routines.
+**********************************************************************/
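+
+// Illustrative sketch (not part of the original source): the public entry
+// point implemented at the bottom of this file is SelfAdjointView::rankUpdate.
+// A SYR-like rank-1 update of the lower half reads:
+//
+//   Eigen::MatrixXd C = Eigen::MatrixXd::Zero(4,4);
+//   Eigen::VectorXd u = Eigen::VectorXd::Random(4);
+//   C.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0); // C += 2*u*u^T, lower half only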
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs>
+struct selfadjoint_rank1_update;
+
+template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
+struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
+{
+ static void run(Index size, Scalar* mat, Index stride, const Scalar* vec, Scalar alpha)
+ {
+ internal::conj_if<ConjRhs> cj;
+ typedef Map<const Matrix<Scalar,Dynamic,1> > OtherMap;
+ typedef typename internal::conditional<ConjLhs,typename OtherMap::ConjugateReturnType,const OtherMap&>::type ConjRhsType;
+ for (Index i=0; i<size; ++i)
+ {
+ Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+(UpLo==Lower ? i : 0), (UpLo==Lower ? size-i : (i+1)))
+ += (alpha * cj(vec[i])) * ConjRhsType(OtherMap(vec+(UpLo==Lower ? i : 0),UpLo==Lower ? size-i : (i+1)));
+ }
+ }
+};
+
+template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
+struct selfadjoint_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs>
+{
+ static void run(Index size, Scalar* mat, Index stride, const Scalar* vec, Scalar alpha)
+ {
+ selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vec,alpha);
+ }
+};
+
+template<typename MatrixType, typename OtherType, int UpLo, bool OtherIsVector = OtherType::IsVectorAtCompileTime>
+struct selfadjoint_product_selector;
+
+template<typename MatrixType, typename OtherType, int UpLo>
+struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
+{
+ static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef internal::blas_traits<OtherType> OtherBlasTraits;
+ typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;
+ typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;
+ const ActualOtherType actualOther = OtherBlasTraits::extract(other.derived());
+
+ Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived());
+
+ enum {
+ StorageOrder = (internal::traits<MatrixType>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+ UseOtherDirectly = _ActualOtherType::InnerStrideAtCompileTime==1
+ };
+ internal::gemv_static_vector_if<Scalar,OtherType::SizeAtCompileTime,OtherType::MaxSizeAtCompileTime,!UseOtherDirectly> static_other;
+
+ ei_declare_aligned_stack_constructed_variable(Scalar, actualOtherPtr, other.size(),
+ (UseOtherDirectly ? const_cast<Scalar*>(actualOther.data()) : static_other.data()));
+
+ if(!UseOtherDirectly)
+ Map<typename _ActualOtherType::PlainObject>(actualOtherPtr, actualOther.size()) = actualOther;
+
+ selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
+ OtherBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
+ (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex>
+ ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualAlpha);
+ }
+};
+
+template<typename MatrixType, typename OtherType, int UpLo>
+struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
+{
+ static void run(MatrixType& mat, const OtherType& other, typename MatrixType::Scalar alpha)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef internal::blas_traits<OtherType> OtherBlasTraits;
+ typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;
+ typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;
+ const ActualOtherType actualOther = OtherBlasTraits::extract(other.derived());
+
+ Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived());
+
+ enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
+
+ internal::general_matrix_matrix_triangular_product<Index,
+ Scalar, _ActualOtherType::Flags&RowMajorBit ? RowMajor : ColMajor, OtherBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
+ Scalar, _ActualOtherType::Flags&RowMajorBit ? ColMajor : RowMajor, (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex,
+ MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo>
+ ::run(mat.cols(), actualOther.cols(),
+ &actualOther.coeffRef(0,0), actualOther.outerStride(), &actualOther.coeffRef(0,0), actualOther.outerStride(),
+ mat.data(), mat.outerStride(), actualAlpha);
+ }
+};
+
+// high level API
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
+::rankUpdate(const MatrixBase<DerivedU>& u, Scalar alpha)
+{
+ selfadjoint_product_selector<MatrixType,DerivedU,UpLo>::run(_expression().const_cast_derived(), u.derived(), alpha);
+
+ return *this;
+}
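+
+// Illustrative sketch (not part of the original source): when the argument to
+// rankUpdate is a matrix rather than a vector, the selector above routes to
+// general_matrix_matrix_triangular_product, i.e. a SYRK-like rank-k update:
+//
+//   Eigen::MatrixXd C = Eigen::MatrixXd::Zero(4,4);
+//   Eigen::MatrixXd M = Eigen::MatrixXd::Random(4,3);
+//   C.selfadjointView<Eigen::Upper>().rankUpdate(M); // C += M*M^T (alpha defaults to 1)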
+
+#endif // EIGEN_SELFADJOINT_PRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h b/extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h
new file mode 100644
index 00000000000..9f8b8438a5d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/SelfadjointRank2Update.h
@@ -0,0 +1,104 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SELFADJOINTRANK2UPDATE_H
+#define EIGEN_SELFADJOINTRANK2UPDATE_H
+
+namespace internal {
+
+/* Optimized selfadjoint matrix += alpha * uv' + conj(alpha)*vu'
+ * It corresponds to the Level2 syr2 BLAS routine
+ */
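+
+// Illustrative sketch (not part of the original source), assuming complex
+// scalars so that the conjugations matter:
+//
+//   Eigen::MatrixXcd C = Eigen::MatrixXcd::Zero(4,4);
+//   Eigen::VectorXcd u = Eigen::VectorXcd::Random(4), v = Eigen::VectorXcd::Random(4);
+//   std::complex<double> alpha(1.0, 0.5);
+//   C.selfadjointView<Eigen::Lower>().rankUpdate(u, v, alpha); // C += alpha*u*v' + conj(alpha)*v*u'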
+
+template<typename Scalar, typename Index, typename UType, typename VType, int UpLo>
+struct selfadjoint_rank2_update_selector;
+
+template<typename Scalar, typename Index, typename UType, typename VType>
+struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
+{
+ static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
+ {
+ const Index size = u.size();
+ for (Index i=0; i<size; ++i)
+ {
+ Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
+ (conj(alpha) * conj(u.coeff(i))) * v.tail(size-i)
+ + (alpha * conj(v.coeff(i))) * u.tail(size-i);
+ }
+ }
+};
+
+template<typename Scalar, typename Index, typename UType, typename VType>
+struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
+{
+ static void run(Scalar* mat, Index stride, const UType& u, const VType& v, Scalar alpha)
+ {
+ const Index size = u.size();
+ for (Index i=0; i<size; ++i)
+ Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
+ (conj(alpha) * conj(u.coeff(i))) * v.head(i+1)
+ + (alpha * conj(v.coeff(i))) * u.head(i+1);
+ }
+};
+
+template<bool Cond, typename T> struct conj_expr_if
+ : conditional<!Cond, const T&,
+ CwiseUnaryOp<scalar_conjugate_op<typename traits<T>::Scalar>,T> > {};
+
+} // end namespace internal
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU, typename DerivedV>
+SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
+::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, Scalar alpha)
+{
+ typedef internal::blas_traits<DerivedU> UBlasTraits;
+ typedef typename UBlasTraits::DirectLinearAccessType ActualUType;
+ typedef typename internal::remove_all<ActualUType>::type _ActualUType;
+ const ActualUType actualU = UBlasTraits::extract(u.derived());
+
+ typedef internal::blas_traits<DerivedV> VBlasTraits;
+ typedef typename VBlasTraits::DirectLinearAccessType ActualVType;
+ typedef typename internal::remove_all<ActualVType>::type _ActualVType;
+ const ActualVType actualV = VBlasTraits::extract(v.derived());
+
+ // If MatrixType is row major, then we use the routine for lower triangular in the upper triangular case and
+ // vice versa, and take the complex conjugate of all coefficients and vector entries.
+
+ enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
+ Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived())
+ * internal::conj(VBlasTraits::extractScalarFactor(v.derived()));
+ if (IsRowMajor)
+ actualAlpha = internal::conj(actualAlpha);
+
+ internal::selfadjoint_rank2_update_selector<Scalar, Index,
+ typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::type>::type,
+ typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ VBlasTraits::NeedToConjugate,_ActualVType>::type>::type,
+ (IsRowMajor ? int(UpLo==Upper ? Lower : Upper) : UpLo)>
+ ::run(_expression().const_cast_derived().data(),_expression().outerStride(),actualU,actualV,actualAlpha);
+
+ return *this;
+}
+
+#endif // EIGEN_SELFADJOINTRANK2UPDATE_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h
new file mode 100644
index 00000000000..0c48d2efb75
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix.h
@@ -0,0 +1,403 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H
+#define EIGEN_TRIANGULAR_MATRIX_MATRIX_H
+
+namespace internal {
+
+// template<typename Scalar, int mr, int StorageOrder, bool Conjugate, int Mode>
+// struct gemm_pack_lhs_triangular
+// {
+// Matrix<Scalar,mr,mr,
+// void operator()(Scalar* blockA, const EIGEN_RESTRICT Scalar* _lhs, int lhsStride, int depth, int rows)
+// {
+// conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+// const_blas_data_mapper<Scalar, StorageOrder> lhs(_lhs,lhsStride);
+// int count = 0;
+// const int peeled_mc = (rows/mr)*mr;
+// for(int i=0; i<peeled_mc; i+=mr)
+// {
+// for(int k=0; k<depth; k++)
+// for(int w=0; w<mr; w++)
+// blockA[count++] = cj(lhs(i+w, k));
+// }
+// for(int i=peeled_mc; i<rows; i++)
+// {
+// for(int k=0; k<depth; k++)
+// blockA[count++] = cj(lhs(i, k));
+// }
+// }
+// };
+
+/* Optimized triangular matrix * matrix product (cf. the BLAS TRMM routine),
+ * built on top of the general matrix matrix product.
+ */
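+
+// Illustrative sketch (not part of the original source) of the user-level
+// expressions that dispatch to this kernel:
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4), B = Eigen::MatrixXd::Random(4,4);
+//   Eigen::MatrixXd C = A.triangularView<Eigen::Lower>() * B; // triangular * general
+//   Eigen::MatrixXd D = B * A.triangularView<Eigen::Upper>(); // general * triangular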
+template <typename Scalar, typename Index,
+ int Mode, bool LhsIsTriangular,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs,
+ int ResStorageOrder>
+struct product_triangular_matrix_matrix;
+
+template <typename Scalar, typename Index,
+ int Mode, bool LhsIsTriangular,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs>
+struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
+ LhsStorageOrder,ConjugateLhs,
+ RhsStorageOrder,ConjugateRhs,RowMajor>
+{
+ static EIGEN_STRONG_INLINE void run(
+ Index rows, Index cols, Index depth,
+ const Scalar* lhs, Index lhsStride,
+ const Scalar* rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ Scalar alpha)
+ {
+ product_triangular_matrix_matrix<Scalar, Index,
+ (Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
+ (!LhsIsTriangular),
+ RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
+ ConjugateRhs,
+ LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
+ ConjugateLhs,
+ ColMajor>
+ ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha);
+ }
+};
+
+// implements col-major += alpha * op(triangular) * op(general)
+template <typename Scalar, typename Index, int Mode,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs>
+struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
+ LhsStorageOrder,ConjugateLhs,
+ RhsStorageOrder,ConjugateRhs,ColMajor>
+{
+
+ typedef gebp_traits<Scalar,Scalar> Traits;
+ enum {
+ SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+ IsLower = (Mode&Lower) == Lower,
+ SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
+ };
+
+ static EIGEN_DONT_INLINE void run(
+ Index _rows, Index _cols, Index _depth,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ Scalar alpha)
+ {
+ // strip zeros
+ Index diagSize = (std::min)(_rows,_depth);
+ Index rows = IsLower ? _rows : diagSize;
+ Index depth = IsLower ? diagSize : _depth;
+ Index cols = _cols;
+
+ const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+ Index kc = depth; // cache block size along the K direction
+ Index mc = rows; // cache block size along the M direction
+ Index nc = cols; // cache block size along the N direction
+ computeProductBlockingSizes<Scalar,Scalar,4>(kc, mc, nc);
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+ std::size_t sizeB = sizeW + kc*cols;
+ ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
+ Scalar* blockB = allocatedBlockB + sizeW;
+
+ Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer;
+ triangularBuffer.setZero();
+ if((Mode&ZeroDiag)==ZeroDiag)
+ triangularBuffer.diagonal().setZero();
+ else
+ triangularBuffer.diagonal().setOnes();
+
+ gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
+ gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+
+ for(Index k2=IsLower ? depth : 0;
+ IsLower ? k2>0 : k2<depth;
+ IsLower ? k2-=kc : k2+=kc)
+ {
+ Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
+ Index actual_k2 = IsLower ? k2-actual_kc : k2;
+
+ // align blocks with the end of the triangular part for trapezoidal lhs
+ if((!IsLower)&&(k2<rows)&&(k2+actual_kc>rows))
+ {
+ actual_kc = rows-k2;
+ k2 = k2+actual_kc-kc;
+ }
+
+ pack_rhs(blockB, &rhs(actual_k2,0), rhsStride, actual_kc, cols);
+
+ // the selected lhs's panel has to be split in three different parts:
+ // 1 - the part which is zero => skip it
+ // 2 - the diagonal block => special kernel
+ // 3 - the dense panel below (lower case) or above (upper case) the diagonal block => GEPP
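+      // E.g. for a lower triangular lhs the selected vertical panel looks like
+      //   [ 0 ]  <- part 1: rows above the diagonal block, entirely zero, skipped
+      //   [ D ]  <- part 2: the triangular diagonal block, special kernel below
+      //   [ P ]  <- part 3: the dense rectangular panel, regular GEPP at the end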
+
+ // the block diagonal, if any:
+ if(IsLower || actual_k2<rows)
+ {
+        // for each small vertical panel of the lhs
+ for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
+ {
+ Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
+ Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
+ Index startBlock = actual_k2+k1;
+ Index blockBOffset = k1;
+
+ // => GEBP with the micro triangular block
+ // The trick is to pack this micro block while filling the opposite triangular part with zeros.
+ // To this end we do an extra triangular copy to a small temporary buffer
+ for (Index k=0;k<actualPanelWidth;++k)
+ {
+ if (SetDiag)
+ triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);
+ for (Index i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
+ triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);
+ }
+ pack_lhs(blockA, triangularBuffer.data(), triangularBuffer.outerStride(), actualPanelWidth, actualPanelWidth);
+
+ gebp_kernel(res+startBlock, resStride, blockA, blockB, actualPanelWidth, actualPanelWidth, cols, alpha,
+ actualPanelWidth, actual_kc, 0, blockBOffset);
+
+ // GEBP with remaining micro panel
+ if (lengthTarget>0)
+ {
+ Index startTarget = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;
+
+ pack_lhs(blockA, &lhs(startTarget,startBlock), lhsStride, actualPanelWidth, lengthTarget);
+
+ gebp_kernel(res+startTarget, resStride, blockA, blockB, lengthTarget, actualPanelWidth, cols, alpha,
+ actualPanelWidth, actual_kc, 0, blockBOffset);
+ }
+ }
+ }
+ // the part below (lower case) or above (upper case) the diagonal => GEPP
+ {
+ Index start = IsLower ? k2 : 0;
+ Index end = IsLower ? rows : (std::min)(actual_k2,rows);
+ for(Index i2=start; i2<end; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(i2+mc,end)-i2;
+ gemm_pack_lhs<Scalar, Index, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
+ (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
+
+ gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+ }
+ }
+ }
+ }
+};
+
+// implements col-major += alpha * op(general) * op(triangular)
+template <typename Scalar, typename Index, int Mode,
+ int LhsStorageOrder, bool ConjugateLhs,
+ int RhsStorageOrder, bool ConjugateRhs>
+struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
+ LhsStorageOrder,ConjugateLhs,
+ RhsStorageOrder,ConjugateRhs,ColMajor>
+{
+ typedef gebp_traits<Scalar,Scalar> Traits;
+ enum {
+ SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+ IsLower = (Mode&Lower) == Lower,
+ SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
+ };
+
+ static EIGEN_DONT_INLINE void run(
+ Index _rows, Index _cols, Index _depth,
+ const Scalar* _lhs, Index lhsStride,
+ const Scalar* _rhs, Index rhsStride,
+ Scalar* res, Index resStride,
+ Scalar alpha)
+ {
+ // strip zeros
+ Index diagSize = (std::min)(_cols,_depth);
+ Index rows = _rows;
+ Index depth = IsLower ? _depth : diagSize;
+ Index cols = IsLower ? diagSize : _cols;
+
+ const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+ const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+ Index kc = depth; // cache block size along the K direction
+ Index mc = rows; // cache block size along the M direction
+ Index nc = cols; // cache block size along the N direction
+ computeProductBlockingSizes<Scalar,Scalar,4>(kc, mc, nc);
+
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+ std::size_t sizeB = sizeW + kc*cols;
+ ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
+ Scalar* blockB = allocatedBlockB + sizeW;
+
+ Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer;
+ triangularBuffer.setZero();
+ if((Mode&ZeroDiag)==ZeroDiag)
+ triangularBuffer.diagonal().setZero();
+ else
+ triangularBuffer.diagonal().setOnes();
+
+ gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
+ gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+ gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+ gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;
+
+ for(Index k2=IsLower ? 0 : depth;
+ IsLower ? k2<depth : k2>0;
+ IsLower ? k2+=kc : k2-=kc)
+ {
+ Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
+ Index actual_k2 = IsLower ? k2 : k2-actual_kc;
+
+ // align blocks with the end of the triangular part for trapezoidal rhs
+ if(IsLower && (k2<cols) && (actual_k2+actual_kc>cols))
+ {
+ actual_kc = cols-k2;
+ k2 = actual_k2 + actual_kc - kc;
+ }
+
+ // remaining size
+ Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
+ // size of the triangular part
+ Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;
+
+ Scalar* geb = blockB+ts*ts;
+
+ pack_rhs(geb, &rhs(actual_k2,IsLower ? 0 : k2), rhsStride, actual_kc, rs);
+
+      // pack the triangular part of the rhs, padding the unrolled blocks with zeros
+ if(ts>0)
+ {
+ for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+ {
+ Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+ Index actual_j2 = actual_k2 + j2;
+ Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+ Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
+ // general part
+ pack_rhs_panel(blockB+j2*actual_kc,
+ &rhs(actual_k2+panelOffset, actual_j2), rhsStride,
+ panelLength, actualPanelWidth,
+ actual_kc, panelOffset);
+
+ // append the triangular part via a temporary buffer
+ for (Index j=0;j<actualPanelWidth;++j)
+ {
+ if (SetDiag)
+ triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);
+ for (Index k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
+ triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);
+ }
+
+ pack_rhs_panel(blockB+j2*actual_kc,
+ triangularBuffer.data(), triangularBuffer.outerStride(),
+ actualPanelWidth, actualPanelWidth,
+ actual_kc, j2);
+ }
+ }
+
+ for (Index i2=0; i2<rows; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(mc,rows-i2);
+ pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
+
+ // triangular kernel
+ if(ts>0)
+ {
+ for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+ {
+ Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+ Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
+ Index blockOffset = IsLower ? j2 : 0;
+
+ gebp_kernel(res+i2+(actual_k2+j2)*resStride, resStride,
+ blockA, blockB+j2*actual_kc,
+ actual_mc, panelLength, actualPanelWidth,
+ alpha,
+ actual_kc, actual_kc, // strides
+ blockOffset, blockOffset,// offsets
+ allocatedBlockB); // workspace
+ }
+ }
+ gebp_kernel(res+i2+(IsLower ? 0 : k2)*resStride, resStride,
+ blockA, geb, actual_mc, actual_kc, rs,
+ alpha,
+ -1, -1, 0, 0, allocatedBlockB);
+ }
+ }
+ }
+};
+
+/***************************************************************************
+* Wrapper to product_triangular_matrix_matrix
+***************************************************************************/
+
+template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
+struct traits<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false> >
+ : traits<ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>, Lhs, Rhs> >
+{};
+
+} // end namespace internal
+
+template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
+struct TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
+ : public ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>, Lhs, Rhs >
+{
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct)
+
+ TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ {
+ const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
+ const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);
+
+ Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
+ * RhsBlasTraits::extractScalarFactor(m_rhs);
+
+ internal::product_triangular_matrix_matrix<Scalar, Index,
+ Mode, LhsIsTriangular,
+ (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
+ (internal::traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
+ (internal::traits<Dest >::Flags&RowMajorBit) ? RowMajor : ColMajor>
+ ::run(
+ lhs.rows(), rhs.cols(), lhs.cols(),// LhsIsTriangular ? rhs.cols() : lhs.rows(), // sizes
+ &lhs.coeffRef(0,0), lhs.outerStride(), // lhs info
+ &rhs.coeffRef(0,0), rhs.outerStride(), // rhs info
+ &dst.coeffRef(0,0), dst.outerStride(), // result info
+ actualAlpha // alpha
+ );
+ }
+};
+
+
+#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h
new file mode 100644
index 00000000000..71b4a52ab80
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -0,0 +1,325 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRIANGULARMATRIXVECTOR_H
+#define EIGEN_TRIANGULARMATRIXVECTOR_H
+
+namespace internal {
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder>
+struct product_triangular_matrix_vector;
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs>
+struct product_triangular_matrix_vector<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor>
+{
+ typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ enum {
+ IsLower = ((Mode&Lower)==Lower),
+ HasUnitDiag = (Mode & UnitDiag)==UnitDiag
+ };
+ static EIGEN_DONT_INLINE void run(Index rows, Index cols, const LhsScalar* _lhs, Index lhsStride,
+ const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha)
+ {
+ static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+
+ typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;
+ const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));
+ typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);
+
+ typedef Map<const Matrix<RhsScalar,Dynamic,1>, 0, InnerStride<> > RhsMap;
+ const RhsMap rhs(_rhs,cols,InnerStride<>(rhsIncr));
+ typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);
+
+ typedef Map<Matrix<ResScalar,Dynamic,1> > ResMap;
+ ResMap res(_res,rows);
+
+ for (Index pi=0; pi<cols; pi+=PanelWidth)
+ {
+ Index actualPanelWidth = (std::min)(PanelWidth, cols-pi);
+ for (Index k=0; k<actualPanelWidth; ++k)
+ {
+ Index i = pi + k;
+ Index s = IsLower ? (HasUnitDiag ? i+1 : i ) : pi;
+ Index r = IsLower ? actualPanelWidth-k : k+1;
+ if ((!HasUnitDiag) || (--r)>0)
+ res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r);
+ if (HasUnitDiag)
+ res.coeffRef(i) += alpha * cjRhs.coeff(i);
+ }
+ Index r = IsLower ? cols - pi - actualPanelWidth : pi;
+ if (r>0)
+ {
+ Index s = IsLower ? pi+actualPanelWidth : 0;
+ general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjLhs,RhsScalar,ConjRhs>::run(
+ r, actualPanelWidth,
+ &lhs.coeffRef(s,pi), lhsStride,
+ &rhs.coeffRef(pi), rhsIncr,
+ &res.coeffRef(s), resIncr, alpha);
+ }
+ }
+ }
+};
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs>
+struct product_triangular_matrix_vector<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor>
+{
+ typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+ enum {
+ IsLower = ((Mode&Lower)==Lower),
+ HasUnitDiag = (Mode & UnitDiag)==UnitDiag
+ };
+ static void run(Index rows, Index cols, const LhsScalar* _lhs, Index lhsStride,
+ const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, ResScalar alpha)
+ {
+ static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+
+ typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;
+ const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));
+ typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);
+
+ typedef Map<const Matrix<RhsScalar,Dynamic,1> > RhsMap;
+ const RhsMap rhs(_rhs,cols);
+ typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);
+
+ typedef Map<Matrix<ResScalar,Dynamic,1>, 0, InnerStride<> > ResMap;
+ ResMap res(_res,rows,InnerStride<>(resIncr));
+
+ for (Index pi=0; pi<cols; pi+=PanelWidth)
+ {
+ Index actualPanelWidth = (std::min)(PanelWidth, cols-pi);
+ for (Index k=0; k<actualPanelWidth; ++k)
+ {
+ Index i = pi + k;
+ Index s = IsLower ? pi : (HasUnitDiag ? i+1 : i);
+ Index r = IsLower ? k+1 : actualPanelWidth-k;
+ if ((!HasUnitDiag) || (--r)>0)
+ res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum();
+ if (HasUnitDiag)
+ res.coeffRef(i) += alpha * cjRhs.coeff(i);
+ }
+ Index r = IsLower ? pi : cols - pi - actualPanelWidth;
+ if (r>0)
+ {
+ Index s = IsLower ? 0 : pi + actualPanelWidth;
+ general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjLhs,RhsScalar,ConjRhs>::run(
+ actualPanelWidth, r,
+ &lhs.coeffRef(pi,s), lhsStride,
+ &rhs.coeffRef(s), rhsIncr,
+ &res.coeffRef(pi), resIncr, alpha);
+ }
+ }
+ }
+};
+
+/***************************************************************************
+* Wrapper to product_triangular_vector
+***************************************************************************/
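+
+// Illustrative sketch (not part of the original source): a triangular
+// matrix * vector expression such as
+//
+//   Eigen::VectorXd y = A.triangularView<Eigen::Lower>() * x;
+//
+// is evaluated through the TriangularProduct specializations below.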
+
+template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
+struct traits<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,true> >
+ : traits<ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,true>, Lhs, Rhs> >
+{};
+
+template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
+struct traits<TriangularProduct<Mode,LhsIsTriangular,Lhs,true,Rhs,false> >
+ : traits<ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,true,Rhs,false>, Lhs, Rhs> >
+{};
+
+
+template<int StorageOrder>
+struct trmv_selector;
+
+} // end namespace internal
+
+template<int Mode, typename Lhs, typename Rhs>
+struct TriangularProduct<Mode,true,Lhs,false,Rhs,true>
+ : public ProductBase<TriangularProduct<Mode,true,Lhs,false,Rhs,true>, Lhs, Rhs >
+{
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct)
+
+ TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ {
+ eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+
+ internal::trmv_selector<(int(internal::traits<Lhs>::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dst, alpha);
+ }
+};
+
+template<int Mode, typename Lhs, typename Rhs>
+struct TriangularProduct<Mode,false,Lhs,true,Rhs,false>
+ : public ProductBase<TriangularProduct<Mode,false,Lhs,true,Rhs,false>, Lhs, Rhs >
+{
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct)
+
+ TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dst, Scalar alpha) const
+ {
+ eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+
+ typedef TriangularProduct<(Mode & UnitDiag) | ((Mode & Lower) ? Upper : Lower),true,Transpose<const Rhs>,false,Transpose<const Lhs>,true> TriangularProductTranspose;
+ Transpose<Dest> dstT(dst);
+ internal::trmv_selector<(int(internal::traits<Rhs>::Flags)&RowMajorBit) ? ColMajor : RowMajor>::run(
+ TriangularProductTranspose(m_rhs.transpose(),m_lhs.transpose()), dstT, alpha);
+ }
+};
+
+namespace internal {
+
+// TODO: find a way to factorize this piece of code with gemv_selector since the logic is exactly the same.
+
+template<> struct trmv_selector<ColMajor>
+{
+ template<int Mode, typename Lhs, typename Rhs, typename Dest>
+ static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar alpha)
+ {
+ typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
+ typedef typename ProductType::Index Index;
+ typedef typename ProductType::LhsScalar LhsScalar;
+ typedef typename ProductType::RhsScalar RhsScalar;
+ typedef typename ProductType::Scalar ResScalar;
+ typedef typename ProductType::RealScalar RealScalar;
+ typedef typename ProductType::ActualLhsType ActualLhsType;
+ typedef typename ProductType::ActualRhsType ActualRhsType;
+ typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+ typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+ typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
+
+ const ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs());
+ const ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+ * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+ enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1;
+      // on the other hand, it is good for the cache to pack the vector anyway...
+ EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
+ ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
+ MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
+ };
+
+ gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
+
+ bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0));
+ bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
+
+ RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
+
+ ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
+ evalToDest ? dest.data() : static_dest.data());
+
+ if(!evalToDest)
+ {
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ int size = dest.size();
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #endif
+ if(!alphaIsCompatible)
+ {
+ MappedDest(actualDestPtr, dest.size()).setZero();
+ compatibleAlpha = RhsScalar(1);
+ }
+ else
+ MappedDest(actualDestPtr, dest.size()) = dest;
+ }
+
+ internal::product_triangular_matrix_vector
+ <Index,Mode,
+ LhsScalar, LhsBlasTraits::NeedToConjugate,
+ RhsScalar, RhsBlasTraits::NeedToConjugate,
+ ColMajor>
+ ::run(actualLhs.rows(),actualLhs.cols(),
+ actualLhs.data(),actualLhs.outerStride(),
+ actualRhs.data(),actualRhs.innerStride(),
+ actualDestPtr,1,compatibleAlpha);
+
+ if (!evalToDest)
+ {
+ if(!alphaIsCompatible)
+ dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
+ else
+ dest = MappedDest(actualDestPtr, dest.size());
+ }
+ }
+};
+
+template<> struct trmv_selector<RowMajor>
+{
+ template<int Mode, typename Lhs, typename Rhs, typename Dest>
+ static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar alpha)
+ {
+ typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
+ typedef typename ProductType::LhsScalar LhsScalar;
+ typedef typename ProductType::RhsScalar RhsScalar;
+ typedef typename ProductType::Scalar ResScalar;
+ typedef typename ProductType::Index Index;
+ typedef typename ProductType::ActualLhsType ActualLhsType;
+ typedef typename ProductType::ActualRhsType ActualRhsType;
+ typedef typename ProductType::_ActualRhsType _ActualRhsType;
+ typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+ typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+
+ typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+ typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+ ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+ * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+ enum {
+ DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1
+ };
+
+ gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
+
+ ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
+ DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
+
+ if(!DirectlyUseRhs)
+ {
+ #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ int size = actualRhs.size();
+ EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+ #endif
+ Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+ }
+
+ internal::product_triangular_matrix_vector
+ <Index,Mode,
+ LhsScalar, LhsBlasTraits::NeedToConjugate,
+ RhsScalar, RhsBlasTraits::NeedToConjugate,
+ RowMajor>
+ ::run(actualLhs.rows(),actualLhs.cols(),
+ actualLhs.data(),actualLhs.outerStride(),
+ actualRhsPtr,1,
+ dest.data(),dest.innerStride(),
+ actualAlpha);
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TRIANGULARMATRIXVECTOR_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
new file mode 100644
index 00000000000..4dced6b0eb9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -0,0 +1,319 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_H
+#define EIGEN_TRIANGULAR_SOLVER_MATRIX_H
+
+namespace internal {
+
+// if the rhs is row major, let's transpose the product
+template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>
+struct triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
+{
+ static EIGEN_DONT_INLINE void run(
+ Index size, Index cols,
+ const Scalar* tri, Index triStride,
+ Scalar* _other, Index otherStride)
+ {
+ triangular_solve_matrix<
+ Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft,
+ (Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper),
+ NumTraits<Scalar>::IsComplex && Conjugate,
+ TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor>
+ ::run(size, cols, tri, triStride, _other, otherStride);
+ }
+};
+
+/* Optimized triangular solver with multiple right hand sides and the triangular matrix on the left
+ */
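+// Illustrative sketch (not part of the original source): this kernel backs
+// in-place multi-rhs solves such as
+//
+//   A.triangularView<Eigen::Lower>().solveInPlace(B); // B <- A^-1 * B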
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>
+{
+ static EIGEN_DONT_INLINE void run(
+ Index size, Index otherSize,
+ const Scalar* _tri, Index triStride,
+ Scalar* _other, Index otherStride)
+ {
+ Index cols = otherSize;
+ const_blas_data_mapper<Scalar, Index, TriStorageOrder> tri(_tri,triStride);
+ blas_data_mapper<Scalar, Index, ColMajor> other(_other,otherStride);
+
+ typedef gebp_traits<Scalar,Scalar> Traits;
+ enum {
+ SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+ IsLower = (Mode&Lower) == Lower
+ };
+
+ Index kc = size; // cache block size along the K direction
+ Index mc = size; // cache block size along the M direction
+ Index nc = cols; // cache block size along the N direction
+ computeProductBlockingSizes<Scalar,Scalar,4>(kc, mc, nc);
+
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+ std::size_t sizeB = sizeW + kc*cols;
+ ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
+ Scalar* blockB = allocatedBlockB + sizeW;
+
+ conj_if<Conjugate> conj;
+ gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, Conjugate, false> gebp_kernel;
+ gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, TriStorageOrder> pack_lhs;
+ gemm_pack_rhs<Scalar, Index, Traits::nr, ColMajor, false, true> pack_rhs;
+
+ for(Index k2=IsLower ? 0 : size;
+ IsLower ? k2<size : k2>0;
+ IsLower ? k2+=kc : k2-=kc)
+ {
+ const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc);
+
+ // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
+ // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into
+ // A11 (the triangular part) and A21 the remaining rectangular part.
+ // Then the high level algorithm is:
+ // - B = R1 => general block copy (done during the next step)
+ // - R1 = L1^-1 B => tricky part
+ // - update B from the new R1 => actually this has to be performed continuously during the above step
+ // - R2 = L2 * B => GEPP
+
+ // The tricky part: compute R1 = L1^-1 B while updating B from R1
+ // The idea is to split L1 into multiple small vertical panels.
+ // Each panel can be split into a small triangular part A1 which is processed without optimization,
+ // and the remaining small part A2 which is processed using gebp with appropriate block strides
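+      // In block form, with a lower triangular panel [A1; A2] and the matching
+      // rhs rows [B1; B2], each iteration below performs:
+      //   X1 = A1^-1 * B1    (small unblocked triangular solve)
+      //   B2 <- B2 - A2*X1   (the gebp call with coefficient Scalar(-1))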
+ {
+      // for each small vertical panel of the lhs
+ for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
+ {
+ Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
+ // tr solve
+ for (Index k=0; k<actualPanelWidth; ++k)
+ {
+ // TODO write a small kernel handling this (can be shared with trsv)
+ Index i = IsLower ? k2+k1+k : k2-k1-k-1;
+ Index s = IsLower ? k2+k1 : i+1;
+ Index rs = actualPanelWidth - k - 1; // remaining size
+
+ Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i));
+ for (Index j=0; j<cols; ++j)
+ {
+ if (TriStorageOrder==RowMajor)
+ {
+ Scalar b = 0;
+ const Scalar* l = &tri(i,s);
+ Scalar* r = &other(s,j);
+ for (Index i3=0; i3<k; ++i3)
+ b += conj(l[i3]) * r[i3];
+
+ other(i,j) = (other(i,j) - b)*a;
+ }
+ else
+ {
+ Index s = IsLower ? i+1 : i-rs;
+ Scalar b = (other(i,j) *= a);
+ Scalar* r = &other(s,j);
+ const Scalar* l = &tri(s,i);
+ for (Index i3=0;i3<rs;++i3)
+ r[i3] -= b * conj(l[i3]);
+ }
+ }
+ }
+
+ Index lengthTarget = actual_kc-k1-actualPanelWidth;
+ Index startBlock = IsLower ? k2+k1 : k2-k1-actualPanelWidth;
+ Index blockBOffset = IsLower ? k1 : lengthTarget;
+
+ // update the respective rows of B from other
+ pack_rhs(blockB, _other+startBlock, otherStride, actualPanelWidth, cols, actual_kc, blockBOffset);
+
+ // GEBP
+ if (lengthTarget>0)
+ {
+ Index startTarget = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc;
+
+ pack_lhs(blockA, &tri(startTarget,startBlock), triStride, actualPanelWidth, lengthTarget);
+
+ gebp_kernel(_other+startTarget, otherStride, blockA, blockB, lengthTarget, actualPanelWidth, cols, Scalar(-1),
+ actualPanelWidth, actual_kc, 0, blockBOffset);
+ }
+ }
+ }
+
+ // R2 = A2 * B => GEPP
+ {
+ Index start = IsLower ? k2+kc : 0;
+ Index end = IsLower ? size : k2-kc;
+ for(Index i2=start; i2<end; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(mc,end-i2);
+ if (actual_mc>0)
+ {
+ pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc);
+
+ gebp_kernel(_other+i2, otherStride, blockA, blockB, actual_mc, actual_kc, cols, Scalar(-1));
+ }
+ }
+ }
+ }
+ }
+};
+
+/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right
+ */
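+// Illustrative sketch (not part of the original source): this kernel backs
+// right-side solves, e.g. computing X = B * A^-1 as
+//
+//   Eigen::MatrixXd X = A.triangularView<Eigen::Upper>().solve<Eigen::OnTheRight>(B);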
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>
+{
+ static EIGEN_DONT_INLINE void run(
+ Index size, Index otherSize,
+ const Scalar* _tri, Index triStride,
+ Scalar* _other, Index otherStride)
+ {
+ Index rows = otherSize;
+ const_blas_data_mapper<Scalar, Index, TriStorageOrder> rhs(_tri,triStride);
+ blas_data_mapper<Scalar, Index, ColMajor> lhs(_other,otherStride);
+
+ typedef gebp_traits<Scalar,Scalar> Traits;
+ enum {
+ RhsStorageOrder = TriStorageOrder,
+ SmallPanelWidth = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+ IsLower = (Mode&Lower) == Lower
+ };
+
+// Index kc = std::min<Index>(Traits::Max_kc/4,size); // cache block size along the K direction
+// Index mc = std::min<Index>(Traits::Max_mc,size); // cache block size along the M direction
+    // FIXME: double check this choice of blocking sizes
+ Index kc = size; // cache block size along the K direction
+ Index mc = size; // cache block size along the M direction
+ Index nc = rows; // cache block size along the N direction
+ computeProductBlockingSizes<Scalar,Scalar,4>(kc, mc, nc);
+
+ std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+ std::size_t sizeB = sizeW + kc*size;
+ ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
+ ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
+ Scalar* blockB = allocatedBlockB + sizeW;
+
+ conj_if<Conjugate> conj;
+ gebp_kernel<Scalar,Scalar, Index, Traits::mr, Traits::nr, false, Conjugate> gebp_kernel;
+ gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+ gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;
+ gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, ColMajor, false, true> pack_lhs_panel;
+
+ for(Index k2=IsLower ? size : 0;
+ IsLower ? k2>0 : k2<size;
+ IsLower ? k2-=kc : k2+=kc)
+ {
+ const Index actual_kc = (std::min)(IsLower ? k2 : size-k2, kc);
+ Index actual_k2 = IsLower ? k2-actual_kc : k2 ;
+
+ Index startPanel = IsLower ? 0 : k2+actual_kc;
+ Index rs = IsLower ? actual_k2 : size - actual_k2 - actual_kc;
+ Scalar* geb = blockB+actual_kc*actual_kc;
+
+ if (rs>0) pack_rhs(geb, &rhs(actual_k2,startPanel), triStride, actual_kc, rs);
+
+      // triangular packing (we only pack the panels off the diagonal,
+      // neglecting the blocks overlapping the diagonal)
+ {
+ for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+ {
+ Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+ Index actual_j2 = actual_k2 + j2;
+ Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+ Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
+
+ if (panelLength>0)
+ pack_rhs_panel(blockB+j2*actual_kc,
+ &rhs(actual_k2+panelOffset, actual_j2), triStride,
+ panelLength, actualPanelWidth,
+ actual_kc, panelOffset);
+ }
+ }
+
+ for(Index i2=0; i2<rows; i2+=mc)
+ {
+ const Index actual_mc = (std::min)(mc,rows-i2);
+
+ // triangular solver kernel
+ {
+ // for each small block of the diagonal (=> vertical panels of rhs)
+ for (Index j2 = IsLower
+ ? (actual_kc - ((actual_kc%SmallPanelWidth) ? Index(actual_kc%SmallPanelWidth)
+ : Index(SmallPanelWidth)))
+ : 0;
+ IsLower ? j2>=0 : j2<actual_kc;
+ IsLower ? j2-=SmallPanelWidth : j2+=SmallPanelWidth)
+ {
+ Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+ Index absolute_j2 = actual_k2 + j2;
+ Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+ Index panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2;
+
+ // GEBP
+ if(panelLength>0)
+ {
+ gebp_kernel(&lhs(i2,absolute_j2), otherStride,
+ blockA, blockB+j2*actual_kc,
+ actual_mc, panelLength, actualPanelWidth,
+ Scalar(-1),
+ actual_kc, actual_kc, // strides
+ panelOffset, panelOffset, // offsets
+ allocatedBlockB); // workspace
+ }
+
+ // unblocked triangular solve
+ for (Index k=0; k<actualPanelWidth; ++k)
+ {
+ Index j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k;
+
+ Scalar* r = &lhs(i2,j);
+ for (Index k3=0; k3<k; ++k3)
+ {
+ Scalar b = conj(rhs(IsLower ? j+1+k3 : absolute_j2+k3,j));
+ Scalar* a = &lhs(i2,IsLower ? j+1+k3 : absolute_j2+k3);
+ for (Index i=0; i<actual_mc; ++i)
+ r[i] -= a[i] * b;
+ }
+ Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j));
+ for (Index i=0; i<actual_mc; ++i)
+ r[i] *= b;
+ }
+
+ // pack the just computed part of lhs to A
+ pack_lhs_panel(blockA, _other+absolute_j2*otherStride+i2, otherStride,
+ actualPanelWidth, actual_mc,
+ actual_kc, j2);
+ }
+ }
+
+ if (rs>0)
+ gebp_kernel(_other+i2+startPanel*otherStride, otherStride, blockA, geb,
+ actual_mc, actual_kc, rs, Scalar(-1),
+ -1, -1, 0, 0, allocatedBlockB);
+ }
+ }
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverVector.h b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverVector.h
new file mode 100644
index 00000000000..639d4a5b476
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverVector.h
@@ -0,0 +1,150 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H
+#define EIGEN_TRIANGULAR_SOLVER_VECTOR_H
+
+namespace internal {
+
+template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate, int StorageOrder>
+struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheRight, Mode, Conjugate, StorageOrder>
+{
+ static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
+ {
+ triangular_solve_vector<LhsScalar,RhsScalar,Index,OnTheLeft,
+ ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),
+ Conjugate,StorageOrder==RowMajor?ColMajor:RowMajor
+ >::run(size, _lhs, lhsStride, rhs);
+ }
+};
+
+// forward and backward substitution, row-major, rhs is a vector
+template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>
+struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, RowMajor>
+{
+ enum {
+ IsLower = ((Mode&Lower)==Lower)
+ };
+ static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
+ {
+ typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;
+ const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));
+ typename internal::conditional<
+ Conjugate,
+ const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,
+ const LhsMap&>
+ ::type cjLhs(lhs);
+ static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+ for(Index pi=IsLower ? 0 : size;
+ IsLower ? pi<size : pi>0;
+ IsLower ? pi+=PanelWidth : pi-=PanelWidth)
+ {
+ Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
+
+ Index r = IsLower ? pi : size - pi; // remaining size
+ if (r > 0)
+ {
+        // let's call the low-level product function directly because:
+        // 1 - it is faster to compile
+        // 2 - it is slightly faster at runtime
+ Index startRow = IsLower ? pi : pi-actualPanelWidth;
+ Index startCol = IsLower ? 0 : pi;
+
+ general_matrix_vector_product<Index,LhsScalar,RowMajor,Conjugate,RhsScalar,false>::run(
+ actualPanelWidth, r,
+ &lhs.coeffRef(startRow,startCol), lhsStride,
+ rhs + startCol, 1,
+ rhs + startRow, 1,
+ RhsScalar(-1));
+ }
+
+ for(Index k=0; k<actualPanelWidth; ++k)
+ {
+ Index i = IsLower ? pi+k : pi-k-1;
+ Index s = IsLower ? pi : i+1;
+ if (k>0)
+ rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map<const Matrix<RhsScalar,Dynamic,1> >(rhs+s,k))).sum();
+
+ if(!(Mode & UnitDiag))
+ rhs[i] /= cjLhs(i,i);
+ }
+ }
+ }
+};
+
+// forward and backward substitution, column-major, rhs is a vector
+template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>
+struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, ColMajor>
+{
+ enum {
+ IsLower = ((Mode&Lower)==Lower)
+ };
+ static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
+ {
+ typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;
+ const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));
+ typename internal::conditional<Conjugate,
+ const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,
+ const LhsMap&
+ >::type cjLhs(lhs);
+ static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+
+ for(Index pi=IsLower ? 0 : size;
+ IsLower ? pi<size : pi>0;
+ IsLower ? pi+=PanelWidth : pi-=PanelWidth)
+ {
+ Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
+ Index startBlock = IsLower ? pi : pi-actualPanelWidth;
+ Index endBlock = IsLower ? pi + actualPanelWidth : 0;
+
+ for(Index k=0; k<actualPanelWidth; ++k)
+ {
+ Index i = IsLower ? pi+k : pi-k-1;
+ if(!(Mode & UnitDiag))
+ rhs[i] /= cjLhs.coeff(i,i);
+
+ Index r = actualPanelWidth - k - 1; // remaining size
+ Index s = IsLower ? i+1 : i-r;
+ if (r>0)
+ Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r);
+ }
+ Index r = IsLower ? size - endBlock : startBlock; // remaining size
+ if (r > 0)
+ {
+        // let's call the low-level product function directly because:
+        // 1 - it is faster to compile
+        // 2 - it is slightly faster at runtime
+ general_matrix_vector_product<Index,LhsScalar,ColMajor,Conjugate,RhsScalar,false>::run(
+ r, actualPanelWidth,
+ &lhs.coeffRef(endBlock,startBlock), lhsStride,
+ rhs+startBlock, 1,
+ rhs+endBlock, 1, RhsScalar(-1));
+ }
+ }
+ }
+};
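+
+// Illustrative usage sketch, not part of the upstream file: a vector
+// right-hand side is assumed to reach these kernels through the public API.
+//   Eigen::MatrixXf A(n,n);
+//   Eigen::VectorXf b(n);
+//   A.triangularView<Eigen::Upper>().solveInPlace(b); // back substitution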
+
+} // end namespace internal
+
+#endif // EIGEN_TRIANGULAR_SOLVER_VECTOR_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/BlasUtil.h b/extern/Eigen3/Eigen/src/Core/util/BlasUtil.h
new file mode 100644
index 00000000000..f1d93d2f8b9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/BlasUtil.h
@@ -0,0 +1,271 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BLASUTIL_H
+#define EIGEN_BLASUTIL_H
+
+// This file contains many lightweight helper classes used to
+// implement and control fast level 2 and level 3 BLAS-like routines.
+
+namespace internal {
+
+// forward declarations
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs=false, bool ConjugateRhs=false>
+struct gebp_kernel;
+
+template<typename Scalar, typename Index, int nr, int StorageOrder, bool Conjugate = false, bool PanelMode=false>
+struct gemm_pack_rhs;
+
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
+struct gemm_pack_lhs;
+
+template<
+ typename Index,
+ typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+ typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+ int ResStorageOrder>
+struct general_matrix_matrix_product;
+
+template<typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
+struct general_matrix_vector_product;
+
+
+template<bool Conjugate> struct conj_if;
+
+template<> struct conj_if<true> {
+ template<typename T>
+ inline T operator()(const T& x) { return conj(x); }
+};
+
+template<> struct conj_if<false> {
+ template<typename T>
+ inline const T& operator()(const T& x) { return x; }
+};
+
+template<typename Scalar> struct conj_helper<Scalar,Scalar,false,false>
+{
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return internal::pmadd(x,y,c); }
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const { return internal::pmul(x,y); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, false,true>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+ { return c + pmul(x,y); }
+
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+ { return Scalar(real(x)*real(y) + imag(x)*imag(y), imag(x)*real(y) - real(x)*imag(y)); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,false>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+ { return c + pmul(x,y); }
+
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+ { return Scalar(real(x)*real(y) + imag(x)*imag(y), real(x)*imag(y) - imag(x)*real(y)); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,true>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+ { return c + pmul(x,y); }
+
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+ { return Scalar(real(x)*real(y) - imag(x)*imag(y), - real(x)*imag(y) - imag(x)*real(y)); }
+};
+
+template<typename RealScalar,bool Conj> struct conj_helper<std::complex<RealScalar>, RealScalar, Conj,false>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const RealScalar& y, const Scalar& c) const
+ { return padd(c, pmul(x,y)); }
+ EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const RealScalar& y) const
+ { return conj_if<Conj>()(x)*y; }
+};
+
+template<typename RealScalar,bool Conj> struct conj_helper<RealScalar, std::complex<RealScalar>, false,Conj>
+{
+ typedef std::complex<RealScalar> Scalar;
+ EIGEN_STRONG_INLINE Scalar pmadd(const RealScalar& x, const Scalar& y, const Scalar& c) const
+ { return padd(c, pmul(x,y)); }
+ EIGEN_STRONG_INLINE Scalar pmul(const RealScalar& x, const Scalar& y) const
+ { return x*conj_if<Conj>()(y); }
+};
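+
+// Semantics sketch (illustrative, not part of the upstream file): for
+// complex scalars, pmul applies the requested conjugations, e.g.
+//   typedef std::complex<float> C;
+//   conj_helper<C,C,false,true>().pmul(x,y);  // computes x * conj(y)
+//   conj_helper<C,C,true,false>().pmul(x,y);  // computes conj(x) * y
+//   conj_helper<C,C,true,true>().pmul(x,y);   // computes conj(x) * conj(y)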
+
+template<typename From,typename To> struct get_factor {
+ EIGEN_STRONG_INLINE static To run(const From& x) { return x; }
+};
+
+template<typename Scalar> struct get_factor<Scalar,typename NumTraits<Scalar>::Real> {
+ EIGEN_STRONG_INLINE static typename NumTraits<Scalar>::Real run(const Scalar& x) { return real(x); }
+};
+
+// Lightweight helper class to access matrix coefficients.
+// Yes, this is somewhat redundant with Map<>, but this version is much lighter,
+// and should therefore give better compilation performance (time and code quality).
+template<typename Scalar, typename Index, int StorageOrder>
+class blas_data_mapper
+{
+ public:
+ blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i, Index j)
+ { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; }
+ protected:
+ Scalar* EIGEN_RESTRICT m_data;
+ Index m_stride;
+};
+
+// lightweight helper class to access matrix coefficients (const version)
+template<typename Scalar, typename Index, int StorageOrder>
+class const_blas_data_mapper
+{
+ public:
+ const_blas_data_mapper(const Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i, Index j) const
+ { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; }
+ protected:
+ const Scalar* EIGEN_RESTRICT m_data;
+ Index m_stride;
+};
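+
+// Indexing sketch (illustrative, not part of the upstream file):
+//   const_blas_data_mapper<float,int,ColMajor> m(data, stride);
+//   // m(i,j) reads data[i + j*stride]; with RowMajor it would read
+//   // data[j + i*stride], mirroring a plain strided BLAS array.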
+
+
+/* Helper class to analyze the factors of a Product expression.
+ * In particular it allows one to peel off operator-, scalar multiples,
+ * and conjugates. */
+template<typename XprType> struct blas_traits
+{
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef const XprType& ExtractType;
+ typedef XprType _ExtractType;
+ enum {
+ IsComplex = NumTraits<Scalar>::IsComplex,
+ IsTransposed = false,
+ NeedToConjugate = false,
+ HasUsableDirectAccess = ( (int(XprType::Flags)&DirectAccessBit)
+ && ( bool(XprType::IsVectorAtCompileTime)
+ || int(inner_stride_at_compile_time<XprType>::ret) == 1)
+ ) ? 1 : 0
+ };
+ typedef typename conditional<bool(HasUsableDirectAccess),
+ ExtractType,
+ typename _ExtractType::PlainObject
+ >::type DirectLinearAccessType;
+ static inline const ExtractType extract(const XprType& x) { return x; }
+ static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
+};
+
+// pop conjugate
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+ typedef blas_traits<NestedXpr> Base;
+ typedef CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> XprType;
+ typedef typename Base::ExtractType ExtractType;
+
+ enum {
+ IsComplex = NumTraits<Scalar>::IsComplex,
+ NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex
+ };
+ static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); }
+};
+
+// pop scalar multiple
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+ typedef blas_traits<NestedXpr> Base;
+ typedef CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> XprType;
+ typedef typename Base::ExtractType ExtractType;
+ static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x)
+ { return x.functor().m_other * Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+// pop opposite
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+ typedef blas_traits<NestedXpr> Base;
+ typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;
+ typedef typename Base::ExtractType ExtractType;
+ static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x)
+ { return - Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+// pop/push transpose
+template<typename NestedXpr>
+struct blas_traits<Transpose<NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+ typedef typename NestedXpr::Scalar Scalar;
+ typedef blas_traits<NestedXpr> Base;
+ typedef Transpose<NestedXpr> XprType;
+ typedef Transpose<const typename Base::_ExtractType> ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS
+ typedef Transpose<const typename Base::_ExtractType> _ExtractType;
+ typedef typename conditional<bool(Base::HasUsableDirectAccess),
+ ExtractType,
+ typename ExtractType::PlainObject
+ >::type DirectLinearAccessType;
+ enum {
+ IsTransposed = Base::IsTransposed ? 0 : 1
+ };
+ static inline const ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+ static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+template<typename T>
+struct blas_traits<const T>
+ : blas_traits<T>
+{};
+
+template<typename T, bool HasUsableDirectAccess=blas_traits<T>::HasUsableDirectAccess>
+struct extract_data_selector {
+ static const typename T::Scalar* run(const T& m)
+ {
+ return const_cast<typename T::Scalar*>(&blas_traits<T>::extract(m).coeffRef(0,0)); // FIXME this should be .data()
+ }
+};
+
+template<typename T>
+struct extract_data_selector<T,false> {
+ static typename T::Scalar* run(const T&) { return 0; }
+};
+
+template<typename T> const typename T::Scalar* extract_data(const T& m)
+{
+ return extract_data_selector<T>::run(m);
+}
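+
+// Peeling sketch (illustrative, not part of the upstream file): for an
+// expression x = (s*m).transpose() with MatrixXf m and float s,
+//   blas_traits<XprType>::extractScalarFactor(x) yields s,
+//   blas_traits<XprType>::extract(x) exposes m (with IsTransposed set),
+//   and extract_data(x) returns the pointer to m's coefficient array.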
+
+} // end namespace internal
+
+#endif // EIGEN_BLASUTIL_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/Constants.h b/extern/Eigen3/Eigen/src/Core/util/Constants.h
new file mode 100644
index 00000000000..c3dd3a09d00
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/Constants.h
@@ -0,0 +1,439 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_CONSTANTS_H
+#define EIGEN_CONSTANTS_H
+
+/** This value means that a quantity is not known at compile-time, and that instead the value is
+ * stored in some runtime variable.
+ *
+ * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.
+ */
+const int Dynamic = -1;
+
+/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
+ * The value Infinity there means the L-infinity norm.
+ */
+const int Infinity = -1;
+
+/** \defgroup flags Flags
+ * \ingroup Core_Module
+ *
+ * These are the possible bits which can be OR'ed to constitute the flags of a matrix or
+ * expression.
+ *
+ * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of
+ * an expression type, implemented as enum's. They are not stored in memory at runtime, and they do not incur any
+ * runtime overhead.
+ *
+ * \sa MatrixBase::Flags
+ */
+
+/** \ingroup flags
+ *
+ * for a matrix, this means that the storage order is row-major.
+ * If this bit is not set, the storage order is column-major.
+ * For an expression, this determines the storage order of
+ * the matrix created by evaluation of that expression.
+ * \sa \ref TopicStorageOrders */
+const unsigned int RowMajorBit = 0x1;
+
+/** \ingroup flags
+ *
+ * means the expression should be evaluated into a temporary before being nested in another expression */
+const unsigned int EvalBeforeNestingBit = 0x2;
+
+/** \ingroup flags
+ *
+ * means the expression should be evaluated before any assignment */
+const unsigned int EvalBeforeAssigningBit = 0x4;
+
+/** \ingroup flags
+ *
+ * Short version: means the expression might be vectorized
+ *
+ * Long version: means that the coefficients can be handled by packets
+ * and start at a memory location whose alignment meets the requirements
+ * of the present CPU architecture for optimized packet access. In the fixed-size
+ * case, there is the additional condition that it be possible to access all the
+ * coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes,
+ * and that any nontrivial strides don't break the alignment). In the dynamic-size case,
+ * there is no such condition on the total size and strides, so it might not be possible to access
+ * all coeffs by packets.
+ *
+ * \note This bit can be set regardless of whether vectorization is actually enabled.
+ * To check for actual vectorizability, see \a ActualPacketAccessBit.
+ */
+const unsigned int PacketAccessBit = 0x8;
+
+#ifdef EIGEN_VECTORIZE
+/** \ingroup flags
+ *
+ * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant
+ * is set to the value \a PacketAccessBit.
+ *
+ * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant
+ * is set to the value 0.
+ */
+const unsigned int ActualPacketAccessBit = PacketAccessBit;
+#else
+const unsigned int ActualPacketAccessBit = 0x0;
+#endif
+
+/** \ingroup flags
+ *
+ * Short version: means the expression can be seen as a 1D vector.
+ *
+ * Long version: means that one can access the coefficients
+ * of this expression by coeff(int), and coeffRef(int) in the case of an lvalue expression. These
+ * index-based access methods are guaranteed not to have to do any runtime computation
+ * of a (row, col)-pair from the index, so whenever index-based access is available,
+ * it is at least as fast as
+ * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.
+ *
+ * If both PacketAccessBit and LinearAccessBit are set, then the
+ * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of an
+ * lvalue expression.
+ *
+ * Typically, all vector expressions have the LinearAccessBit, but there is one exception:
+ * Product expressions don't have it, because it would be troublesome for vectorization, even when the
+ * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but
+ * not index-based packet access, so they don't have the LinearAccessBit.
+ */
+const unsigned int LinearAccessBit = 0x10;
+
+/** \ingroup flags
+ *
+ * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable.
+ * This rules out read-only expressions.
+ *
+ * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expressions having one but not
+ * the other:
+ * \li writable expressions that don't have a very simple memory layout as a strided array have LvalueBit but not DirectAccessBit
+ * \li Map-to-const expressions, for example Map<const Matrix>, have DirectAccessBit but not LvalueBit
+ *
+ * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value.
+ */
+const unsigned int LvalueBit = 0x20;
+
+/** \ingroup flags
+ *
+ * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout
+ * of the array of coefficients must be exactly the natural one suggested by rows(), cols(),
+ * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients,
+ * though referenceable, do not have such a regular memory layout.
+ *
+ * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal.
+ */
+const unsigned int DirectAccessBit = 0x40;
+
+/** \ingroup flags
+ *
+ * means the first coefficient packet is guaranteed to be aligned */
+const unsigned int AlignedBit = 0x80;
+
+const unsigned int NestByRefBit = 0x100;
+
+// list of flags that are inherited by default
+const unsigned int HereditaryBits = RowMajorBit
+ | EvalBeforeNestingBit
+ | EvalBeforeAssigningBit;
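+
+// Compile-time check sketch (illustrative, not part of the upstream file):
+//   enum { IsRowMajor = int(Eigen::Matrix3f::Flags) & Eigen::RowMajorBit };
+//   // evaluates to 0 here, since Matrix3f is column-major by default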
+
+/** \defgroup enums Enumerations
+ * \ingroup Core_Module
+ *
+ * Various enumerations used in %Eigen. Many of these are used as template parameters.
+ */
+
+/** \ingroup enums
+ * Enum containing possible values for the \p Mode parameter of
+ * MatrixBase::selfadjointView() and MatrixBase::triangularView(). */
+enum {
+ /** View matrix as a lower triangular matrix. */
+ Lower=0x1,
+ /** View matrix as an upper triangular matrix. */
+ Upper=0x2,
+ /** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */
+ UnitDiag=0x4,
+ /** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */
+ ZeroDiag=0x8,
+ /** View matrix as a lower triangular matrix with ones on the diagonal. */
+ UnitLower=UnitDiag|Lower,
+ /** View matrix as an upper triangular matrix with ones on the diagonal. */
+ UnitUpper=UnitDiag|Upper,
+ /** View matrix as a lower triangular matrix with zeros on the diagonal. */
+ StrictlyLower=ZeroDiag|Lower,
+ /** View matrix as an upper triangular matrix with zeros on the diagonal. */
+ StrictlyUpper=ZeroDiag|Upper,
+ /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */
+ SelfAdjoint=0x10
+};
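+
+// Usage sketch (illustrative, assuming the MatrixBase view API):
+//   m.triangularView<Eigen::UnitLower>(); // unit lower-triangular view of m
+//   m.selfadjointView<Eigen::Upper>();    // self-adjoint view, upper part stored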
+
+/** \ingroup enums
+ * Enum for indicating whether an object is aligned or not. */
+enum {
+ /** Object is not correctly aligned for vectorization. */
+ Unaligned=0,
+ /** Object is aligned for vectorization. */
+ Aligned=1
+};
+
+enum { ConditionalJumpCost = 5 };
+
+/** \ingroup enums
+ * Enum used by DenseBase::corner() in Eigen2 compatibility mode. */
+// FIXME: after the corner() API change, this is no longer needed, except by AlignedBox
+// TODO: find out what to do with it. Adapt the AlignedBox API?
+enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
+
+/** \ingroup enums
+ * Enum containing possible values for the \p Direction parameter of
+ * Reverse, PartialReduxExpr and VectorwiseOp. */
+enum DirectionType {
+ /** For Reverse, all columns are reversed;
+ * for PartialReduxExpr and VectorwiseOp, act on columns. */
+ Vertical,
+ /** For Reverse, all rows are reversed;
+ * for PartialReduxExpr and VectorwiseOp, act on rows. */
+ Horizontal,
+ /** For Reverse, both rows and columns are reversed;
+ * not used for PartialReduxExpr and VectorwiseOp. */
+ BothDirections
+};
+
+enum ProductEvaluationMode { NormalProduct, CacheFriendlyProduct };
+
+/** \internal \ingroup enums
+ * Enum to specify how to traverse the entries of a matrix. */
+enum {
+ /** \internal Default traversal, no vectorization, no index-based access */
+ DefaultTraversal,
+ /** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */
+ LinearTraversal,
+ /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment
+ * and good size */
+ InnerVectorizedTraversal,
+ /** \internal Vectorization path using a single loop plus scalar loops for the
+ * unaligned boundaries */
+ LinearVectorizedTraversal,
+ /** \internal Generic vectorization path using one vectorized loop per row/column with some
+ * scalar loops to handle the unaligned boundaries */
+ SliceVectorizedTraversal,
+  /** \internal Special case to properly handle incompatible scalar types or other defective cases */
+ InvalidTraversal
+};
+
+/** \internal \ingroup enums
+ * Enum to specify whether to unroll loops when traversing over the entries of a matrix. */
+enum {
+ /** \internal Do not unroll loops. */
+ NoUnrolling,
+ /** \internal Unroll only the inner loop, but not the outer loop. */
+ InnerUnrolling,
+ /** \internal Unroll both the inner and the outer loop. If there is only one loop,
+ * because linear traversal is used, then unroll that loop. */
+ CompleteUnrolling
+};
+
+/** \ingroup enums
+ * Enum containing possible values for the \p _Options template parameter of
+ * Matrix, Array and BandMatrix. */
+enum {
+ /** Storage order is column major (see \ref TopicStorageOrders). */
+ ColMajor = 0,
+ /** Storage order is row major (see \ref TopicStorageOrders). */
+ RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
+ /** \internal Align the matrix itself if it is vectorizable fixed-size */
+ AutoAlign = 0,
+ /** \internal Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation
+ DontAlign = 0x2
+};
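+
+// Usage sketch (illustrative, not part of the upstream file):
+//   typedef Eigen::Matrix<float,3,3,Eigen::RowMajor>  RowMajorMatrix3f;
+//   typedef Eigen::Matrix<float,4,1,Eigen::DontAlign> UnalignedVector4f;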
+
+/** \ingroup enums
+ * Enum for specifying whether to apply or solve on the left or right. */
+enum {
+ /** Apply transformation on the left. */
+ OnTheLeft = 1,
+ /** Apply transformation on the right. */
+ OnTheRight = 2
+};
+
+/* the following could just as well be written:
+ * enum NoChange_t { NoChange };
+ * but it feels dangerous to disambiguate overloaded functions based on enum/integer types.
+ * If on some platform it is really impossible to get rid of "unused variable" warnings, then
+ * we can always come back to that solution.
+ */
+struct NoChange_t {};
+namespace {
+ EIGEN_UNUSED NoChange_t NoChange;
+}
+
+struct Sequential_t {};
+namespace {
+ EIGEN_UNUSED Sequential_t Sequential;
+}
+
+struct Default_t {};
+namespace {
+ EIGEN_UNUSED Default_t Default;
+}
+
+/** \internal \ingroup enums
+ * Used in AmbiVector. */
+enum {
+ IsDense = 0,
+ IsSparse
+};
+
+/** \ingroup enums
+ * Used as template parameter in DenseCoeffBase and MapBase to indicate
+ * which accessors should be provided. */
+enum AccessorLevels {
+ /** Read-only access via a member function. */
+ ReadOnlyAccessors,
+ /** Read/write access via member functions. */
+ WriteAccessors,
+ /** Direct read-only access to the coefficients. */
+ DirectAccessors,
+ /** Direct read/write access to the coefficients. */
+ DirectWriteAccessors
+};
+
+/** \ingroup enums
+ * Enum with options to give to various decompositions. */
+enum DecompositionOptions {
+ /** \internal Not used (meant for LDLT?). */
+ Pivoting = 0x01,
+ /** \internal Not used (meant for LDLT?). */
+ NoPivoting = 0x02,
+ /** Used in JacobiSVD to indicate that the square matrix U is to be computed. */
+ ComputeFullU = 0x04,
+ /** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */
+ ComputeThinU = 0x08,
+ /** Used in JacobiSVD to indicate that the square matrix V is to be computed. */
+ ComputeFullV = 0x10,
+ /** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */
+ ComputeThinV = 0x20,
+ /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
+ * that only the eigenvalues are to be computed and not the eigenvectors. */
+ EigenvaluesOnly = 0x40,
+ /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
+ * that both the eigenvalues and the eigenvectors are to be computed. */
+ ComputeEigenvectors = 0x80,
+ /** \internal */
+ EigVecMask = EigenvaluesOnly | ComputeEigenvectors,
+ /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+ * solve the generalized eigenproblem \f$ Ax = \lambda B x \f$. */
+ Ax_lBx = 0x100,
+ /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+ * solve the generalized eigenproblem \f$ ABx = \lambda x \f$. */
+ ABx_lx = 0x200,
+ /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+ * solve the generalized eigenproblem \f$ BAx = \lambda x \f$. */
+ BAx_lx = 0x400,
+ /** \internal */
+ GenEigMask = Ax_lBx | ABx_lx | BAx_lx
+};
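+
+// Usage sketch (illustrative, assuming the JacobiSVD public API):
+//   Eigen::JacobiSVD<Eigen::MatrixXf> svd(m, Eigen::ComputeThinU | Eigen::ComputeThinV);
+//   Eigen::VectorXf x = svd.solve(b); // least-squares solve via the thin factors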
+
+/** \ingroup enums
+ * Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */
+enum QRPreconditioners {
+ /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */
+ NoQRPreconditioner,
+ /** Use a QR decomposition without pivoting as the first step. */
+ HouseholderQRPreconditioner,
+ /** Use a QR decomposition with column pivoting as the first step. */
+ ColPivHouseholderQRPreconditioner,
+ /** Use a QR decomposition with full pivoting as the first step. */
+ FullPivHouseholderQRPreconditioner
+};
+
+#ifdef Success
+#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h
+#endif
+
+/** \ingroup enums
+ * Enum for reporting the status of a computation. */
+enum ComputationInfo {
+ /** Computation was successful. */
+ Success = 0,
+ /** The provided data did not satisfy the prerequisites. */
+ NumericalIssue = 1,
+ /** Iterative procedure did not converge. */
+ NoConvergence = 2
+};
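+
+// Usage sketch (illustrative, assuming the LLT public API):
+//   Eigen::LLT<Eigen::MatrixXd> llt(m);
+//   if (llt.info() != Eigen::Success)
+//     ; // m was not positive definite; handle the failure here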
+
+/** \ingroup enums
+ * Enum used to specify how a particular transformation is stored in a matrix.
+ * \sa Transform, Hyperplane::transform(). */
+enum TransformTraits {
+ /** Transformation is an isometry. */
+ Isometry = 0x1,
+ /** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is
+ * assumed to be [0 ... 0 1]. */
+ Affine = 0x2,
+ /** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */
+ AffineCompact = 0x10 | Affine,
+ /** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */
+ Projective = 0x20
+};
+
+/** \internal \ingroup enums
+ * Enum used to choose between implementation depending on the computer architecture. */
+namespace Architecture
+{
+ enum Type {
+ Generic = 0x0,
+ SSE = 0x1,
+ AltiVec = 0x2,
+#if defined EIGEN_VECTORIZE_SSE
+ Target = SSE
+#elif defined EIGEN_VECTORIZE_ALTIVEC
+ Target = AltiVec
+#else
+ Target = Generic
+#endif
+ };
+}
+
+/** \internal \ingroup enums
+ * Enum used as template parameter in GeneralProduct. */
+enum { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
+
+/** \internal \ingroup enums
+ * Enum used in experimental parallel implementation. */
+enum Action {GetAction, SetAction};
+
+/** The type used to identify a dense storage. */
+struct Dense {};
+
+/** The type used to identify a matrix expression. */
+struct MatrixXpr {};
+
+/** The type used to identify an array expression. */
+struct ArrayXpr {};
+
+#endif // EIGEN_CONSTANTS_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/DisableStupidWarnings.h b/extern/Eigen3/Eigen/src/Core/util/DisableStupidWarnings.h
new file mode 100644
index 00000000000..00730524b26
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/DisableStupidWarnings.h
@@ -0,0 +1,42 @@
+#ifndef EIGEN_WARNINGS_DISABLED
+#define EIGEN_WARNINGS_DISABLED
+
+#ifdef _MSC_VER
+ // 4100 - unreferenced formal parameter (occurred e.g. in aligned_allocator::destroy(pointer p))
+ // 4101 - unreferenced local variable
+ // 4127 - conditional expression is constant
+ // 4181 - qualifier applied to reference type ignored
+ // 4211 - nonstandard extension used : redefined extern to static
+ // 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data
+ // 4273 - QtAlignedMalloc, inconsistent DLL linkage
+ // 4324 - structure was padded due to declspec(align())
+ // 4512 - assignment operator could not be generated
+ // 4522 - 'class' : multiple assignment operators specified
+ // 4700 - uninitialized local variable 'xyz' used
+ // 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow
+ #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #pragma warning( push )
+ #endif
+ #pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4512 4522 4700 4717 )
+#elif defined __INTEL_COMPILER
+ // 2196 - routine is both "inline" and "noinline" ("noinline" assumed)
+  //        ICC 12 generates this warning even without any inline keyword, when defining class methods inline, i.e. inside the class body
+ // 2536 - type qualifiers are meaningless here
+ // ICC 12 generates this warning when a function return type is const qualified, even if that type is a template-parameter-dependent
+ // typedef that may be a reference type.
+ // 279 - controlling expression is constant
+ // ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case.
+ #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #pragma warning push
+ #endif
+ #pragma warning disable 2196 2536 279
+#elif defined __clang__
+ // -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant
+ // this is really a stupid warning as it warns on compile-time expressions involving enums
+ #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #pragma clang diagnostic push
+ #endif
+ #pragma clang diagnostic ignored "-Wconstant-logical-operand"
+#endif
+
+#endif // not EIGEN_WARNINGS_DISABLED
diff --git a/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h b/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h
new file mode 100644
index 00000000000..7fbccf98c2b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h
@@ -0,0 +1,307 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_FORWARDDECLARATIONS_H
+#define EIGEN_FORWARDDECLARATIONS_H
+
+namespace internal {
+
+template<typename T> struct traits;
+
+// here we say once and for all that traits<const T> == traits<T>
+// When constness must affect traits, it has to be constness on template parameters on which T itself depends.
+// For example, traits<Map<const T> > != traits<Map<T> >, but
+// traits<const Map<T> > == traits<Map<T> >
+template<typename T> struct traits<const T> : traits<T> {};
+
+template<typename Derived> struct has_direct_access
+{
+ enum { ret = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0 };
+};
+
+template<typename Derived> struct accessors_level
+{
+ enum { has_direct_access = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0,
+ has_write_access = (traits<Derived>::Flags & LvalueBit) ? 1 : 0,
+ value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors)
+ : (has_write_access ? WriteAccessors : ReadOnlyAccessors)
+ };
+};
+
+} // end namespace internal
+
+template<typename T> struct NumTraits;
+
+template<typename Derived> struct EigenBase;
+template<typename Derived> class DenseBase;
+template<typename Derived> class PlainObjectBase;
+
+
+template<typename Derived,
+ int Level = internal::accessors_level<Derived>::value >
+class DenseCoeffsBase;
+
+template<typename _Scalar, int _Rows, int _Cols,
+ int _Options = AutoAlign |
+#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
+                         // work around a bug in at least gcc 3.4.6:
+                         // the innermost ?: ternary operator is misparsed. We write it slightly
+                         // differently and this makes gcc 3.4.6 happy, but it's ugly.
+                         // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
+                         // (i.e. when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
+ ( (_Rows==1 && _Cols!=1) ? RowMajor
+ : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
+ : ColMajor ),
+#else
+ ( (_Rows==1 && _Cols!=1) ? RowMajor
+ : (_Cols==1 && _Rows!=1) ? ColMajor
+ : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+#endif
+ int _MaxRows = _Rows,
+ int _MaxCols = _Cols
+> class Matrix;
+
+template<typename Derived> class MatrixBase;
+template<typename Derived> class ArrayBase;
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
+template<typename ExpressionType, template <typename> class StorageBase > class NoAlias;
+template<typename ExpressionType> class NestByValue;
+template<typename ExpressionType> class ForceAlignedAccess;
+template<typename ExpressionType> class SwapWrapper;
+
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
+ bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class Block;
+
+template<typename MatrixType, int Size=Dynamic> class VectorBlock;
+template<typename MatrixType> class Transpose;
+template<typename MatrixType> class Conjugate;
+template<typename NullaryOp, typename MatrixType> class CwiseNullaryOp;
+template<typename UnaryOp, typename MatrixType> class CwiseUnaryOp;
+template<typename ViewOp, typename MatrixType> class CwiseUnaryView;
+template<typename BinaryOp, typename Lhs, typename Rhs> class CwiseBinaryOp;
+template<typename BinOp, typename Lhs, typename Rhs> class SelfCwiseBinaryOp;
+template<typename Derived, typename Lhs, typename Rhs> class ProductBase;
+template<typename Lhs, typename Rhs, int Mode> class GeneralProduct;
+template<typename Lhs, typename Rhs, int NestingFlags> class CoeffBasedProduct;
+
+template<typename Derived> class DiagonalBase;
+template<typename _DiagonalVectorType> class DiagonalWrapper;
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime=SizeAtCompileTime> class DiagonalMatrix;
+template<typename MatrixType, typename DiagonalType, int ProductOrder> class DiagonalProduct;
+template<typename MatrixType, int Index = 0> class Diagonal;
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class PermutationMatrix;
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class Transpositions;
+template<typename Derived> class PermutationBase;
+template<typename Derived> class TranspositionsBase;
+template<typename _IndicesType> class PermutationWrapper;
+template<typename _IndicesType> class TranspositionsWrapper;
+
+template<typename Derived,
+ int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
+> class MapBase;
+template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
+template<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;
+
+template<typename Derived> class TriangularBase;
+template<typename MatrixType, unsigned int Mode> class TriangularView;
+template<typename MatrixType, unsigned int Mode> class SelfAdjointView;
+template<typename MatrixType> class SparseView;
+template<typename ExpressionType> class WithFormat;
+template<typename MatrixType> struct CommaInitializer;
+template<typename Derived> class ReturnByValue;
+template<typename ExpressionType> class ArrayWrapper;
+
+namespace internal {
+template<typename DecompositionType, typename Rhs> struct solve_retval_base;
+template<typename DecompositionType, typename Rhs> struct solve_retval;
+template<typename DecompositionType> struct kernel_retval_base;
+template<typename DecompositionType> struct kernel_retval;
+template<typename DecompositionType> struct image_retval_base;
+template<typename DecompositionType> struct image_retval;
+} // end namespace internal
+
+namespace internal {
+template<typename _Scalar, int Rows=Dynamic, int Cols=Dynamic, int Supers=Dynamic, int Subs=Dynamic, int Options=0> class BandMatrix;
+}
+
+namespace internal {
+template<typename Lhs, typename Rhs> struct product_type;
+}
+
+template<typename Lhs, typename Rhs,
+ int ProductType = internal::product_type<Lhs,Rhs>::value>
+struct ProductReturnType;
+
+// this is a workaround for sun CC
+template<typename Lhs, typename Rhs> struct LazyProductReturnType;
+
+namespace internal {
+
+// Provides scalar/packet-wise product and product with accumulation
+// with optional conjugation of the arguments.
+template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper;
+
+template<typename Scalar> struct scalar_sum_op;
+template<typename Scalar> struct scalar_difference_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op;
+template<typename Scalar> struct scalar_quotient_op;
+template<typename Scalar> struct scalar_opposite_op;
+template<typename Scalar> struct scalar_conjugate_op;
+template<typename Scalar> struct scalar_real_op;
+template<typename Scalar> struct scalar_imag_op;
+template<typename Scalar> struct scalar_abs_op;
+template<typename Scalar> struct scalar_abs2_op;
+template<typename Scalar> struct scalar_sqrt_op;
+template<typename Scalar> struct scalar_exp_op;
+template<typename Scalar> struct scalar_log_op;
+template<typename Scalar> struct scalar_cos_op;
+template<typename Scalar> struct scalar_sin_op;
+template<typename Scalar> struct scalar_acos_op;
+template<typename Scalar> struct scalar_asin_op;
+template<typename Scalar> struct scalar_tan_op;
+template<typename Scalar> struct scalar_pow_op;
+template<typename Scalar> struct scalar_inverse_op;
+template<typename Scalar> struct scalar_square_op;
+template<typename Scalar> struct scalar_cube_op;
+template<typename Scalar, typename NewType> struct scalar_cast_op;
+template<typename Scalar> struct scalar_multiple_op;
+template<typename Scalar> struct scalar_quotient1_op;
+template<typename Scalar> struct scalar_min_op;
+template<typename Scalar> struct scalar_max_op;
+template<typename Scalar> struct scalar_random_op;
+template<typename Scalar> struct scalar_add_op;
+template<typename Scalar> struct scalar_constant_op;
+template<typename Scalar> struct scalar_identity_op;
+
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_multiple2_op;
+
+} // end namespace internal
+
+struct IOFormat;
+
+// Array module
+template<typename _Scalar, int _Rows, int _Cols,
+ int _Options = AutoAlign |
+#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
+                 // work around a bug in at least gcc 3.4.6:
+                 // the innermost ?: ternary operator is misparsed. We write it slightly
+                 // differently and this makes gcc 3.4.6 happy, but it's ugly.
+                 // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
+                 // (i.e. when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
+ ( (_Rows==1 && _Cols!=1) ? RowMajor
+ : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
+ : ColMajor ),
+#else
+ ( (_Rows==1 && _Cols!=1) ? RowMajor
+ : (_Cols==1 && _Rows!=1) ? ColMajor
+ : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+#endif
+ int _MaxRows = _Rows, int _MaxCols = _Cols> class Array;
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;
+template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
+template<typename ExpressionType, int Direction> class VectorwiseOp;
+template<typename MatrixType,int RowFactor,int ColFactor> class Replicate;
+template<typename MatrixType, int Direction = BothDirections> class Reverse;
+
+template<typename MatrixType> class FullPivLU;
+template<typename MatrixType> class PartialPivLU;
+namespace internal {
+template<typename MatrixType> struct inverse_impl;
+}
+template<typename MatrixType> class HouseholderQR;
+template<typename MatrixType> class ColPivHouseholderQR;
+template<typename MatrixType> class FullPivHouseholderQR;
+template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
+template<typename MatrixType, int UpLo = Lower> class LLT;
+template<typename MatrixType, int UpLo = Lower> class LDLT;
+template<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;
+template<typename Scalar> class JacobiRotation;
+
+// Geometry module:
+template<typename Derived, int _Dim> class RotationBase;
+template<typename Lhs, typename Rhs> class Cross;
+template<typename Derived> class QuaternionBase;
+template<typename Scalar> class Rotation2D;
+template<typename Scalar> class AngleAxis;
+template<typename Scalar,int Dim> class Translation;
+
+#ifdef EIGEN2_SUPPORT
+template<typename Derived, int _Dim> class eigen2_RotationBase;
+template<typename Lhs, typename Rhs> class eigen2_Cross;
+template<typename Scalar> class eigen2_Quaternion;
+template<typename Scalar> class eigen2_Rotation2D;
+template<typename Scalar> class eigen2_AngleAxis;
+template<typename Scalar,int Dim> class eigen2_Transform;
+template <typename _Scalar, int _AmbientDim> class eigen2_ParametrizedLine;
+template <typename _Scalar, int _AmbientDim> class eigen2_Hyperplane;
+template<typename Scalar,int Dim> class eigen2_Translation;
+template<typename Scalar,int Dim> class eigen2_Scaling;
+#endif
+
+#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+template<typename Scalar> class Quaternion;
+template<typename Scalar,int Dim> class Transform;
+template <typename _Scalar, int _AmbientDim> class ParametrizedLine;
+template <typename _Scalar, int _AmbientDim> class Hyperplane;
+template<typename Scalar,int Dim> class Scaling;
+#endif
+
+#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+template<typename Scalar, int Options = AutoAlign> class Quaternion;
+template<typename Scalar,int Dim,int Mode,int _Options=AutoAlign> class Transform;
+template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class ParametrizedLine;
+template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class Hyperplane;
+template<typename Scalar> class UniformScaling;
+template<typename MatrixType,int Direction> class Homogeneous;
+#endif
+
+// MatrixFunctions module
+template<typename Derived> struct MatrixExponentialReturnValue;
+template<typename Derived> class MatrixFunctionReturnValue;
+
+namespace internal {
+template <typename Scalar>
+struct stem_function
+{
+ typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+ typedef ComplexScalar type(ComplexScalar, int);
+};
+}
+
+
+#ifdef EIGEN2_SUPPORT
+template<typename ExpressionType> class Cwise;
+template<typename MatrixType> class Minor;
+template<typename MatrixType> class LU;
+template<typename MatrixType> class QR;
+template<typename MatrixType> class SVD;
+namespace internal {
+template<typename MatrixType, unsigned int Mode> struct eigen2_part_return_type;
+}
+#endif
+
+#endif // EIGEN_FORWARDDECLARATIONS_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/Macros.h b/extern/Eigen3/Eigen/src/Core/util/Macros.h
new file mode 100644
index 00000000000..6c3f1e421f0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/Macros.h
@@ -0,0 +1,418 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MACROS_H
+#define EIGEN_MACROS_H
+
+#define EIGEN_WORLD_VERSION 3
+#define EIGEN_MAJOR_VERSION 0
+#define EIGEN_MINOR_VERSION 2
+
+#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
+ (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
+ EIGEN_MINOR_VERSION>=z))))
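+
+// Usage sketch (illustrative):
+//   #if EIGEN_VERSION_AT_LEAST(3,0,0)
+//     // code relying on the Eigen 3 API
+//   #endif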
+#ifdef __GNUC__
+ #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
+#else
+ #define EIGEN_GNUC_AT_LEAST(x,y) 0
+#endif
+
+#ifdef __GNUC__
+ #define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
+#else
+ #define EIGEN_GNUC_AT_MOST(x,y) 0
+#endif
+
+#if EIGEN_GNUC_AT_MOST(4,3)
+ // see bug 89
+ #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
+#else
+ #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
+#endif
+
+#if defined(__GNUC__) && (__GNUC__ <= 3)
+#define EIGEN_GCC3_OR_OLDER 1
+#else
+#define EIGEN_GCC3_OR_OLDER 0
+#endif
+
+// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
+// 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
+// enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
+// certain common platforms (compiler+architecture combinations) to avoid these problems.
+// Only static alignment is really problematic (it relies on nonstandard compiler extensions that don't
+// work everywhere, for example not on GCC/ARM), so we try to keep heap alignment even
+// when we have to disable static alignment.
+#if defined(__GNUC__) && !(defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(__ppc__) || defined(__ia64__))
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+#else
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
+#endif
+
+// static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
+#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
+ && !EIGEN_GCC3_OR_OLDER \
+ && !defined(__SUNPRO_CC) \
+ && !defined(__QNXNTO__)
+ #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
+#else
+ #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
+#endif
+
+#ifdef EIGEN_DONT_ALIGN
+ #ifndef EIGEN_DONT_ALIGN_STATICALLY
+ #define EIGEN_DONT_ALIGN_STATICALLY
+ #endif
+ #define EIGEN_ALIGN 0
+#else
+ #define EIGEN_ALIGN 1
+#endif
+
+// EIGEN_ALIGN_STATICALLY is the true test of whether we want to align arrays on the stack or not. It takes into account both the user's choice to explicitly disable
+// alignment (EIGEN_DONT_ALIGN_STATICALLY) and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). From here on, only EIGEN_ALIGN_STATICALLY should be used.
+#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT && !defined(EIGEN_DONT_ALIGN_STATICALLY)
+ #define EIGEN_ALIGN_STATICALLY 1
+#else
+ #define EIGEN_ALIGN_STATICALLY 0
+ #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+ #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+ #endif
+#endif
+
+#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION RowMajor
+#else
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor
+#endif
+
+#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#endif
+
+/** Allows one to disable some optimizations which might affect the accuracy of the result.
+ * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
+ * They currently include:
+ * - single precision Cwise::sin() and Cwise::cos() when SSE vectorization is enabled.
+ */
+#ifndef EIGEN_FAST_MATH
+#define EIGEN_FAST_MATH 1
+#endif
+
+#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
+
+// concatenate two tokens
+#define EIGEN_CAT2(a,b) a ## b
+#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
+
+// convert a token to a string
+#define EIGEN_MAKESTRING2(a) #a
+#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
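+
+// Expansion sketch (illustrative):
+//   EIGEN_CAT(foo,bar)                     // -> foobar
+//   EIGEN_MAKESTRING(EIGEN_WORLD_VERSION)  // -> "3" (argument expanded first)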
+
+// EIGEN_ALWAYS_INLINE_ATTRIB should be used in the declaration of functions
+// which should be inlined even in debug mode.
+// FIXME with the always_inline attribute,
+// gcc 3.4.x reports the following compilation error:
+// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
+// : function body not available
+#if EIGEN_GNUC_AT_LEAST(4,0)
+#define EIGEN_ALWAYS_INLINE_ATTRIB __attribute__((always_inline))
+#else
+#define EIGEN_ALWAYS_INLINE_ATTRIB
+#endif
+
+#if EIGEN_GNUC_AT_LEAST(4,1) && !defined(__clang__) && !defined(__INTEL_COMPILER)
+#define EIGEN_FLATTEN_ATTRIB __attribute__((flatten))
+#else
+#define EIGEN_FLATTEN_ATTRIB
+#endif
+
+// EIGEN_STRONG_INLINE means "inline as much as possible"
+#if (defined _MSC_VER) || (defined __INTEL_COMPILER)
+#define EIGEN_STRONG_INLINE __forceinline
+#else
+#define EIGEN_STRONG_INLINE inline
+#endif
+
+#if (defined __GNUC__)
+#define EIGEN_DONT_INLINE __attribute__((noinline))
+#elif (defined _MSC_VER)
+#define EIGEN_DONT_INLINE __declspec(noinline)
+#else
+#define EIGEN_DONT_INLINE
+#endif
+
+// this macro allows one to get rid of linking errors about multiply defined functions.
+//  - static is not very good because it prevents definitions from different object files from being merged,
+//           so the resulting linked executable is bloated with multiple copies of the same function.
+//  - inline is not perfect either, as it hints the compiler toward inlining the function even when that is not wanted.
+#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline
+
+#ifdef NDEBUG
+# ifndef EIGEN_NO_DEBUG
+# define EIGEN_NO_DEBUG
+# endif
+#endif
+
+// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89
+#ifdef EIGEN_NO_DEBUG
+ #define eigen_plain_assert(x)
+#else
+ #if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO
+ namespace Eigen {
+ namespace internal {
+ inline bool copy_bool(bool b) { return b; }
+ }
+ }
+ #define eigen_plain_assert(x) assert(x)
+ #else
+ // work around bug 89
+ #include <cstdlib> // for abort
+ #include <iostream> // for std::cerr
+
+ namespace Eigen {
+ namespace internal {
+ // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers.
+ // see bug 89.
+ namespace {
+ EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; }
+ }
+ inline void assert_fail(const char *condition, const char *function, const char *file, int line)
+ {
+ std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl;
+ abort();
+ }
+ }
+ }
+ #define eigen_plain_assert(x) \
+ do { \
+ if(!Eigen::internal::copy_bool(x)) \
+ Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \
+ } while(false)
+ #endif
+#endif
+
+// eigen_assert can be overridden
+#ifndef eigen_assert
+#define eigen_assert(x) eigen_plain_assert(x)
+#endif
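+
+// For example, a client can turn failed assertions into exceptions by defining,
+// before including any Eigen header (a sketch; the exception type is the
+// client's choice, and <stdexcept> is assumed to be included):
+// \code
+// #define eigen_assert(x) if(!(x)) { throw std::runtime_error("Eigen assertion failed: " #x); }
+// #include <Eigen/Core>
+// \endcode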
+
+#ifdef EIGEN_INTERNAL_DEBUGGING
+#define eigen_internal_assert(x) eigen_assert(x)
+#else
+#define eigen_internal_assert(x)
+#endif
+
+#ifdef EIGEN_NO_DEBUG
+#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x
+#else
+#define EIGEN_ONLY_USED_FOR_DEBUG(x)
+#endif
+
+#if (defined __GNUC__)
+#define EIGEN_DEPRECATED __attribute__((deprecated))
+#elif (defined _MSC_VER)
+#define EIGEN_DEPRECATED __declspec(deprecated)
+#else
+#define EIGEN_DEPRECATED
+#endif
+
+#if (defined __GNUC__)
+#define EIGEN_UNUSED __attribute__((unused))
+#else
+#define EIGEN_UNUSED
+#endif
+
+// Suppresses 'unused variable' warnings.
+#define EIGEN_UNUSED_VARIABLE(var) (void)var;
+
+#if (defined __GNUC__)
+#define EIGEN_ASM_COMMENT(X) asm("#"X)
+#else
+#define EIGEN_ASM_COMMENT(X)
+#endif
+
+/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
+ * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
+ * so that vectorization doesn't affect binary compatibility.
+ *
+ * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
+ * vectorized and non-vectorized code.
+ */
+#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__)
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+#elif (defined _MSC_VER)
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
+#elif (defined __SUNPRO_CC)
+ // FIXME not sure about this one:
+ #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+#else
+ #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
+#endif
+
+#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
+
+#if EIGEN_ALIGN_STATICALLY
+#define EIGEN_USER_ALIGN_TO_BOUNDARY(n) EIGEN_ALIGN_TO_BOUNDARY(n)
+#define EIGEN_USER_ALIGN16 EIGEN_ALIGN16
+#else
+#define EIGEN_USER_ALIGN_TO_BOUNDARY(n)
+#define EIGEN_USER_ALIGN16
+#endif
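+
+// e.g. EIGEN_ALIGN16 float data[4]; declares a 16-byte-aligned buffer that is
+// safe for aligned SSE loads and stores on all of the compilers handled above.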
+
+#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
+ #define EIGEN_RESTRICT
+#endif
+#ifndef EIGEN_RESTRICT
+ #define EIGEN_RESTRICT __restrict
+#endif
+
+#ifndef EIGEN_STACK_ALLOCATION_LIMIT
+#define EIGEN_STACK_ALLOCATION_LIMIT 20000
+#endif
+
+#ifndef EIGEN_DEFAULT_IO_FORMAT
+#ifdef EIGEN_MAKING_DOCS
+// format used in Eigen's documentation
+// we need to define it here, as escaping characters in CMake's add_definitions argument seems very problematic.
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "")
+#else
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
+#endif
+#endif
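+
+// A custom format sketch (the IOFormat parameters used here are precision,
+// flags, coefficient separator, row separator, row prefix and row suffix):
+// \code
+// Eigen::IOFormat CleanFmt(4, 0, ", ", "\n", "[", "]");
+// std::cout << mat.format(CleanFmt);  // 'mat' stands for any dense Eigen object
+// \endcode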
+
+// just an empty macro!
+#define EIGEN_EMPTY
+
+#if defined(_MSC_VER) && (!defined(__INTEL_COMPILER))
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+ using Base::operator =;
+#else
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+ using Base::operator =; \
+ EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
+ { \
+ Base::operator=(other); \
+ return *this; \
+ }
+#endif
+
+#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+ EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
+
+/**
+* Just a side note: commenting within #defines only works by documenting
+* behind the object (via '!<'). Such comments cannot span multiple lines,
+* hence these extra long lines. What confuses doxygen here is that we use
+* '\' line continuations and thus have a bunch of typedefs with their
+* documentation on a single line.
+**/
+
+#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
+ typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details If Scalar is e.g. std::complex<T>, then RealScalar corresponds to T. */ \
+ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+ typedef typename Eigen::internal::nested<Derived>::type Nested; \
+ typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived>::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
+
+
+#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+ typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details If Scalar is e.g. std::complex<T>, then RealScalar corresponds to T. */ \
+ typedef typename Base::PacketScalar PacketScalar; \
+ typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+ typedef typename Eigen::internal::nested<Derived>::type Nested; \
+ typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived>::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+ MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
+ MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+ using Base::derived; \
+ using Base::const_cast_derived;
+
+
+#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
+#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
+// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
+// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
+#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
+ : ((int)a == 1 || (int)b == 1) ? 1 \
+ : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+ : ((int)a <= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
+// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
+// (between 0 and 3), it is not more than 3.
+#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
+ : ((int)a == 1 || (int)b == 1) ? 1 \
+ : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
+ : ((int)a == Dynamic) ? (int)b \
+ : ((int)b == Dynamic) ? (int)a \
+ : ((int)a <= (int)b) ? (int)a : (int)b)
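+
+// Worked examples contrasting the two variants:
+//   EIGEN_SIZE_MIN_PREFER_DYNAMIC(3, Dynamic) == Dynamic  (could be anything in [0,3])
+//   EIGEN_SIZE_MIN_PREFER_FIXED(3, Dynamic)   == 3        (never more than 3)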
+
+// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
+#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+ : ((int)a >= (int)b) ? (int)a : (int)b)
+
+#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
+
+#define EIGEN_IMPLIES(a,b) (!(a) || (b))
+
+#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
+ template<typename OtherDerived> \
+ EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
+ (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+ { \
+ return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
+ }
+
+// the expression type of a cwise product
+#define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \
+ CwiseBinaryOp< \
+ internal::scalar_product_op< \
+ typename internal::traits<LHS>::Scalar, \
+ typename internal::traits<RHS>::Scalar \
+ >, \
+ const LHS, \
+ const RHS \
+ >
+
+#endif // EIGEN_MACROS_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/Memory.h b/extern/Eigen3/Eigen/src/Core/util/Memory.h
new file mode 100644
index 00000000000..a580b95ad0d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/Memory.h
@@ -0,0 +1,911 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+// Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+
+/*****************************************************************************
+*** Platform checks for aligned malloc functions ***
+*****************************************************************************/
+
+#ifndef EIGEN_MEMORY_H
+#define EIGEN_MEMORY_H
+
+// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
+// http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
+// This is true at least since glibc 2.8.
+// This leaves the question how to detect 64-bit. According to this document,
+// http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
+// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
+// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
+#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
+ && defined(__LP64__)
+ #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
+#else
+ #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+// FreeBSD 6 seems to have 16-byte aligned malloc
+// See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
+// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
+// See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
+#if defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__)
+ #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
+#else
+ #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+#if defined(__APPLE__) \
+ || defined(_WIN64) \
+ || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \
+ || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
+ #define EIGEN_MALLOC_ALREADY_ALIGNED 1
+#else
+ #define EIGEN_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) \
+ && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+ #define EIGEN_HAS_POSIX_MEMALIGN 1
+#else
+ #define EIGEN_HAS_POSIX_MEMALIGN 0
+#endif
+
+#ifdef EIGEN_VECTORIZE_SSE
+ #define EIGEN_HAS_MM_MALLOC 1
+#else
+ #define EIGEN_HAS_MM_MALLOC 0
+#endif
+
+namespace internal {
+
+/*****************************************************************************
+*** Implementation of handmade aligned functions ***
+*****************************************************************************/
+
+/* ----- Hand made implementations of aligned malloc/free and realloc ----- */
+
+/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
+ * Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
+ */
+inline void* handmade_aligned_malloc(size_t size)
+{
+ void *original = std::malloc(size+16);
+ if (original == 0) return 0;
+ void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
+ *(reinterpret_cast<void**>(aligned) - 1) = original;
+ return aligned;
+}
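+
+// Illustration of the bookkeeping above: if std::malloc returns e.g. 0x1008,
+// masking with ~15 and adding 16 yields the aligned pointer 0x1010; the
+// original pointer is stashed in the word just below the aligned address,
+// where handmade_aligned_free can recover it.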
+
+/** \internal Frees memory allocated with handmade_aligned_malloc */
+inline void handmade_aligned_free(void *ptr)
+{
+ if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
+}
+
+/** \internal
+  * \brief Reallocates aligned memory.
+  * Since our handmade aligned malloc is based on std::malloc,
+  * we can use std::realloc to implement efficient reallocation.
+  */
+inline void* handmade_aligned_realloc(void* ptr, size_t size, size_t = 0)
+{
+ if (ptr == 0) return handmade_aligned_malloc(size);
+ void *original = *(reinterpret_cast<void**>(ptr) - 1);
+ original = std::realloc(original,size+16);
+ if (original == 0) return 0;
+ void *aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(original) & ~(size_t(15))) + 16);
+ *(reinterpret_cast<void**>(aligned) - 1) = original;
+ return aligned;
+}
+
+/*****************************************************************************
+*** Implementation of generic aligned realloc (when no realloc can be used)***
+*****************************************************************************/
+
+void* aligned_malloc(size_t size);
+void aligned_free(void *ptr);
+
+/** \internal
+ * \brief Reallocates aligned memory.
+ * Allows reallocation with aligned ptr types. This implementation will
+ * always create a new memory chunk and copy the old data.
+ */
+inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
+{
+ if (ptr==0)
+ return aligned_malloc(size);
+
+ if (size==0)
+ {
+ aligned_free(ptr);
+ return 0;
+ }
+
+ void* newptr = aligned_malloc(size);
+ if (newptr == 0)
+ {
+ #ifdef EIGEN_HAS_ERRNO
+ errno = ENOMEM; // according to the standard
+ #endif
+ return 0;
+ }
+
+ if (ptr != 0)
+ {
+ std::memcpy(newptr, ptr, (std::min)(size,old_size));
+ aligned_free(ptr);
+ }
+
+ return newptr;
+}
+
+/*****************************************************************************
+*** Implementation of portable aligned versions of malloc/free/realloc ***
+*****************************************************************************/
+
+#ifdef EIGEN_NO_MALLOC
+inline void check_that_malloc_is_allowed()
+{
+ eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
+}
+#elif defined EIGEN_RUNTIME_NO_MALLOC
+inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
+{
+ static bool value = true;
+  if (update)
+ value = new_value;
+ return value;
+}
+inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
+inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
+inline void check_that_malloc_is_allowed()
+{
+ eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
+}
+#else
+inline void check_that_malloc_is_allowed()
+{}
+#endif
+
+/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment.
+ * On allocation error, the returned pointer is null, and if exceptions are enabled then a std::bad_alloc is thrown.
+ */
+inline void* aligned_malloc(size_t size)
+{
+ check_that_malloc_is_allowed();
+
+ void *result;
+ #if !EIGEN_ALIGN
+ result = std::malloc(size);
+ #elif EIGEN_MALLOC_ALREADY_ALIGNED
+ result = std::malloc(size);
+ #elif EIGEN_HAS_POSIX_MEMALIGN
+ if(posix_memalign(&result, 16, size)) result = 0;
+ #elif EIGEN_HAS_MM_MALLOC
+ result = _mm_malloc(size, 16);
+ #elif (defined _MSC_VER)
+ result = _aligned_malloc(size, 16);
+ #else
+ result = handmade_aligned_malloc(size);
+ #endif
+
+ #ifdef EIGEN_EXCEPTIONS
+ if(result == 0)
+ throw std::bad_alloc();
+ #endif
+ return result;
+}
+
+/** \internal Frees memory allocated with aligned_malloc. */
+inline void aligned_free(void *ptr)
+{
+ #if !EIGEN_ALIGN
+ std::free(ptr);
+ #elif EIGEN_MALLOC_ALREADY_ALIGNED
+ std::free(ptr);
+ #elif EIGEN_HAS_POSIX_MEMALIGN
+ std::free(ptr);
+ #elif EIGEN_HAS_MM_MALLOC
+ _mm_free(ptr);
+ #elif defined(_MSC_VER)
+ _aligned_free(ptr);
+ #else
+ handmade_aligned_free(ptr);
+ #endif
+}
+
+/**
+* \internal
+* \brief Reallocates an aligned block of memory.
+* \throws std::bad_alloc if EIGEN_EXCEPTIONS are defined.
+**/
+inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
+{
+ EIGEN_UNUSED_VARIABLE(old_size);
+
+ void *result;
+#if !EIGEN_ALIGN
+ result = std::realloc(ptr,new_size);
+#elif EIGEN_MALLOC_ALREADY_ALIGNED
+ result = std::realloc(ptr,new_size);
+#elif EIGEN_HAS_POSIX_MEMALIGN
+ result = generic_aligned_realloc(ptr,new_size,old_size);
+#elif EIGEN_HAS_MM_MALLOC
+ // The defined(_mm_free) is just here to verify that this MSVC version
+ // implements _mm_malloc/_mm_free based on the corresponding _aligned_
+ // functions. This may not always be the case and we just try to be safe.
+ #if defined(_MSC_VER) && defined(_mm_free)
+ result = _aligned_realloc(ptr,new_size,16);
+ #else
+ result = generic_aligned_realloc(ptr,new_size,old_size);
+ #endif
+#elif defined(_MSC_VER)
+ result = _aligned_realloc(ptr,new_size,16);
+#else
+ result = handmade_aligned_realloc(ptr,new_size,old_size);
+#endif
+
+#ifdef EIGEN_EXCEPTIONS
+ if (result==0 && new_size!=0)
+ throw std::bad_alloc();
+#endif
+ return result;
+}
+
+/*****************************************************************************
+*** Implementation of conditionally aligned functions ***
+*****************************************************************************/
+
+/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
+ * On allocation error, the returned pointer is null, and if exceptions are enabled then a std::bad_alloc is thrown.
+ */
+template<bool Align> inline void* conditional_aligned_malloc(size_t size)
+{
+ return aligned_malloc(size);
+}
+
+template<> inline void* conditional_aligned_malloc<false>(size_t size)
+{
+ check_that_malloc_is_allowed();
+
+ void *result = std::malloc(size);
+ #ifdef EIGEN_EXCEPTIONS
+ if(!result) throw std::bad_alloc();
+ #endif
+ return result;
+}
+
+/** \internal Frees memory allocated with conditional_aligned_malloc */
+template<bool Align> inline void conditional_aligned_free(void *ptr)
+{
+ aligned_free(ptr);
+}
+
+template<> inline void conditional_aligned_free<false>(void *ptr)
+{
+ std::free(ptr);
+}
+
+template<bool Align> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
+{
+ return aligned_realloc(ptr, new_size, old_size);
+}
+
+template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new_size, size_t)
+{
+ return std::realloc(ptr, new_size);
+}
+
+/*****************************************************************************
+*** Construction/destruction of array elements ***
+*****************************************************************************/
+
+/** \internal Constructs the elements of an array.
+  * The \a size parameter specifies the number of objects on which to call the constructor of T.
+  */
+template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size)
+{
+ for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
+ return ptr;
+}
+
+/** \internal Destructs the elements of an array.
+  * The \a size parameter specifies the number of objects on which to call the destructor of T.
+  */
+template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size)
+{
+ // always destruct an array starting from the end.
+ if(ptr)
+ while(size) ptr[--size].~T();
+}
+
+/*****************************************************************************
+*** Implementation of aligned new/delete-like functions ***
+*****************************************************************************/
+
+/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment.
+ * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown.
+ * The default constructor of T is called.
+ */
+template<typename T> inline T* aligned_new(size_t size)
+{
+ T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
+ return construct_elements_of_array(result, size);
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_new(size_t size)
+{
+ T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+ return construct_elements_of_array(result, size);
+}
+
+/** \internal Deletes objects constructed with aligned_new.
+  * The \a size parameter specifies the number of objects on which to call the destructor of T.
+  */
+template<typename T> inline void aligned_delete(T *ptr, size_t size)
+{
+ destruct_elements_of_array<T>(ptr, size);
+ aligned_free(ptr);
+}
+
+/** \internal Deletes objects constructed with conditional_aligned_new.
+  * The \a size parameter specifies the number of objects on which to call the destructor of T.
+  */
+template<typename T, bool Align> inline void conditional_aligned_delete(T *ptr, size_t size)
+{
+ destruct_elements_of_array<T>(ptr, size);
+ conditional_aligned_free<Align>(ptr);
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size)
+{
+ if(new_size < old_size)
+ destruct_elements_of_array(pts+new_size, old_size-new_size);
+ T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
+ if(new_size > old_size)
+ construct_elements_of_array(result+old_size, new_size-old_size);
+ return result;
+}
+
+
+template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
+{
+ T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+ if(NumTraits<T>::RequireInitialization)
+ construct_elements_of_array(result, size);
+ return result;
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size)
+{
+ if(NumTraits<T>::RequireInitialization && (new_size < old_size))
+ destruct_elements_of_array(pts+new_size, old_size-new_size);
+ T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
+ if(NumTraits<T>::RequireInitialization && (new_size > old_size))
+ construct_elements_of_array(result+old_size, new_size-old_size);
+ return result;
+}
+
+template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *ptr, size_t size)
+{
+ if(NumTraits<T>::RequireInitialization)
+ destruct_elements_of_array<T>(ptr, size);
+ conditional_aligned_free<Align>(ptr);
+}
+
+/****************************************************************************/
+
+/** \internal Returns the index of the first element of the array that is well aligned for vectorization.
+ *
+ * \param array the address of the start of the array
+ * \param size the size of the array
+ *
+ * \note If no element of the array is well aligned, the size of the array is returned. Typically,
+ * for example with SSE, "well aligned" means 16-byte-aligned. If vectorization is disabled or if the
+ * packet size for the given scalar type is 1, then everything is considered well-aligned.
+ *
+ * \note If the scalar type is vectorizable, we rely on the following assumptions: sizeof(Scalar) is a
+ * power of 2, the packet size in bytes is also a power of 2, and is a multiple of sizeof(Scalar). On the
+ * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
+ * example with Scalar=double on certain 32-bit platforms, see bug #79.
+ *
+ * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
+ */
+template<typename Scalar, typename Index>
+inline static Index first_aligned(const Scalar* array, Index size)
+{
+ typedef typename packet_traits<Scalar>::type Packet;
+ enum { PacketSize = packet_traits<Scalar>::size,
+ PacketAlignedMask = PacketSize-1
+ };
+
+ if(PacketSize==1)
+ {
+ // Either there is no vectorization, or a packet consists of exactly 1 scalar so that all elements
+ // of the array have the same alignment.
+ return 0;
+ }
+ else if(size_t(array) & (sizeof(Scalar)-1))
+ {
+ // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar.
+ // Consequently, no element of the array is well aligned.
+ return size;
+ }
+ else
+ {
+ return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
+ & PacketAlignedMask, size);
+ }
+}
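+
+// Worked example: with SSE, PacketSize is 4 for float. For a float array at
+// address 0x1008 (scalar-aligned, but not 16-byte-aligned), the next packet
+// boundary is 0x1010, two floats ahead, so first_aligned returns min(2, size).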
+
+} // end namespace internal
+
+/*****************************************************************************
+*** Implementation of runtime stack allocation (falling back to malloc) ***
+*****************************************************************************/
+
+// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
+// to the appropriate stack allocation function
+#ifndef EIGEN_ALLOCA
+ #if (defined __linux__)
+ #define EIGEN_ALLOCA alloca
+ #elif defined(_MSC_VER)
+ #define EIGEN_ALLOCA _alloca
+ #endif
+#endif
+
+namespace internal {
+
+// This helper class constructs the allocated memory, and takes care of destructing and freeing the handled data
+// at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of exceptions.
+template<typename T> class aligned_stack_memory_handler
+{
+ public:
+ /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
+ * Note that \a ptr can be 0 regardless of the other parameters.
+ * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
+     * In this case, the buffer elements will also be destructed when this handler is destructed.
+ * Finally, if \a dealloc is true, then the pointer \a ptr is freed.
+ **/
+ aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc)
+ : m_ptr(ptr), m_size(size), m_deallocate(dealloc)
+ {
+ if(NumTraits<T>::RequireInitialization && m_ptr)
+ Eigen::internal::construct_elements_of_array(m_ptr, size);
+ }
+ ~aligned_stack_memory_handler()
+ {
+ if(NumTraits<T>::RequireInitialization && m_ptr)
+ Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
+ if(m_deallocate)
+ Eigen::internal::aligned_free(m_ptr);
+ }
+ protected:
+ T* m_ptr;
+ size_t m_size;
+ bool m_deallocate;
+};
+
+}
+
+/** \internal
+  * Declares, allocates and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
+ * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
+ * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
+ * The allocated buffer is automatically deleted when exiting the scope of this declaration.
+  * If BUFFER is non-null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
+ * Here is an example:
+ * \code
+ * {
+ * ei_declare_aligned_stack_constructed_variable(float,data,size,0);
+ * // use data[0] to data[size-1]
+ * }
+ * \endcode
+  * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
+ */
+#ifdef EIGEN_ALLOCA
+
+ #ifdef __arm__
+ #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
+ #else
+ #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
+ #endif
+
+ #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+ TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
+ : reinterpret_cast<TYPE*>( \
+ (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
+ : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \
+ Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
+
+#else
+
+ #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+ TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
+ Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
+
+#endif
+
+
+/*****************************************************************************
+*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF] ***
+*****************************************************************************/
+
+#if EIGEN_ALIGN
+ #ifdef EIGEN_EXCEPTIONS
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ void* operator new(size_t size, const std::nothrow_t&) throw() { \
+ try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
+ catch (...) { return 0; } \
+ return 0; \
+ }
+ #else
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ void* operator new(size_t size, const std::nothrow_t&) throw() { \
+ return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+ }
+ #endif
+
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
+ void *operator new(size_t size) { \
+ return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+ } \
+ void *operator new[](size_t size) { \
+ return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+ } \
+ void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+ void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+  /* In-place new and delete. Since, as far as we know, there is no actual */ \
+  /* memory allocated, we can safely let the default implementation handle */ \
+  /* this particular case. */ \
+ static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
+ void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
+ /* nothrow-new (returns zero instead of std::bad_alloc) */ \
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+ void operator delete(void *ptr, const std::nothrow_t&) throw() { \
+ Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
+ } \
+ typedef void eigen_aligned_operator_new_marker_type;
+#else
+ #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+#endif
+
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0))
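+
+// Typical client-side sketch: a class holding a fixed-size vectorizable Eigen
+// member pulls in the aligned operator new via this macro, so that
+// heap-allocated instances are correctly aligned:
+// \code
+// struct MyClass {
+//   Eigen::Matrix4f transform;   // fixed-size member requiring 16-byte alignment
+//   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+// };
+// MyClass *p = new MyClass;      // now guaranteed to be 16-byte aligned
+// \endcode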
+
+/****************************************************************************/
+
+/** \class aligned_allocator
+* \ingroup Core_Module
+*
+* \brief STL compatible allocator to use with 16-byte-aligned types
+*
+* Example:
+* \code
+* // Matrix4f requires 16 bytes alignment:
+* std::map< int, Matrix4f, std::less<int>,
+* aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
+* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:
+* std::map< int, Vector3f > my_map_vec3;
+* \endcode
+*
+* \sa \ref TopicStlContainers.
+*/
+template<class T>
+class aligned_allocator
+{
+public:
+ typedef size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+
+ template<class U>
+ struct rebind
+ {
+ typedef aligned_allocator<U> other;
+ };
+
+ pointer address( reference value ) const
+ {
+ return &value;
+ }
+
+ const_pointer address( const_reference value ) const
+ {
+ return &value;
+ }
+
+ aligned_allocator() throw()
+ {
+ }
+
+ aligned_allocator( const aligned_allocator& ) throw()
+ {
+ }
+
+ template<class U>
+ aligned_allocator( const aligned_allocator<U>& ) throw()
+ {
+ }
+
+ ~aligned_allocator() throw()
+ {
+ }
+
+ size_type max_size() const throw()
+ {
+ return (std::numeric_limits<size_type>::max)();
+ }
+
+ pointer allocate( size_type num, const void* hint = 0 )
+ {
+ EIGEN_UNUSED_VARIABLE(hint);
+ return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
+ }
+
+ void construct( pointer p, const T& value )
+ {
+ ::new( p ) T( value );
+ }
+
+ void destroy( pointer p )
+ {
+ p->~T();
+ }
+
+ void deallocate( pointer p, size_type /*num*/ )
+ {
+ internal::aligned_free( p );
+ }
+
+ bool operator!=(const aligned_allocator<T>& ) const
+ { return false; }
+
+ bool operator==(const aligned_allocator<T>& ) const
+ { return true; }
+};
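+
+// Usage sketch with an STL vector of fixed-size vectorizable objects (for
+// std::vector specifically, see also the Eigen/StdVector header):
+// \code
+// std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > vec;
+// vec.push_back(Eigen::Vector4f::Zero());
+// \endcode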
+
+//---------- Cache sizes ----------
+
+#if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
+# if defined(__PIC__) && defined(__i386__)
+ // Case for x86 with PIC
+# define EIGEN_CPUID(abcd,func,id) \
+ __asm__ __volatile__ ("xchgl %%ebx, %%esi;cpuid; xchgl %%ebx,%%esi": "=a" (abcd[0]), "=S" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
+# else
+ // Case for x86_64 or x86 w/o PIC
+# define EIGEN_CPUID(abcd,func,id) \
+ __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id) );
+# endif
+#elif defined(_MSC_VER)
+# if (_MSC_VER > 1500)
+# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
+# endif
+#endif
+
+namespace internal {
+
+#ifdef EIGEN_CPUID
+
+inline bool cpuid_is_vendor(int abcd[4], const char* vendor)
+{
+ return abcd[1]==((int*)(vendor))[0] && abcd[3]==((int*)(vendor))[1] && abcd[2]==((int*)(vendor))[2];
+}
+
+inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
+{
+ int abcd[4];
+ l1 = l2 = l3 = 0;
+ int cache_id = 0;
+ int cache_type = 0;
+ do {
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ EIGEN_CPUID(abcd,0x4,cache_id);
+ cache_type = (abcd[0] & 0x0F) >> 0;
+ if(cache_type==1||cache_type==3) // data or unified cache
+ {
+ int cache_level = (abcd[0] & 0xE0) >> 5; // A[7:5]
+ int ways = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
+ int partitions = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
+ int line_size = (abcd[1] & 0x00000FFF) >> 0; // B[11:0]
+ int sets = (abcd[2]); // C[31:0]
+
+ int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);
+
+ switch(cache_level)
+ {
+ case 1: l1 = cache_size; break;
+ case 2: l2 = cache_size; break;
+ case 3: l3 = cache_size; break;
+ default: break;
+ }
+ }
+ cache_id++;
+ } while(cache_type>0 && cache_id<16);
+}
+
+inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
+{
+ int abcd[4];
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ l1 = l2 = l3 = 0;
+ EIGEN_CPUID(abcd,0x00000002,0);
+ unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
+ bool check_for_p2_core2 = false;
+ for(int i=0; i<14; ++i)
+ {
+ switch(bytes[i])
+ {
+ case 0x0A: l1 = 8; break; // 0Ah data L1 cache, 8 KB, 2 ways, 32 byte lines
+ case 0x0C: l1 = 16; break; // 0Ch data L1 cache, 16 KB, 4 ways, 32 byte lines
+ case 0x0E: l1 = 24; break; // 0Eh data L1 cache, 24 KB, 6 ways, 64 byte lines
+ case 0x10: l1 = 16; break; // 10h data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
+ case 0x15: l1 = 16; break; // 15h code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
+ case 0x2C: l1 = 32; break; // 2Ch data L1 cache, 32 KB, 8 ways, 64 byte lines
+ case 0x30: l1 = 32; break; // 30h code L1 cache, 32 KB, 8 ways, 64 byte lines
+ case 0x60: l1 = 16; break; // 60h data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
+ case 0x66: l1 = 8; break; // 66h data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
+ case 0x67: l1 = 16; break; // 67h data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
+ case 0x68: l1 = 32; break; // 68h data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
+ case 0x1A: l2 = 96; break; // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
+ case 0x22: l3 = 512; break; // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
+ case 0x23: l3 = 1024; break; // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x25: l3 = 2048; break; // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x29: l3 = 4096; break; // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x39: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
+ case 0x3A: l2 = 192; break; // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
+ case 0x3B: l2 = 128; break; // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
+ case 0x3C: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
+ case 0x3D: l2 = 384; break; // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
+ case 0x3E: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
+ case 0x40: l2 = 0; break; // no integrated L2 cache (P6 core) or L3 cache (P4 core)
+ case 0x41: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
+ case 0x42: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
+ case 0x43: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
+ case 0x44: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
+ case 0x45: l2 = 2048; break; // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
+ case 0x46: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
+ case 0x47: l3 = 8192; break; // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
+ case 0x48: l2 = 3072; break; // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
+ case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
+ case 0x4A: l3 = 6144; break; // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
+ case 0x4B: l3 = 8192; break; // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
+ case 0x4C: l3 = 12288; break; // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
+ case 0x4D: l3 = 16384; break; // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
+ case 0x4E: l2 = 6144; break; // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
+ case 0x78: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
+ case 0x79: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x7A: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x7B: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x7C: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
+ case 0x7D: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
+ case 0x7E: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
+ case 0x7F: l2 = 512; break; // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
+ case 0x80: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
+ case 0x81: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
+ case 0x82: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
+ case 0x83: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
+ case 0x84: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
+ case 0x85: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
+ case 0x86: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
+ case 0x87: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
+ case 0x88: l3 = 2048; break; // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
+ case 0x89: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
+ case 0x8A: l3 = 8192; break; // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
+ case 0x8D: l3 = 3072; break; // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)
+
+ default: break;
+ }
+ }
+ if(check_for_p2_core2 && l2 == l3)
+ l3 = 0;
+ l1 *= 1024;
+ l2 *= 1024;
+ l3 *= 1024;
+}
+
+inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
+{
+ if(max_std_funcs>=4)
+ queryCacheSizes_intel_direct(l1,l2,l3);
+ else
+ queryCacheSizes_intel_codes(l1,l2,l3);
+}
+
+inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
+{
+ int abcd[4];
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ EIGEN_CPUID(abcd,0x80000005,0);
+ l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
+ abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+ EIGEN_CPUID(abcd,0x80000006,0);
+  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = L2 cache size in KB
+  l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31:18] = L3 cache size in units of 512 KB
+}
+#endif
+
+/** \internal
+ * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */
+inline void queryCacheSizes(int& l1, int& l2, int& l3)
+{
+ #ifdef EIGEN_CPUID
+ int abcd[4];
+
+ // identify the CPU vendor
+ EIGEN_CPUID(abcd,0x0,0);
+ int max_std_funcs = abcd[1];
+ if(cpuid_is_vendor(abcd,"GenuineIntel"))
+ queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
+ else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!"))
+ queryCacheSizes_amd(l1,l2,l3);
+ else
+ // by default let's use Intel's API
+ queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
+
+ // here is the list of other vendors:
+// ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
+// ||cpuid_is_vendor(abcd,"CyrixInstead")
+// ||cpuid_is_vendor(abcd,"CentaurHauls")
+// ||cpuid_is_vendor(abcd,"GenuineTMx86")
+// ||cpuid_is_vendor(abcd,"TransmetaCPU")
+// ||cpuid_is_vendor(abcd,"RiseRiseRise")
+// ||cpuid_is_vendor(abcd,"Geode by NSC")
+// ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
+// ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
+// ||cpuid_is_vendor(abcd,"NexGenDriven")
+ #else
+ l1 = l2 = l3 = -1;
+ #endif
+}
+
+/** \internal
+ * \returns the size in Bytes of the L1 data cache */
+inline int queryL1CacheSize()
+{
+ int l1(-1), l2, l3;
+ queryCacheSizes(l1,l2,l3);
+ return l1;
+}
+
+/** \internal
+  * \returns the size in Bytes of the L2 cache, or of the L3 cache if the latter is present */
+inline int queryTopLevelCacheSize()
+{
+ int l1, l2(-1), l3(-1);
+ queryCacheSizes(l1,l2,l3);
+ return (std::max)(l2,l3);
+}
+
+} // end namespace internal
+
+#endif // EIGEN_MEMORY_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/Meta.h b/extern/Eigen3/Eigen/src/Core/util/Meta.h
new file mode 100644
index 00000000000..4518261efef
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/Meta.h
@@ -0,0 +1,229 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_META_H
+#define EIGEN_META_H
+
+namespace internal {
+
+/** \internal
+ * \file Meta.h
+ * This file contains generic metaprogramming classes which are not specifically related to Eigen.
+  * \note In case you wonder: yes, we're aware that Boost already provides all these features;
+  * however, we don't want to add a dependency on Boost.
+ */
+
+struct true_type { enum { value = 1 }; };
+struct false_type { enum { value = 0 }; };
+
+template<bool Condition, typename Then, typename Else>
+struct conditional { typedef Then type; };
+
+template<typename Then, typename Else>
+struct conditional <false, Then, Else> { typedef Else type; };
+
+template<typename T, typename U> struct is_same { enum { value = 0 }; };
+template<typename T> struct is_same<T,T> { enum { value = 1 }; };
+
+template<typename T> struct remove_reference { typedef T type; };
+template<typename T> struct remove_reference<T&> { typedef T type; };
+
+template<typename T> struct remove_pointer { typedef T type; };
+template<typename T> struct remove_pointer<T*> { typedef T type; };
+template<typename T> struct remove_pointer<T*const> { typedef T type; };
+
+template <class T> struct remove_const { typedef T type; };
+template <class T> struct remove_const<const T> { typedef T type; };
+template <class T> struct remove_const<const T[]> { typedef T type[]; };
+template <class T, unsigned int Size> struct remove_const<const T[Size]> { typedef T type[Size]; };
+
+template<typename T> struct remove_all { typedef T type; };
+template<typename T> struct remove_all<const T> { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T const&> { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T&> { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T const*> { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T*> { typedef typename remove_all<T>::type type; };
+
+template<typename T> struct is_arithmetic { enum { value = false }; };
+template<> struct is_arithmetic<float> { enum { value = true }; };
+template<> struct is_arithmetic<double> { enum { value = true }; };
+template<> struct is_arithmetic<long double> { enum { value = true }; };
+template<> struct is_arithmetic<bool> { enum { value = true }; };
+template<> struct is_arithmetic<char> { enum { value = true }; };
+template<> struct is_arithmetic<signed char> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned char> { enum { value = true }; };
+template<> struct is_arithmetic<signed short> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned short>{ enum { value = true }; };
+template<> struct is_arithmetic<signed int> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned int> { enum { value = true }; };
+template<> struct is_arithmetic<signed long> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned long> { enum { value = true }; };
+template<> struct is_arithmetic<signed long long> { enum { value = true }; };
+template<> struct is_arithmetic<unsigned long long> { enum { value = true }; };
+
+template <typename T> struct add_const { typedef const T type; };
+template <typename T> struct add_const<T&> { typedef T& type; };
+
+template <typename T> struct is_const { enum { value = 0 }; };
+template <typename T> struct is_const<T const> { enum { value = 1 }; };
+
+template<typename T> struct add_const_on_value_type { typedef const T type; };
+template<typename T> struct add_const_on_value_type<T&> { typedef T const& type; };
+template<typename T> struct add_const_on_value_type<T*> { typedef T const* type; };
+template<typename T> struct add_const_on_value_type<T* const> { typedef T const* const type; };
+template<typename T> struct add_const_on_value_type<T const* const> { typedef T const* const type; };
+
+/** \internal Allows enabling/disabling an overload
+  * according to a compile-time condition.
+ */
+template<bool Condition, typename T> struct enable_if;
+
+template<typename T> struct enable_if<true,T>
+{ typedef T type; };
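+
+// Usage sketch (a hypothetical helper, not part of this file): enable an
+// overload only for arithmetic types.
+// \code
+// template<typename T>
+// typename enable_if<is_arithmetic<T>::value,T>::type twice(const T& x) { return x + x; }
+// \endcode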
+
+/** \internal
+ * Convenient struct to get the result type of a unary or binary functor.
+ *
+  * It supports both the current STL mechanism (using the result_type member) as well as
+  * the upcoming next-generation STL mechanism (using a templated result member).
+  * If neither of these members is provided, then the type of the first argument is returned. FIXME, that behavior is a pretty bad hack.
+ */
+template<typename T> struct result_of {};
+
+struct has_none {int a[1];};
+struct has_std_result_type {int a[2];};
+struct has_tr1_result {int a[3];};
+
+template<typename Func, typename ArgType, int SizeOf=sizeof(has_none)>
+struct unary_result_of_select {typedef ArgType type;};
+
+template<typename Func, typename ArgType>
+struct unary_result_of_select<Func, ArgType, sizeof(has_std_result_type)> {typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType>
+struct unary_result_of_select<Func, ArgType, sizeof(has_tr1_result)> {typedef typename Func::template result<Func(ArgType)>::type type;};
+
+template<typename Func, typename ArgType>
+struct result_of<Func(ArgType)> {
+ template<typename T>
+ static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
+ template<typename T>
+ static has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType)>::type const * = 0);
+ static has_none testFunctor(...);
+
+ // note that the following indirection is needed for gcc-3.3
+ enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+ typedef typename unary_result_of_select<Func, ArgType, FunctorType>::type type;
+};
+
+template<typename Func, typename ArgType0, typename ArgType1, int SizeOf=sizeof(has_none)>
+struct binary_result_of_select {typedef ArgType0 type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
+{typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
+{typedef typename Func::template result<Func(ArgType0,ArgType1)>::type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct result_of<Func(ArgType0,ArgType1)> {
+ template<typename T>
+ static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
+ template<typename T>
+ static has_tr1_result testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1)>::type const * = 0);
+ static has_none testFunctor(...);
+
+ // note that the following indirection is needed for gcc-3.3
+ enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+ typedef typename binary_result_of_select<Func, ArgType0, ArgType1, FunctorType>::type type;
+};
+
+/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer.
+ * Usage example: \code meta_sqrt<1023>::ret \endcode
+ */
+template<int Y,
+ int InfX = 0,
+ int SupX = ((Y==1) ? 1 : Y/2),
+ bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >
+ // use ?: instead of || just to shut up a stupid gcc 4.3 warning
+class meta_sqrt
+{
+ enum {
+ MidX = (InfX+SupX)/2,
+ TakeInf = MidX*MidX > Y ? 1 : 0,
+ NewInf = int(TakeInf) ? InfX : int(MidX),
+ NewSup = int(TakeInf) ? int(MidX) : SupX
+ };
+ public:
+ enum { ret = meta_sqrt<Y,NewInf,NewSup>::ret };
+};
+
+template<int Y, int InfX, int SupX>
+class meta_sqrt<Y, InfX, SupX, true> { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
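+
+// e.g. meta_sqrt<1023>::ret evaluates to 31, since 31*31 = 961 <= 1023 < 1024 = 32*32.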
+
+/** \internal determines whether the product of two numeric types is allowed and what the return type is */
+template<typename T, typename U> struct scalar_product_traits;
+
+template<typename T> struct scalar_product_traits<T,T>
+{
+ //enum { Cost = NumTraits<T>::MulCost };
+ typedef T ReturnType;
+};
+
+template<typename T> struct scalar_product_traits<T,std::complex<T> >
+{
+ //enum { Cost = 2*NumTraits<T>::MulCost };
+ typedef std::complex<T> ReturnType;
+};
+
+template<typename T> struct scalar_product_traits<std::complex<T>, T>
+{
+ //enum { Cost = 2*NumTraits<T>::MulCost };
+ typedef std::complex<T> ReturnType;
+};
+
+// FIXME quick workaround around current limitation of result_of
+// template<typename Scalar, typename ArgType0, typename ArgType1>
+// struct result_of<scalar_product_op<Scalar>(ArgType0,ArgType1)> {
+// typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type;
+// };
+
+template<typename T> struct is_diagonal
+{ enum { ret = false }; };
+
+template<typename T> struct is_diagonal<DiagonalBase<T> >
+{ enum { ret = true }; };
+
+template<typename T> struct is_diagonal<DiagonalWrapper<T> >
+{ enum { ret = true }; };
+
+template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
+{ enum { ret = true }; };
+
+} // end namespace internal
+
+#endif // EIGEN_META_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h b/extern/Eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h
new file mode 100644
index 00000000000..5ddfbd4aa68
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/ReenableStupidWarnings.h
@@ -0,0 +1,14 @@
+#ifdef EIGEN_WARNINGS_DISABLED
+#undef EIGEN_WARNINGS_DISABLED
+
+#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+ #ifdef _MSC_VER
+ #pragma warning( pop )
+ #elif defined __INTEL_COMPILER
+ #pragma warning pop
+ #elif defined __clang__
+ #pragma clang diagnostic pop
+ #endif
+#endif
+
+#endif // EIGEN_WARNINGS_DISABLED
diff --git a/extern/Eigen2/Eigen/src/Core/util/StaticAssert.h b/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h
index 2c13098a20f..99c7c9972f0 100644
--- a/extern/Eigen2/Eigen/src/Core/util/StaticAssert.h
+++ b/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h
@@ -1,8 +1,8 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -29,11 +29,11 @@
/* Some notes on Eigen's static assertion mechanism:
*
* - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean
- * expression, and MSG an enum listed in struct ei_static_assert<true>
+ * expression, and MSG an enum listed in struct internal::static_assertion<true>
*
* - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time)
* in that case, the static assertion is converted to the following runtime assert:
- * ei_assert(CONDITION && "MSG")
+ * eigen_assert(CONDITION && "MSG")
*
* - currently EIGEN_STATIC_ASSERT can only be used in function scope
*
@@ -41,18 +41,20 @@
#ifndef EIGEN_NO_STATIC_ASSERT
- #ifdef __GXX_EXPERIMENTAL_CXX0X__
+ #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
// if native static_assert is enabled, let's use it
#define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
- #else // CXX0X
+ #else // not CXX0X
+
+ namespace internal {
template<bool condition>
- struct ei_static_assert {};
+ struct static_assertion {};
template<>
- struct ei_static_assert<true>
+ struct static_assertion<true>
{
enum {
YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
@@ -60,10 +62,15 @@
YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES,
THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,
THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,
+ THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE,
YOU_MADE_A_PROGRAMMING_MISTAKE,
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT,
+ EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,
YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,
+ YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,
UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,
- NUMERIC_TYPE_MUST_BE_FLOATING_POINT,
+ THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES,
+ NUMERIC_TYPE_MUST_BE_REAL,
COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED,
WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED,
THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE,
@@ -74,23 +81,38 @@
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES,
THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES,
INVALID_MATRIX_TEMPLATE_PARAMETERS,
+ INVALID_MATRIXBASE_TEMPLATE_PARAMETERS,
BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER,
- THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX
+ THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX,
+ THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE,
+ THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES,
+ YOU_ALREADY_SPECIFIED_THIS_STRIDE,
+ INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION,
+ THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD,
+ PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1,
+ THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS,
+ YOU_CANNOT_MIX_ARRAYS_AND_MATRICES,
+ YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION,
+ THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY,
+ YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT,
+ THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS
};
};
+ } // end namespace internal
+
// Specialized implementation for MSVC to avoid "conditional
// expression is constant" warnings. This implementation doesn't
// appear to work under GCC, hence the multiple implementations.
#ifdef _MSC_VER
#define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
- {Eigen::ei_static_assert<CONDITION ? true : false>::MSG;}
+ {Eigen::internal::static_assertion<bool(CONDITION)>::MSG;}
#else
#define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
- if (Eigen::ei_static_assert<CONDITION ? true : false>::MSG) {}
+ if (Eigen::internal::static_assertion<bool(CONDITION)>::MSG) {}
#endif
@@ -98,7 +120,7 @@
#else // EIGEN_NO_STATIC_ASSERT
- #define EIGEN_STATIC_ASSERT(CONDITION,MSG) ei_assert((CONDITION) && #MSG);
+ #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
#endif // EIGEN_NO_STATIC_ASSERT
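The mechanics of the pre-C++0x fallback in the hunks above: the message is an enumerator that exists only in the static_assertion<true> specialization, so a false condition triggers a compile error that names the message. A standalone sketch (names shortened; not the exact Eigen macros):

    template<bool condition> struct static_assertion {};
    template<> struct static_assertion<true>
    {
      enum { YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES };
    };

    #define MY_STATIC_ASSERT(CONDITION,MSG) \
      if (static_assertion<bool(CONDITION)>::MSG) {}

    void check()
    {
      MY_STATIC_ASSERT(sizeof(long) >= sizeof(int),
                       YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);   // ok: enumerator found
      // A failing condition instantiates static_assertion<false>, which lacks
      // the enumerator, so the diagnostic quotes its name (exact wording is
      // compiler-specific).
    }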
@@ -113,6 +135,11 @@
EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \
YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR)
+// static assertion failing if the type \a TYPE is not dynamic-size
+#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \
+ EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \
+ YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR)
+
// static assertion failing if the type \a TYPE is not a vector type of the given size
#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \
EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \
@@ -132,12 +159,26 @@
YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES)
#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
- ((int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
- || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
- || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
- && (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
- || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
- || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime)))
+ ( \
+ (int(TYPE0::SizeAtCompileTime)==0 && int(TYPE1::SizeAtCompileTime)==0) \
+ || (\
+ (int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
+ && (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
+ || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\
+ ) \
+ )
+
+#ifdef EIGEN2_SUPPORT
+ #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+ eigen_assert(!NumTraits<Scalar>::IsInteger);
+#else
+ #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+ EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
+#endif
+
// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes
#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
@@ -145,4 +186,13 @@
EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\
YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
+#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \
+ EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \
+ (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \
+ THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)
+
+#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \
+ EIGEN_STATIC_ASSERT(internal::is_lvalue<Derived>::value, \
+ THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY)
+
#endif // EIGEN_STATIC_ASSERT_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/XprHelper.h b/extern/Eigen3/Eigen/src/Core/util/XprHelper.h
new file mode 100644
index 00000000000..9047c5f8350
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Core/util/XprHelper.h
@@ -0,0 +1,460 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_XPRHELPER_H
+#define EIGEN_XPRHELPER_H
+
+// just a workaround because GCC seems to not really like empty structs
+// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
+// so currently we simply disable this optimization for gcc 4.3
+#if (defined __GNUG__) && !((__GNUC__==4) && (__GNUC_MINOR__==3))
+ #define EIGEN_EMPTY_STRUCT_CTOR(X) \
+ EIGEN_STRONG_INLINE X() {} \
+ EIGEN_STRONG_INLINE X(const X& ) {}
+#else
+ #define EIGEN_EMPTY_STRUCT_CTOR(X)
+#endif
+
+typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;
+
+namespace internal {
+
+//classes inheriting no_assignment_operator don't generate a default operator=.
+class no_assignment_operator
+{
+ private:
+ no_assignment_operator& operator=(const no_assignment_operator&);
+};
+
+/** \internal return the index type with the largest number of bits */
+template<typename I1, typename I2>
+struct promote_index_type
+{
+ typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
+};
+
+/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
+ * can be accessed using value() and setValue().
+ * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
+ */
+template<typename T, int Value> class variable_if_dynamic
+{
+ public:
+ EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
+ explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); }
+ static T value() { return T(Value); }
+ void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamic<T, Dynamic>
+{
+ T m_value;
+ variable_if_dynamic() { assert(false); }
+ public:
+ explicit variable_if_dynamic(T value) : m_value(value) {}
+ T value() const { return m_value; }
+ void setValue(T value) { m_value = value; }
+};
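The pair of definitions above is a storage optimization: for a compile-time-known value the class is empty and value() recomputes the template parameter, while the Dynamic specialization actually carries the number. A usage sketch, assuming the definitions above and Eigen's Dynamic constant:

    void sketch()
    {
      variable_if_dynamic<int, 3> fixed_rows(3);       // stores nothing; ctor only checks consistency
      variable_if_dynamic<int, Dynamic> dyn_rows(42);  // stores the runtime value

      int a = fixed_rows.value();   // 3, recovered from the template parameter
      int b = dyn_rows.value();     // 42, read from the member
      dyn_rows.setValue(7);         // meaningful only in the Dynamic case
      (void)a; (void)b;
    }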
+
+template<typename T> struct functor_traits
+{
+ enum
+ {
+ Cost = 10,
+ PacketAccess = false
+ };
+};
+
+template<typename T> struct packet_traits;
+
+template<typename T> struct unpacket_traits
+{
+ typedef T type;
+ enum {size=1};
+};
+
+template<typename _Scalar, int _Rows, int _Cols,
+ int _Options = AutoAlign |
+ ( (_Rows==1 && _Cols!=1) ? RowMajor
+ : (_Cols==1 && _Rows!=1) ? ColMajor
+ : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+ int _MaxRows = _Rows,
+ int _MaxCols = _Cols
+> class make_proper_matrix_type
+{
+ enum {
+ IsColVector = _Cols==1 && _Rows!=1,
+ IsRowVector = _Rows==1 && _Cols!=1,
+ Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
+ : IsRowVector ? (_Options | RowMajor) & ~ColMajor
+ : _Options
+ };
+ public:
+ typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+class compute_matrix_flags
+{
+ enum {
+ row_major_bit = Options&RowMajor ? RowMajorBit : 0,
+ is_dynamic_size_storage = MaxRows==Dynamic || MaxCols==Dynamic,
+
+ aligned_bit =
+ (
+ ((Options&DontAlign)==0)
+ && packet_traits<Scalar>::Vectorizable
+ && (
+#if EIGEN_ALIGN_STATICALLY
+ ((!is_dynamic_size_storage) && (((MaxCols*MaxRows) % packet_traits<Scalar>::size) == 0))
+#else
+ 0
+#endif
+
+ ||
+
+#if EIGEN_ALIGN
+ is_dynamic_size_storage
+#else
+ 0
+#endif
+
+ )
+ ) ? AlignedBit : 0,
+ packet_access_bit = packet_traits<Scalar>::Vectorizable && aligned_bit ? PacketAccessBit : 0
+ };
+
+ public:
+ enum { ret = LinearAccessBit | LvalueBit | DirectAccessBit | NestByRefBit | packet_access_bit | row_major_bit | aligned_bit };
+};
+
+template<int _Rows, int _Cols> struct size_at_compile_time
+{
+ enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
+};
+
+/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
+ * whereas eval is a const reference in the case of a matrix
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
+template<typename T, typename BaseClassType> struct plain_matrix_type_dense;
+template<typename T> struct plain_matrix_type<T,Dense>
+{
+ typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind>::type type;
+};
+
+template<typename T> struct plain_matrix_type_dense<T,MatrixXpr>
+{
+ typedef Matrix<typename traits<T>::Scalar,
+ traits<T>::RowsAtCompileTime,
+ traits<T>::ColsAtCompileTime,
+ AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+ traits<T>::MaxRowsAtCompileTime,
+ traits<T>::MaxColsAtCompileTime
+ > type;
+};
+
+template<typename T> struct plain_matrix_type_dense<T,ArrayXpr>
+{
+ typedef Array<typename traits<T>::Scalar,
+ traits<T>::RowsAtCompileTime,
+ traits<T>::ColsAtCompileTime,
+ AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+ traits<T>::MaxRowsAtCompileTime,
+ traits<T>::MaxColsAtCompileTime
+ > type;
+};
+
+/* eval : the return type of eval(). For matrices, this is just a const reference
+ * in order to avoid a useless copy
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
+
+template<typename T> struct eval<T,Dense>
+{
+ typedef typename plain_matrix_type<T>::type type;
+// typedef typename T::PlainObject type;
+// typedef T::Matrix<typename traits<T>::Scalar,
+// traits<T>::RowsAtCompileTime,
+// traits<T>::ColsAtCompileTime,
+// AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+// traits<T>::MaxRowsAtCompileTime,
+// traits<T>::MaxColsAtCompileTime
+// > type;
+};
+
+// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+ typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+ typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
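The practical consequence of these specializations (illustrative mapping; exact expression types elided):

    // eval<Matrix3f>::type                -->  const Matrix3f&     (no copy)
    // eval<Array33f>::type                -->  const Array33f&     (no copy)
    // eval<some cwise expression>::type   -->  a plain Matrix/Array temporary,
    //                                          via plain_matrix_type

so evaluating a plain object is free, while evaluating an expression materializes it exactly once.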
+
+
+
+/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
+ */
+template<typename T> struct plain_matrix_type_column_major
+{
+ enum { Rows = traits<T>::RowsAtCompileTime,
+ Cols = traits<T>::ColsAtCompileTime,
+ MaxRows = traits<T>::MaxRowsAtCompileTime,
+ MaxCols = traits<T>::MaxColsAtCompileTime
+ };
+ typedef Matrix<typename traits<T>::Scalar,
+ Rows,
+ Cols,
+ (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
+ MaxRows,
+ MaxCols
+ > type;
+};
+
+/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
+ */
+template<typename T> struct plain_matrix_type_row_major
+{
+ enum { Rows = traits<T>::RowsAtCompileTime,
+ Cols = traits<T>::ColsAtCompileTime,
+ MaxRows = traits<T>::MaxRowsAtCompileTime,
+ MaxCols = traits<T>::MaxColsAtCompileTime
+ };
+ typedef Matrix<typename traits<T>::Scalar,
+ Rows,
+ Cols,
+ (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
+ MaxRows,
+ MaxCols
+ > type;
+};
+
+// we should be able to get rid of this one too
+template<typename T> struct must_nest_by_value { enum { ret = false }; };
+
+template<class T>
+struct is_reference
+{
+ enum { ret = false };
+};
+
+template<class T>
+struct is_reference<T&>
+{
+ enum { ret = true };
+};
+
+/**
+* \internal The reference selector for template expressions. The idea is that we don't
+* need to use references for expressions since they are lightweight proxy
+* objects which should generate no copying overhead.
+**/
+template <typename T>
+struct ref_selector
+{
+ typedef typename conditional<
+ bool(traits<T>::Flags & NestByRefBit),
+ T const&,
+ T
+ >::type type;
+};
+
+/** \internal Determines how a given expression should be nested into another one.
+ * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
+ * nested into the bigger product expression. The choice is between nesting the expression b+c as-is, or
+ * evaluating that expression b+c into a temporary variable d, and nesting d so that the resulting expression is
+ * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
+ * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
+ *
+ * \param T the type of the expression being nested
+ * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
+ *
+ * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c).
+ * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it,
+ * the Product expression uses: nested<S, 3>::ret, which turns out to be Matrix3d because the internal logic of
+ * nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand,
+ * since a is of type Matrix3d, the Product expression nests it as nested<Matrix3d, 3>::ret, which turns out to be
+ * const Matrix3d&, because the internal logic of nested determined that since a was already a matrix, there was no point
+ * in copying it into another matrix.
+ */
+template<typename T, int n=1, typename PlainObject = typename eval<T>::type> struct nested
+{
+ enum {
+ // for the purpose of this test, to keep it reasonably simple, we arbitrarily choose a fixed value to stand in for Dynamic.
+ // the choice of 10000 makes it larger than any practical fixed value and even most dynamic values.
+ // in extreme cases where these assumptions would be wrong, we would still at worst suffer performance issues
+ // (poor choice of temporaries).
+ // it's important that this value can still be squared without integer overflow.
+ DynamicAsInteger = 10000,
+ ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
+ ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? DynamicAsInteger : ScalarReadCost,
+ CoeffReadCost = traits<T>::CoeffReadCost,
+ CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? DynamicAsInteger : CoeffReadCost,
+ NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n,
+ CostEvalAsInteger = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger,
+ CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger
+ };
+
+ typedef typename conditional<
+ ( (int(traits<T>::Flags) & EvalBeforeNestingBit) ||
+ int(CostEvalAsInteger) < int(CostNoEvalAsInteger)
+ ),
+ PlainObject,
+ typename ref_selector<T>::type
+ >::type type;
+};
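A worked instance of the cost test, assuming ReadCost == AddCost == 1 for double (illustrative numbers, not normative):

    // S = b+c with b, c of type Matrix3d, nested with n = 3:
    //   CoeffReadCost(S) = 1 + 1 + 1 = 3        (read b, read c, one addition)
    //   CostEval   = (3+1)*1 + 3 = 7
    //   CostNoEval =  3*3        = 9            -> 7 < 9: evaluate S into a Matrix3d temporary
    //
    // a, a plain Matrix3d, nested with n = 3:
    //   CostEval   = (3+1)*1 + 1 = 5
    //   CostNoEval =  3*1        = 3            -> 5 >= 3: nest a by const reference

This reproduces the Matrix3d versus const Matrix3d& outcome described in the comment above.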
+
+template<typename T>
+T* const_cast_ptr(const T* ptr)
+{
+ return const_cast<T*>(ptr);
+}
+
+template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
+struct dense_xpr_base
+{
+ /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, MatrixXpr>
+{
+ typedef MatrixBase<Derived> type;
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, ArrayXpr>
+{
+ typedef ArrayBase<Derived> type;
+};
+
+/** \internal Helper base class to add a scalar multiple operator
+ * overloads for complex types */
+template<typename Derived,typename Scalar,typename OtherScalar,
+ bool EnableIt = !is_same<Scalar,OtherScalar>::value >
+struct special_scalar_op_base : public DenseCoeffsBase<Derived>
+{
+ // dummy operator* so that the
+ // "using special_scalar_op_base::operator*" compiles
+ void operator*() const;
+};
+
+template<typename Derived,typename Scalar,typename OtherScalar>
+struct special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public DenseCoeffsBase<Derived>
+{
+ const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+ operator*(const OtherScalar& scalar) const
+ {
+ return CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+ (*static_cast<const Derived*>(this), scalar_multiple2_op<Scalar,OtherScalar>(scalar));
+ }
+
+ inline friend const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+ operator*(const OtherScalar& scalar, const Derived& matrix)
+ { return static_cast<const special_scalar_op_base&>(matrix).operator*(scalar); }
+};
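Effect of the enabled overload, sketched against Eigen3 proper: with Scalar = std::complex<float> and OtherScalar = float, a bare real factor is accepted on either side of a complex matrix (illustrative usage, not a normative API guarantee):

    #include <Eigen/Dense>

    void sketch()
    {
      Eigen::MatrixXcf M = Eigen::MatrixXcf::Random(2, 2);
      Eigen::MatrixXcf N = 2.0f * M;   // friend operator*(OtherScalar, Derived)
      Eigen::MatrixXcf P = M * 2.0f;   // member operator*(const OtherScalar&)
    }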
+
+template<typename XprType, typename CastType> struct cast_return_type
+{
+ typedef typename XprType::Scalar CurrentScalarType;
+ typedef typename remove_all<CastType>::type _CastType;
+ typedef typename _CastType::Scalar NewScalarType;
+ typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
+ const XprType&,CastType>::type type;
+};
+
+template <typename A, typename B> struct promote_storage_type;
+
+template <typename A> struct promote_storage_type<A,A>
+{
+ typedef A ret;
+};
+
+/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
+ * \param Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.
+ */
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_row_type
+{
+ typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
+ ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
+ typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
+ ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
+
+ typedef typename conditional<
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+ MatrixRowType,
+ ArrayRowType
+ >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_col_type
+{
+ typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
+ ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
+ typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
+ ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
+
+ typedef typename conditional<
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+ MatrixColType,
+ ArrayColType
+ >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_diag_type
+{
+ enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
+ max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
+ };
+ typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
+ typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
+
+ typedef typename conditional<
+ is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+ MatrixDiagType,
+ ArrayDiagType
+ >::type type;
+};
+
+template<typename ExpressionType>
+struct is_lvalue
+{
+ enum { value = !bool(is_const<ExpressionType>::value) &&
+ bool(traits<ExpressionType>::Flags & LvalueBit) };
+};
+
+} // end namespace internal
+
+#endif // EIGEN_XPRHELPER_H
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Block.h b/extern/Eigen3/Eigen/src/Eigen2Support/Block.h
new file mode 100644
index 00000000000..bc28051e017
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Block.h
@@ -0,0 +1,137 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BLOCK2_H
+#define EIGEN_BLOCK2_H
+
+/** \returns a dynamic-size expression of a corner of *this.
+ *
+ * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight,
+ * \a Eigen::BottomLeft, \a Eigen::BottomRight.
+ * \param cRows the number of rows in the corner
+ * \param cCols the number of columns in the corner
+ *
+ * Example: \include MatrixBase_corner_enum_int_int.cpp
+ * Output: \verbinclude MatrixBase_corner_enum_int_int.out
+ *
+ * \note Even though the returned expression has dynamic size, in the case
+ * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
+ * which means that evaluating it does not cause a dynamic memory allocation.
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<typename Derived>
+inline Block<Derived> DenseBase<Derived>
+ ::corner(CornerType type, Index cRows, Index cCols)
+{
+ switch(type)
+ {
+ default:
+ eigen_assert(false && "Bad corner type.");
+ case TopLeft:
+ return Block<Derived>(derived(), 0, 0, cRows, cCols);
+ case TopRight:
+ return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
+ case BottomLeft:
+ return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
+ case BottomRight:
+ return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+ }
+}
+
+/** This is the const version of corner(CornerType, Index, Index).*/
+template<typename Derived>
+inline const Block<Derived>
+DenseBase<Derived>::corner(CornerType type, Index cRows, Index cCols) const
+{
+ switch(type)
+ {
+ default:
+ eigen_assert(false && "Bad corner type.");
+ case TopLeft:
+ return Block<Derived>(derived(), 0, 0, cRows, cCols);
+ case TopRight:
+ return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
+ case BottomLeft:
+ return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
+ case BottomRight:
+ return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+ }
+}
+
+/** \returns a fixed-size expression of a corner of *this.
+ *
+ * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight,
+ * \a Eigen::BottomLeft, \a Eigen::BottomRight.
+ *
+ * The template parameters CRows and CCols are the number of rows and columns in the corner.
+ *
+ * Example: \include MatrixBase_template_int_int_corner_enum.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_corner_enum.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<typename Derived>
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols>
+DenseBase<Derived>::corner(CornerType type)
+{
+ switch(type)
+ {
+ default:
+ eigen_assert(false && "Bad corner type.");
+ case TopLeft:
+ return Block<Derived, CRows, CCols>(derived(), 0, 0);
+ case TopRight:
+ return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
+ case BottomLeft:
+ return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
+ case BottomRight:
+ return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
+ }
+}
+
+/** This is the const version of corner<int, int>(CornerType).*/
+template<typename Derived>
+template<int CRows, int CCols>
+inline const Block<Derived, CRows, CCols>
+DenseBase<Derived>::corner(CornerType type) const
+{
+ switch(type)
+ {
+ default:
+ eigen_assert(false && "Bad corner type.");
+ case TopLeft:
+ return Block<Derived, CRows, CCols>(derived(), 0, 0);
+ case TopRight:
+ return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
+ case BottomLeft:
+ return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
+ case BottomRight:
+ return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
+ }
+}
+
+#endif // EIGEN_BLOCK2_H
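A usage sketch for the compatibility corner() API defined above (requires EIGEN2_SUPPORT; values are illustrative):

    #include <Eigen/Dense>

    void sketch()
    {
      Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 4);

      // dynamic-size corner: a 2x3 block anchored at the top-left
      Eigen::MatrixXd tl = m.corner(Eigen::TopLeft, 2, 3);

      // fixed-size corner: a 2x2 block anchored at the bottom-right
      Eigen::Matrix2d br = m.corner<2, 2>(Eigen::BottomRight);
    }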
diff --git a/extern/Eigen2/Eigen/src/Core/Cwise.h b/extern/Eigen3/Eigen/src/Eigen2Support/Cwise.h
index 4dc9d514b04..2dc83b6a7dd 100644
--- a/extern/Eigen2/Eigen/src/Core/Cwise.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Cwise.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -29,30 +29,18 @@
/** \internal
 * convenient macro to define the return type of a cwise binary operation */
#define EIGEN_CWISE_BINOP_RETURN_TYPE(OP) \
- CwiseBinaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType, OtherDerived>
-
-#define EIGEN_CWISE_PRODUCT_RETURN_TYPE \
- CwiseBinaryOp< \
- ei_scalar_product_op< \
- typename ei_scalar_product_traits< \
- typename ei_traits<ExpressionType>::Scalar, \
- typename ei_traits<OtherDerived>::Scalar \
- >::ReturnType \
- >, \
- ExpressionType, \
- OtherDerived \
- >
+ CwiseBinaryOp<OP<typename internal::traits<ExpressionType>::Scalar>, ExpressionType, OtherDerived>
/** \internal
 * convenient macro to define the return type of a cwise unary operation */
#define EIGEN_CWISE_UNOP_RETURN_TYPE(OP) \
- CwiseUnaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType>
+ CwiseUnaryOp<OP<typename internal::traits<ExpressionType>::Scalar>, ExpressionType>
/** \internal
 * convenient macro to define the return type of a cwise comparison to a scalar */
#define EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(OP) \
- CwiseBinaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType, \
- NestByValue<typename ExpressionType::ConstantReturnType> >
+ CwiseBinaryOp<OP<typename internal::traits<ExpressionType>::Scalar>, ExpressionType, \
+ typename ExpressionType::ConstantReturnType >
/** \class Cwise
*
@@ -64,21 +52,22 @@
* It is the return type of MatrixBase::cwise()
* and most of the time this is the only way it is used.
*
- * Note that some methods are defined in the \ref Array module.
- *
* Example: \include MatrixBase_cwise_const.cpp
* Output: \verbinclude MatrixBase_cwise_const.out
*
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_CWISE_PLUGIN.
+ *
* \sa MatrixBase::cwise() const, MatrixBase::cwise()
*/
template<typename ExpressionType> class Cwise
{
public:
- typedef typename ei_traits<ExpressionType>::Scalar Scalar;
- typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
- ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
- typedef CwiseUnaryOp<ei_scalar_add_op<Scalar>, ExpressionType> ScalarAddReturnType;
+ typedef typename internal::traits<ExpressionType>::Scalar Scalar;
+ typedef typename internal::conditional<internal::must_nest_by_value<ExpressionType>::ret,
+ ExpressionType, const ExpressionType&>::type ExpressionTypeNested;
+ typedef CwiseUnaryOp<internal::scalar_add_op<Scalar>, ExpressionType> ScalarAddReturnType;
inline Cwise(const ExpressionType& matrix) : m_matrix(matrix) {}
@@ -86,32 +75,36 @@ template<typename ExpressionType> class Cwise
inline const ExpressionType& _expression() const { return m_matrix; }
template<typename OtherDerived>
- const EIGEN_CWISE_PRODUCT_RETURN_TYPE
+ const EIGEN_CWISE_PRODUCT_RETURN_TYPE(ExpressionType,OtherDerived)
operator*(const MatrixBase<OtherDerived> &other) const;
template<typename OtherDerived>
- const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
+ const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
operator/(const MatrixBase<OtherDerived> &other) const;
+ /** \deprecated ArrayBase::min() */
template<typename OtherDerived>
- const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)
- min(const MatrixBase<OtherDerived> &other) const;
+ const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)
+ (min)(const MatrixBase<OtherDerived> &other) const
+ { return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived()); }
+ /** \deprecated ArrayBase::max() */
template<typename OtherDerived>
- const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)
- max(const MatrixBase<OtherDerived> &other) const;
-
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) abs() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) abs2() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op) square() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op) cube() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op) inverse() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op) sqrt() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op) exp() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op) log() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op) cos() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op) sin() const;
- const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op) pow(const Scalar& exponent) const;
+ const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)
+ (max)(const MatrixBase<OtherDerived> &other) const
+ { return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived()); }
+
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op) abs() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op) abs2() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_square_op) square() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cube_op) cube() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_inverse_op) inverse() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sqrt_op) sqrt() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_exp_op) exp() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_log_op) log() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cos_op) cos() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sin_op) sin() const;
+ const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op) pow(const Scalar& exponent) const;
const ScalarAddReturnType
operator+(const Scalar& scalar) const;
@@ -178,11 +171,9 @@ template<typename ExpressionType> class Cwise
protected:
ExpressionTypeNested m_matrix;
-
- private:
- Cwise& operator=(const Cwise&);
};
+
/** \returns a Cwise wrapper of *this providing additional coefficient-wise operations
*
* Example: \include MatrixBase_cwise_const.cpp
@@ -191,8 +182,7 @@ template<typename ExpressionType> class Cwise
* \sa class Cwise, cwise()
*/
template<typename Derived>
-inline const Cwise<Derived>
-MatrixBase<Derived>::cwise() const
+inline const Cwise<Derived> MatrixBase<Derived>::cwise() const
{
return derived();
}
@@ -205,8 +195,7 @@ MatrixBase<Derived>::cwise() const
* \sa class Cwise, cwise() const
*/
template<typename Derived>
-inline Cwise<Derived>
-MatrixBase<Derived>::cwise()
+inline Cwise<Derived> MatrixBase<Derived>::cwise()
{
return derived();
}
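A usage sketch of the deprecated Eigen2-style interface this layer preserves (requires EIGEN2_SUPPORT; the Eigen3 equivalent is .array()):

    #include <Eigen/Dense>

    void sketch()
    {
      Eigen::Matrix3d A = Eigen::Matrix3d::Random();
      Eigen::Matrix3d B = Eigen::Matrix3d::Random();

      Eigen::Matrix3d P = A.cwise() * B;    // coefficient-wise product
      Eigen::Matrix3d R = A.cwise().abs();  // coefficient-wise absolute value
      Eigen::Matrix3d S = A.cwise() + 1.0;  // add a scalar to every coefficient
    }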
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/CwiseOperators.h b/extern/Eigen3/Eigen/src/Eigen2Support/CwiseOperators.h
new file mode 100644
index 00000000000..9c28559c329
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/CwiseOperators.h
@@ -0,0 +1,309 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ARRAY_CWISE_OPERATORS_H
+#define EIGEN_ARRAY_CWISE_OPERATORS_H
+
+/***************************************************************************
+* The following functions were defined in Core
+***************************************************************************/
+
+
+/** \deprecated ArrayBase::abs() */
+template<typename ExpressionType>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op)
+Cwise<ExpressionType>::abs() const
+{
+ return _expression();
+}
+
+/** \deprecated ArrayBase::abs2() */
+template<typename ExpressionType>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op)
+Cwise<ExpressionType>::abs2() const
+{
+ return _expression();
+}
+
+/** \deprecated ArrayBase::exp() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_exp_op)
+Cwise<ExpressionType>::exp() const
+{
+ return _expression();
+}
+
+/** \deprecated ArrayBase::log() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_log_op)
+Cwise<ExpressionType>::log() const
+{
+ return _expression();
+}
+
+/** \deprecated ArrayBase::operator*() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(ExpressionType,OtherDerived)
+Cwise<ExpressionType>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_PRODUCT_RETURN_TYPE(ExpressionType,OtherDerived)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator/() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+Cwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator*=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline ExpressionType& Cwise<ExpressionType>::operator*=(const MatrixBase<OtherDerived> &other)
+{
+ return m_matrix.const_cast_derived() = *this * other;
+}
+
+/** \deprecated ArrayBase::operator/=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline ExpressionType& Cwise<ExpressionType>::operator/=(const MatrixBase<OtherDerived> &other)
+{
+ return m_matrix.const_cast_derived() = *this / other;
+}
+
+/***************************************************************************
+* The following functions were defined in Array
+***************************************************************************/
+
+// -- unary operators --
+
+/** \deprecated ArrayBase::sqrt() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sqrt_op)
+Cwise<ExpressionType>::sqrt() const
+{
+ return _expression();
+}
+
+/** \deprecated ArrayBase::cos() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cos_op)
+Cwise<ExpressionType>::cos() const
+{
+ return _expression();
+}
+
+
+/** \deprecated ArrayBase::sin() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sin_op)
+Cwise<ExpressionType>::sin() const
+{
+ return _expression();
+}
+
+
+/** \deprecated ArrayBase::pow() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op)
+Cwise<ExpressionType>::pow(const Scalar& exponent) const
+{
+ return EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op)(_expression(), internal::scalar_pow_op<Scalar>(exponent));
+}
+
+
+/** \deprecated ArrayBase::inverse() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_inverse_op)
+Cwise<ExpressionType>::inverse() const
+{
+ return _expression();
+}
+
+/** \deprecated ArrayBase::square() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_square_op)
+Cwise<ExpressionType>::square() const
+{
+ return _expression();
+}
+
+/** \deprecated ArrayBase::cube() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cube_op)
+Cwise<ExpressionType>::cube() const
+{
+ return _expression();
+}
+
+
+// -- binary operators --
+
+/** \deprecated ArrayBase::operator<() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)
+Cwise<ExpressionType>::operator<(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator<=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)
+Cwise<ExpressionType>::operator<=(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator>() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)
+Cwise<ExpressionType>::operator>(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator>=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)
+Cwise<ExpressionType>::operator>=(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator==() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)
+Cwise<ExpressionType>::operator==(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator!=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)
+Cwise<ExpressionType>::operator!=(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)(_expression(), other.derived());
+}
+
+// comparisons to scalar value
+
+/** \deprecated ArrayBase::operator<(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)
+Cwise<ExpressionType>::operator<(Scalar s) const
+{
+ return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)(_expression(),
+ typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator<=(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)
+Cwise<ExpressionType>::operator<=(Scalar s) const
+{
+ return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)(_expression(),
+ typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator>(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)
+Cwise<ExpressionType>::operator>(Scalar s) const
+{
+ return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)(_expression(),
+ typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator>=(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)
+Cwise<ExpressionType>::operator>=(Scalar s) const
+{
+ return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)(_expression(),
+ typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator==(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)
+Cwise<ExpressionType>::operator==(Scalar s) const
+{
+ return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)(_expression(),
+ typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator!=(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)
+Cwise<ExpressionType>::operator!=(Scalar s) const
+{
+ return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)(_expression(),
+ typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+// scalar addition
+
+/** \deprecated ArrayBase::operator+(Scalar) */
+template<typename ExpressionType>
+inline const typename Cwise<ExpressionType>::ScalarAddReturnType
+Cwise<ExpressionType>::operator+(const Scalar& scalar) const
+{
+ return typename Cwise<ExpressionType>::ScalarAddReturnType(m_matrix, internal::scalar_add_op<Scalar>(scalar));
+}
+
+/** \deprecated ArrayBase::operator+=(Scalar) */
+template<typename ExpressionType>
+inline ExpressionType& Cwise<ExpressionType>::operator+=(const Scalar& scalar)
+{
+ return m_matrix.const_cast_derived() = *this + scalar;
+}
+
+/** \deprecated ArrayBase::operator-(Scalar) */
+template<typename ExpressionType>
+inline const typename Cwise<ExpressionType>::ScalarAddReturnType
+Cwise<ExpressionType>::operator-(const Scalar& scalar) const
+{
+ return *this + (-scalar);
+}
+
+/** \deprecated ArrayBase::operator-=(Scalar) */
+template<typename ExpressionType>
+inline ExpressionType& Cwise<ExpressionType>::operator-=(const Scalar& scalar)
+{
+ return m_matrix.const_cast_derived() = *this - scalar;
+}
+
+#endif // EIGEN_ARRAY_CWISE_OPERATORS_H
diff --git a/extern/Eigen2/Eigen/src/Geometry/AlignedBox.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
index 14ec9261e3a..78df29d408a 100644
--- a/extern/Eigen2/Eigen/src/Geometry/AlignedBox.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
@@ -22,8 +22,7 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_ALIGNEDBOX_H
-#define EIGEN_ALIGNEDBOX_H
+// no include guard; this header is included twice from Eigen2Support's All.h, and it's internal anyway
/** \geometry_module \ingroup Geometry_Module
* \nonstableyet
@@ -64,7 +63,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
~AlignedBox() {}
/** \returns the dimension in which the box holds */
- inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; }
+ inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : int(AmbientDimAtCompileTime); }
/** \returns true if the box is null, i.e, empty. */
inline bool isNull() const { return (m_min.cwise() > m_max).any(); }
@@ -72,18 +71,18 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
/** Makes \c *this a null/empty box. */
inline void setNull()
{
- m_min.setConstant( std::numeric_limits<Scalar>::max());
- m_max.setConstant(-std::numeric_limits<Scalar>::max());
+ m_min.setConstant( (std::numeric_limits<Scalar>::max)());
+ m_max.setConstant(-(std::numeric_limits<Scalar>::max)());
}
/** \returns the minimal corner */
- inline const VectorType& min() const { return m_min; }
+ inline const VectorType& (min)() const { return m_min; }
/** \returns a non const reference to the minimal corner */
- inline VectorType& min() { return m_min; }
+ inline VectorType& (min)() { return m_min; }
/** \returns the maximal corner */
- inline const VectorType& max() const { return m_max; }
+ inline const VectorType& (max)() const { return m_max; }
/** \returns a non const reference to the maximal corner */
- inline VectorType& max() { return m_max; }
+ inline VectorType& (max)() { return m_max; }
/** \returns true if the point \a p is inside the box \c *this. */
inline bool contains(const VectorType& p) const
@@ -91,19 +90,19 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
/** \returns true if the box \a b is entirely inside the box \c *this. */
inline bool contains(const AlignedBox& b) const
- { return (m_min.cwise()<=b.min()).all() && (b.max().cwise()<=m_max).all(); }
+ { return (m_min.cwise()<=(b.min)()).all() && ((b.max)().cwise()<=m_max).all(); }
/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
inline AlignedBox& extend(const VectorType& p)
- { m_min = m_min.cwise().min(p); m_max = m_max.cwise().max(p); return *this; }
+ { m_min = (m_min.cwise().min)(p); m_max = (m_max.cwise().max)(p); return *this; }
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
inline AlignedBox& extend(const AlignedBox& b)
- { m_min = m_min.cwise().min(b.m_min); m_max = m_max.cwise().max(b.m_max); return *this; }
+ { m_min = (m_min.cwise().min)(b.m_min); m_max = (m_max.cwise().max)(b.m_max); return *this; }
/** Clamps \c *this by the box \a b and returns a reference to \c *this. */
inline AlignedBox& clamp(const AlignedBox& b)
- { m_min = m_min.cwise().max(b.m_min); m_max = m_max.cwise().min(b.m_max); return *this; }
+ { m_min = (m_min.cwise().max)(b.m_min); m_max = (m_max.cwise().min)(b.m_max); return *this; }
/** Translate \c *this by the vector \a t and returns a reference to \c *this. */
inline AlignedBox& translate(const VectorType& t)
@@ -128,10 +127,10 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<AlignedBox,
+ inline typename internal::cast_return_type<AlignedBox,
AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
{
- return typename ei_cast_return_type<AlignedBox,
+ return typename internal::cast_return_type<AlignedBox,
AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
}
@@ -139,8 +138,8 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==
template<typename OtherScalarType>
inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
{
- m_min = other.min().template cast<Scalar>();
- m_max = other.max().template cast<Scalar>();
+ m_min = (other.min)().template cast<Scalar>();
+ m_max = (other.max)().template cast<Scalar>();
}
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
@@ -169,5 +168,3 @@ inline Scalar AlignedBox<Scalar,AmbiantDim>::squaredExteriorDistance(const Vecto
}
return dist2;
}
-
-#endif // EIGEN_ALIGNEDBOX_H
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/All.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/All.h
new file mode 100644
index 00000000000..9d8244b07a0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/All.h
@@ -0,0 +1,115 @@
+#ifndef EIGEN2_GEOMETRY_MODULE_H
+#define EIGEN2_GEOMETRY_MODULE_H
+
+#include <limits>
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+#include "RotationBase.h"
+#include "Rotation2D.h"
+#include "Quaternion.h"
+#include "AngleAxis.h"
+#include "Transform.h"
+#include "Translation.h"
+#include "Scaling.h"
+#include "AlignedBox.h"
+#include "Hyperplane.h"
+#include "ParametrizedLine.h"
+#endif
+
+
+#define RotationBase eigen2_RotationBase
+#define Rotation2D eigen2_Rotation2D
+#define Rotation2Df eigen2_Rotation2Df
+#define Rotation2Dd eigen2_Rotation2Dd
+
+#define Quaternion eigen2_Quaternion
+#define Quaternionf eigen2_Quaternionf
+#define Quaterniond eigen2_Quaterniond
+
+#define AngleAxis eigen2_AngleAxis
+#define AngleAxisf eigen2_AngleAxisf
+#define AngleAxisd eigen2_AngleAxisd
+
+#define Transform eigen2_Transform
+#define Transform2f eigen2_Transform2f
+#define Transform2d eigen2_Transform2d
+#define Transform3f eigen2_Transform3f
+#define Transform3d eigen2_Transform3d
+
+#define Translation eigen2_Translation
+#define Translation2f eigen2_Translation2f
+#define Translation2d eigen2_Translation2d
+#define Translation3f eigen2_Translation3f
+#define Translation3d eigen2_Translation3d
+
+#define Scaling eigen2_Scaling
+#define Scaling2f eigen2_Scaling2f
+#define Scaling2d eigen2_Scaling2d
+#define Scaling3f eigen2_Scaling3f
+#define Scaling3d eigen2_Scaling3d
+
+#define AlignedBox eigen2_AlignedBox
+
+#define Hyperplane eigen2_Hyperplane
+#define ParametrizedLine eigen2_ParametrizedLine
+
+#define ei_toRotationMatrix eigen2_ei_toRotationMatrix
+#define ei_quaternion_assign_impl eigen2_ei_quaternion_assign_impl
+#define ei_transform_product_impl eigen2_ei_transform_product_impl
+
+#include "RotationBase.h"
+#include "Rotation2D.h"
+#include "Quaternion.h"
+#include "AngleAxis.h"
+#include "Transform.h"
+#include "Translation.h"
+#include "Scaling.h"
+#include "AlignedBox.h"
+#include "Hyperplane.h"
+#include "ParametrizedLine.h"
+
+#undef ei_toRotationMatrix
+#undef ei_quaternion_assign_impl
+#undef ei_transform_product_impl
+
+#undef RotationBase
+#undef Rotation2D
+#undef Rotation2Df
+#undef Rotation2Dd
+
+#undef Quaternion
+#undef Quaternionf
+#undef Quaterniond
+
+#undef AngleAxis
+#undef AngleAxisf
+#undef AngleAxisd
+
+#undef Transform
+#undef Transform2f
+#undef Transform2d
+#undef Transform3f
+#undef Transform3d
+
+#undef Translation
+#undef Translation2f
+#undef Translation2d
+#undef Translation3f
+#undef Translation3d
+
+#undef Scaling
+#undef Scaling2f
+#undef Scaling2d
+#undef Scaling3f
+#undef Scaling3d
+
+#undef AlignedBox
+
+#undef Hyperplane
+#undef ParametrizedLine
+
+#endif // EIGEN2_GEOMETRY_MODULE_H
\ No newline at end of file
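The block of #define/#undef pairs above is why the individual geometry headers carry no include guards: All.h parses each of them twice, once under the plain class names (for early EIGEN2_SUPPORT stages) and once with every public name macro-renamed to an eigen2_ prefix, so both APIs coexist in one build. The trick in miniature (illustrative only; rot2d.h is a hypothetical header):

    // rot2d.h -- intentionally no include guard
    template<typename T> class Rotation2D { /* ... */ };

    // consumer.cpp
    #include "rot2d.h"                    // defines ::Rotation2D
    #define Rotation2D eigen2_Rotation2D
    #include "rot2d.h"                    // the same source now defines ::eigen2_Rotation2D
    #undef Rotation2D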
diff --git a/extern/Eigen2/Eigen/src/Geometry/AngleAxis.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
index 91e3d0d61ec..f7b2d51e3e2 100644
--- a/extern/Eigen2/Eigen/src/Geometry/AngleAxis.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
@@ -22,8 +22,8 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_ANGLEAXIS_H
-#define EIGEN_ANGLEAXIS_H
+// no include guard; this header is included twice from Eigen2Support's All.h, and it's internal anyway
+
/** \geometry_module \ingroup Geometry_Module
*
@@ -139,8 +139,8 @@ public:
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
- { return typename ei_cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
+ inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
+ { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
@@ -224,5 +224,3 @@ AngleAxis<Scalar>::toRotationMatrix(void) const
return res;
}
-
-#endif // EIGEN_ANGLEAXIS_H
diff --git a/extern/Eigen2/Eigen/src/Geometry/Hyperplane.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
index 22c530d4be0..81c4f55b173 100644
--- a/extern/Eigen2/Eigen/src/Geometry/Hyperplane.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
@@ -23,8 +23,7 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_HYPERPLANE_H
-#define EIGEN_HYPERPLANE_H
+// no include guard; this header is included twice from Eigen2Support's All.h, and it's internal anyway
/** \geometry_module \ingroup Geometry_Module
*
@@ -52,9 +51,9 @@ public:
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
- typedef Matrix<Scalar,AmbientDimAtCompileTime==Dynamic
+ typedef Matrix<Scalar,int(AmbientDimAtCompileTime)==Dynamic
? Dynamic
- : AmbientDimAtCompileTime+1,1> Coefficients;
+ : int(AmbientDimAtCompileTime)+1,1> Coefficients;
typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
/** Default constructor without initialization */
@@ -71,7 +70,7 @@ public:
: m_coeffs(n.size()+1)
{
normal() = n;
- offset() = -e.dot(n);
+ offset() = -e.eigen2_dot(n);
}
/** Constructs a plane from its normal \a n and distance to the origin \a d
@@ -92,7 +91,7 @@ public:
{
Hyperplane result(p0.size());
result.normal() = (p1 - p0).unitOrthogonal();
- result.offset() = -result.normal().dot(p0);
+ result.offset() = -result.normal().eigen2_dot(p0);
return result;
}
@@ -104,7 +103,7 @@ public:
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
Hyperplane result(p0.size());
result.normal() = (p2 - p0).cross(p1 - p0).normalized();
- result.offset() = -result.normal().dot(p0);
+ result.offset() = -result.normal().eigen2_dot(p0);
return result;
}
@@ -116,13 +115,13 @@ public:
explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
{
normal() = parametrized.direction().unitOrthogonal();
- offset() = -normal().dot(parametrized.origin());
+ offset() = -normal().eigen2_dot(parametrized.origin());
}
~Hyperplane() {}
/** \returns the dimension in which the plane holds */
- inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : AmbientDimAtCompileTime; }
+ inline int dim() const { return int(AmbientDimAtCompileTime)==Dynamic ? m_coeffs.size()-1 : int(AmbientDimAtCompileTime); }
/** normalizes \c *this */
void normalize(void)
@@ -133,7 +132,7 @@ public:
/** \returns the signed distance between the plane \c *this and a point \a p.
* \sa absDistance()
*/
- inline Scalar signedDistance(const VectorType& p) const { return p.dot(normal()) + offset(); }
+ inline Scalar signedDistance(const VectorType& p) const { return p.eigen2_dot(normal()) + offset(); }
/** \returns the absolute distance between the plane \c *this and a point \a p.
* \sa signedDistance()
@@ -147,7 +146,7 @@ public:
/** \returns a constant reference to the unit normal vector of the plane, which corresponds
* to the linear part of the implicit equation.
*/
- inline const NormalReturnType normal() const { return NormalReturnType(m_coeffs,0,0,dim(),1); }
+ inline const NormalReturnType normal() const { return NormalReturnType(*const_cast<Coefficients*>(&m_coeffs),0,0,dim(),1); }
/** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
* to the linear part of the implicit equation.
@@ -231,7 +230,7 @@ public:
TransformTraits traits = Affine)
{
transform(t.linear(), traits);
- offset() -= t.translation().dot(normal());
+ offset() -= t.translation().eigen2_dot(normal());
return *this;
}
@@ -241,10 +240,10 @@ public:
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<Hyperplane,
+ inline typename internal::cast_return_type<Hyperplane,
Hyperplane<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
{
- return typename ei_cast_return_type<Hyperplane,
+ return typename internal::cast_return_type<Hyperplane,
Hyperplane<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
}
@@ -264,5 +263,3 @@ protected:
Coefficients m_coeffs;
};
-
-#endif // EIGEN_HYPERPLANE_H
diff --git a/extern/Eigen2/Eigen/src/Geometry/ParametrizedLine.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
index 2b990d084f0..411c4b57079 100644
--- a/extern/Eigen2/Eigen/src/Geometry/ParametrizedLine.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
@@ -23,8 +23,8 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_PARAMETRIZEDLINE_H
-#define EIGEN_PARAMETRIZEDLINE_H
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
/** \geometry_module \ingroup Geometry_Module
*
@@ -85,7 +85,7 @@ public:
RealScalar squaredDistance(const VectorType& p) const
{
VectorType diff = p-origin();
- return (diff - diff.dot(direction())* direction()).squaredNorm();
+ return (diff - diff.eigen2_dot(direction())* direction()).squaredNorm();
}
/** \returns the distance of a point \a p to its projection onto the line \c *this.
* \sa squaredDistance()
@@ -94,7 +94,7 @@ public:
/** \returns the projection of a point \a p onto the line \c *this. */
VectorType projection(const VectorType& p) const
- { return origin() + (p-origin()).dot(direction()) * direction(); }
+ { return origin() + (p-origin()).eigen2_dot(direction()) * direction(); }
Scalar intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane);
@@ -104,10 +104,10 @@ public:
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<ParametrizedLine,
+ inline typename internal::cast_return_type<ParametrizedLine,
ParametrizedLine<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
{
- return typename ei_cast_return_type<ParametrizedLine,
+ return typename internal::cast_return_type<ParametrizedLine,
ParametrizedLine<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
}
@@ -148,8 +148,6 @@ inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane
template <typename _Scalar, int _AmbientDim>
inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane)
{
- return -(hyperplane.offset()+origin().dot(hyperplane.normal()))
- /(direction().dot(hyperplane.normal()));
+ return -(hyperplane.offset()+origin().eigen2_dot(hyperplane.normal()))
+ /(direction().eigen2_dot(hyperplane.normal()));
}
-
-#endif // EIGEN_PARAMETRIZEDLINE_H
diff --git a/extern/Eigen2/Eigen/src/Geometry/Quaternion.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h
index 3fcbff4e71d..a75fa42aeac 100644
--- a/extern/Eigen2/Eigen/src/Geometry/Quaternion.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Quaternion.h
@@ -22,8 +22,7 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_QUATERNION_H
-#define EIGEN_QUATERNION_H
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
template<typename Other,
int OtherRows=Other::RowsAtCompileTime,
@@ -98,7 +97,7 @@ public:
inline Scalar& w() { return m_coeffs.coeffRef(3); }
/** \returns a read-only vector expression of the imaginary part (x,y,z) */
- inline const Block<Coefficients,3,1> vec() const { return m_coeffs.template start<3>(); }
+ inline const Block<const Coefficients,3,1> vec() const { return m_coeffs.template start<3>(); }
/** \returns a vector expression of the imaginary part (x,y,z) */
inline Block<Coefficients,3,1> vec() { return m_coeffs.template start<3>(); }
@@ -172,7 +171,7 @@ public:
* corresponds to the cosine of half the angle between the two rotations.
* \sa angularDistance()
*/
- inline Scalar dot(const Quaternion& other) const { return m_coeffs.dot(other.m_coeffs); }
+ inline Scalar eigen2_dot(const Quaternion& other) const { return m_coeffs.eigen2_dot(other.m_coeffs); }
inline Scalar angularDistance(const Quaternion& other) const;
@@ -198,8 +197,8 @@ public:
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<Quaternion,Quaternion<NewScalarType> >::type cast() const
- { return typename ei_cast_return_type<Quaternion,Quaternion<NewScalarType> >::type(*this); }
+ inline typename internal::cast_return_type<Quaternion,Quaternion<NewScalarType> >::type cast() const
+ { return typename internal::cast_return_type<Quaternion,Quaternion<NewScalarType> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
@@ -225,7 +224,7 @@ typedef Quaternion<float> Quaternionf;
typedef Quaternion<double> Quaterniond;
// Generic Quaternion * Quaternion product
-template<int Arch,typename Scalar> inline Quaternion<Scalar>
+template<typename Scalar> inline Quaternion<Scalar>
ei_quaternion_product(const Quaternion<Scalar>& a, const Quaternion<Scalar>& b)
{
return Quaternion<Scalar>
@@ -237,32 +236,11 @@ ei_quaternion_product(const Quaternion<Scalar>& a, const Quaternion<Scalar>& b)
);
}
-#ifdef EIGEN_VECTORIZE_SSE
-template<> inline Quaternion<float>
-ei_quaternion_product<EiArch_SSE,float>(const Quaternion<float>& _a, const Quaternion<float>& _b)
-{
- const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0,0,0,0x80000000));
- Quaternion<float> res;
- __m128 a = _a.coeffs().packet<Aligned>(0);
- __m128 b = _b.coeffs().packet<Aligned>(0);
- __m128 flip1 = _mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a,1,2,0,2),
- ei_vec4f_swizzle1(b,2,0,1,2)),mask);
- __m128 flip2 = _mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a,3,3,3,1),
- ei_vec4f_swizzle1(b,0,1,2,1)),mask);
- ei_pstore(&res.x(),
- _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,ei_vec4f_swizzle1(b,3,3,3,3)),
- _mm_mul_ps(ei_vec4f_swizzle1(a,2,0,1,0),
- ei_vec4f_swizzle1(b,1,2,0,0))),
- _mm_add_ps(flip1,flip2)));
- return res;
-}
-#endif
-
/** \returns the concatenation of two rotations as a quaternion-quaternion product */
template <typename Scalar>
inline Quaternion<Scalar> Quaternion<Scalar>::operator* (const Quaternion& other) const
{
- return ei_quaternion_product<EiArch>(*this,other);
+ return ei_quaternion_product(*this,other);
}
/** \sa operator*(Quaternion) */
@@ -374,7 +352,7 @@ inline Quaternion<Scalar>& Quaternion<Scalar>::setFromTwoVectors(const MatrixBas
{
Vector3 v0 = a.normalized();
Vector3 v1 = b.normalized();
- Scalar c = v0.dot(v1);
+ Scalar c = v0.eigen2_dot(v1);
// if dot == 1, vectors are the same
if (ei_isApprox(c,Scalar(1)))
@@ -433,12 +411,12 @@ inline Quaternion<Scalar> Quaternion<Scalar>::conjugate() const
}
/** \returns the angle (in radian) between two rotations
- * \sa dot()
+ * \sa eigen2_dot()
*/
template <typename Scalar>
inline Scalar Quaternion<Scalar>::angularDistance(const Quaternion& other) const
{
- double d = ei_abs(this->dot(other));
+ double d = ei_abs(this->eigen2_dot(other));
if (d>=1.0)
return 0;
return Scalar(2) * std::acos(d);
@@ -450,22 +428,31 @@ inline Scalar Quaternion<Scalar>::angularDistance(const Quaternion& other) const
template <typename Scalar>
Quaternion<Scalar> Quaternion<Scalar>::slerp(Scalar t, const Quaternion& other) const
{
- static const Scalar one = Scalar(1) - precision<Scalar>();
- Scalar d = this->dot(other);
+ static const Scalar one = Scalar(1) - machine_epsilon<Scalar>();
+ Scalar d = this->eigen2_dot(other);
Scalar absD = ei_abs(d);
- if (absD>=one)
- return *this;
- // theta is the angle between the 2 quaternions
- Scalar theta = std::acos(absD);
- Scalar sinTheta = ei_sin(theta);
+ Scalar scale0;
+ Scalar scale1;
- Scalar scale0 = ei_sin( ( Scalar(1) - t ) * theta) / sinTheta;
- Scalar scale1 = ei_sin( ( t * theta) ) / sinTheta;
- if (d<0)
- scale1 = -scale1;
+ if (absD>=one)
+ {
+ scale0 = Scalar(1) - t;
+ scale1 = t;
+ }
+ else
+ {
+ // theta is the angle between the 2 quaternions
+ Scalar theta = std::acos(absD);
+ Scalar sinTheta = ei_sin(theta);
+
+ scale0 = ei_sin( ( Scalar(1) - t ) * theta) / sinTheta;
+ scale1 = ei_sin( ( t * theta) ) / sinTheta;
+ if (d<0)
+ scale1 = -scale1;
+ }
- return Quaternion(scale0 * m_coeffs + scale1 * other.m_coeffs);
+ return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
}
// set from a rotation matrix
@@ -517,5 +504,3 @@ struct ei_quaternion_assign_impl<Other,4,1>
q.coeffs() = vec;
}
};
-
-#endif // EIGEN_QUATERNION_H
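The rewritten slerp above degrades to plain linear interpolation when the two quaternions are nearly parallel (absD >= one), instead of dividing by a vanishing sinTheta as the old code risked. A minimal usage sketch, assuming EIGEN2_SUPPORT is defined:

#define EIGEN2_SUPPORT
#include <Eigen/Geometry>
using namespace Eigen;

void slerp_demo()
{
  Quaternionf qa = Quaternionf::Identity();
  Quaternionf qb(AngleAxisf(0.5f, Vector3f::UnitZ()));
  Quaternionf mid = qa.slerp(0.5f, qb); // well-defined even when qa and qb are nearly equal
}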
diff --git a/extern/Eigen2/Eigen/src/Geometry/Rotation2D.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
index dca7f06bf5d..ee7c80e7eaa 100644
--- a/extern/Eigen2/Eigen/src/Geometry/Rotation2D.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
@@ -22,8 +22,8 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_ROTATION2D_H
-#define EIGEN_ROTATION2D_H
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
/** \geometry_module \ingroup Geometry_Module
*
@@ -107,8 +107,8 @@ public:
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
- { return typename ei_cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
+ inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
+ { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
@@ -155,5 +155,3 @@ Rotation2D<Scalar>::toRotationMatrix(void) const
Scalar cosA = ei_cos(m_angle);
return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
}
-
-#endif // EIGEN_ROTATION2D_H
diff --git a/extern/Eigen2/Eigen/src/Geometry/RotationBase.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h
index 5fec0f18d72..2f494f198bd 100644
--- a/extern/Eigen2/Eigen/src/Geometry/RotationBase.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/RotationBase.h
@@ -22,8 +22,7 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_ROTATIONBASE_H
-#define EIGEN_ROTATIONBASE_H
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
// this file aims to contain the various representations of rotation/orientation
// in 2D and 3D space, except Matrix and Quaternion.
@@ -133,5 +132,3 @@ inline static const MatrixBase<OtherDerived>& ei_toRotationMatrix(const MatrixBa
YOU_MADE_A_PROGRAMMING_MISTAKE)
return mat;
}
-
-#endif // EIGEN_ROTATIONBASE_H
diff --git a/extern/Eigen2/Eigen/src/Geometry/Scaling.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h
index 5daf0a49961..108e6d7d58f 100644
--- a/extern/Eigen2/Eigen/src/Geometry/Scaling.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Scaling.h
@@ -22,8 +22,8 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_SCALING_H
-#define EIGEN_SCALING_H
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
/** \geometry_module \ingroup Geometry_Module
*
@@ -132,8 +132,8 @@ public:
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type cast() const
- { return typename ei_cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type(*this); }
+ inline typename internal::cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type cast() const
+ { return typename internal::cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
@@ -177,5 +177,3 @@ Scaling<Scalar,Dim>::operator* (const TransformType& t) const
res.prescale(m_coeffs);
return res;
}
-
-#endif // EIGEN_SCALING_H
diff --git a/extern/Eigen2/Eigen/src/Geometry/Transform.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h
index 8425a1cd963..88956c86c73 100644
--- a/extern/Eigen2/Eigen/src/Geometry/Transform.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Transform.h
@@ -23,15 +23,8 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_TRANSFORM_H
-#define EIGEN_TRANSFORM_H
-
-/** Represents some traits of a transformation */
-enum TransformTraits {
- Isometry, ///< the transformation is a concatenation of translations and rotations
- Affine, ///< the transformation is affine (linear transformation + translation)
- Projective ///< the transformation might not be affine
-};
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
// Note that we have to pass Dim and HDim because it is not allowed to use a template
// parameter to define a template specialization. To be more precise, in the following
@@ -77,10 +70,14 @@ public:
typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
/** type of read/write reference to the linear part of the transformation */
typedef Block<MatrixType,Dim,Dim> LinearPart;
+ /** type of read/write reference to the linear part of the transformation */
+ typedef const Block<const MatrixType,Dim,Dim> ConstLinearPart;
/** type of a vector */
typedef Matrix<Scalar,Dim,1> VectorType;
/** type of a read/write reference to the translation part of the rotation */
typedef Block<MatrixType,Dim,1> TranslationPart;
+ /** type of a read/write reference to the translation part of the rotation */
+ typedef const Block<const MatrixType,Dim,1> ConstTranslationPart;
/** corresponding translation type */
typedef Translation<Scalar,Dim> TranslationType;
/** corresponding scaling transformation type */
@@ -162,12 +159,12 @@ public:
inline MatrixType& matrix() { return m_matrix; }
/** \returns a read-only expression of the linear (linear) part of the transformation */
- inline const LinearPart linear() const { return m_matrix.template block<Dim,Dim>(0,0); }
+ inline ConstLinearPart linear() const { return m_matrix.template block<Dim,Dim>(0,0); }
/** \returns a writable expression of the linear (linear) part of the transformation */
inline LinearPart linear() { return m_matrix.template block<Dim,Dim>(0,0); }
/** \returns a read-only expression of the translation vector of the transformation */
- inline const TranslationPart translation() const { return m_matrix.template block<Dim,1>(0,Dim); }
+ inline ConstTranslationPart translation() const { return m_matrix.template block<Dim,1>(0,Dim); }
/** \returns a writable expression of the translation vector of the transformation */
inline TranslationPart translation() { return m_matrix.template block<Dim,1>(0,Dim); }
@@ -272,8 +269,8 @@ public:
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<Transform,Transform<NewScalarType,Dim> >::type cast() const
- { return typename ei_cast_return_type<Transform,Transform<NewScalarType,Dim> >::type(*this); }
+ inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim> >::type cast() const
+ { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
@@ -635,7 +632,20 @@ template<typename Scalar, int Dim>
template<typename RotationMatrixType, typename ScalingMatrixType>
void Transform<Scalar,Dim>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
{
- linear().svd().computeRotationScaling(rotation, scaling);
+ JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU|ComputeFullV);
+ Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+ Matrix<Scalar, Dim, 1> sv(svd.singularValues());
+ sv.coeffRef(0) *= x;
+ if(scaling)
+ {
+ scaling->noalias() = svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint();
+ }
+ if(rotation)
+ {
+ LinearMatrixType m(svd.matrixU());
+ m.col(0) /= x;
+ rotation->noalias() = m * svd.matrixV().adjoint();
+ }
}
/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being
@@ -653,7 +663,20 @@ template<typename Scalar, int Dim>
template<typename ScalingMatrixType, typename RotationMatrixType>
void Transform<Scalar,Dim>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
{
- linear().svd().computeScalingRotation(scaling, rotation);
+ JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU|ComputeFullV);
+ Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+ Matrix<Scalar, Dim, 1> sv(svd.singularValues());
+ sv.coeffRef(0) *= x;
+ if(scaling)
+ {
+ scaling->noalias() = svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint();
+ }
+ if(rotation)
+ {
+ LinearMatrixType m(svd.matrixU());
+ m.col(0) /= x;
+ rotation->noalias() = m * svd.matrixV().adjoint();
+ }
}
/** Convenient method to set \c *this from a position, orientation and scale
@@ -768,18 +791,8 @@ struct ei_transform_product_impl<Other,Dim,HDim, Dim,1>
{
typedef typename Other::Scalar Scalar;
typedef Transform<Scalar,Dim> TransformType;
- typedef typename TransformType::LinearPart MatrixType;
- typedef const CwiseUnaryOp<
- ei_scalar_multiple_op<Scalar>,
- NestByValue<CwiseBinaryOp<
- ei_scalar_sum_op<Scalar>,
- NestByValue<typename ProductReturnType<NestByValue<MatrixType>,Other>::Type >,
- NestByValue<typename TransformType::TranslationPart> > >
- > ResultType;
- // FIXME should we offer an optimized version when the last row is known to be 0,0...,0,1 ?
+ typedef Matrix<Scalar,Dim,1> ResultType;
static ResultType run(const TransformType& tr, const Other& other)
- { return ((tr.linear().nestByValue() * other).nestByValue() + tr.translation().nestByValue()).nestByValue()
+ { return ((tr.linear() * other) + tr.translation())
* (Scalar(1) / ( (tr.matrix().template block<1,Dim>(Dim,0) * other).coeff(0) + tr.matrix().coeff(Dim,Dim))); }
};
-
-#endif // EIGEN_TRANSFORM_H
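computeRotationScaling now inlines what Eigen2's SVD member used to do: from the SVD linear() = U*S*V^T it forms rotation = U*V^T (first column sign-corrected so the determinant is +1) and scaling = V*S*V^T, so rotation * scaling reproduces linear(). A usage sketch, assuming EIGEN2_SUPPORT provides the Eigen2-era Transform3f typedef:

#define EIGEN2_SUPPORT
#include <Eigen/Geometry>
using namespace Eigen;

void decompose_demo()
{
  Transform3f t;   // Eigen2-era name; Affine3f is the native Eigen3 spelling
  t.setIdentity();
  Matrix3f rotation, scaling;
  t.computeRotationScaling(&rotation, &scaling);
  // rotation is orthogonal with det +1, and rotation * scaling == t.linear()
}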
diff --git a/extern/Eigen2/Eigen/src/Geometry/Translation.h b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h
index 4b2fc7a56fc..e651e310212 100644
--- a/extern/Eigen2/Eigen/src/Geometry/Translation.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Geometry/Translation.h
@@ -22,8 +22,8 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_TRANSLATION_H
-#define EIGEN_TRANSLATION_H
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
/** \geometry_module \ingroup Geometry_Module
*
@@ -135,8 +135,8 @@ public:
* then this function smartly returns a const reference to \c *this.
*/
template<typename NewScalarType>
- inline typename ei_cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
- { return typename ei_cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
+ inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
+ { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
/** Copy constructor with scalar type conversion */
template<typename OtherScalarType>
@@ -194,5 +194,3 @@ Translation<Scalar,Dim>::operator* (const TransformType& t) const
res.pretranslate(m_coeffs);
return res;
}
-
-#endif // EIGEN_TRANSLATION_H
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/LU.h b/extern/Eigen3/Eigen/src/Eigen2Support/LU.h
new file mode 100644
index 00000000000..c23c11baa72
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/LU.h
@@ -0,0 +1,133 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN2_LU_H
+#define EIGEN2_LU_H
+
+template<typename MatrixType>
+class LU : public FullPivLU<MatrixType>
+{
+ public:
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef Matrix<int, 1, MatrixType::ColsAtCompileTime, MatrixType::Options, 1, MatrixType::MaxColsAtCompileTime> IntRowVectorType;
+ typedef Matrix<int, MatrixType::RowsAtCompileTime, 1, MatrixType::Options, MatrixType::MaxRowsAtCompileTime, 1> IntColVectorType;
+ typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime, MatrixType::Options, 1, MatrixType::MaxColsAtCompileTime> RowVectorType;
+ typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1, MatrixType::Options, MatrixType::MaxRowsAtCompileTime, 1> ColVectorType;
+
+ typedef Matrix<typename MatrixType::Scalar,
+ MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix" is the number of cols of the original matrix
+ // so that the product "matrix * kernel = zero" makes sense
+ Dynamic, // we don't know at compile-time the dimension of the kernel
+ MatrixType::Options,
+ MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
+ MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space, whose dimension is the number
+ // of columns of the original matrix
+ > KernelResultType;
+
+ typedef Matrix<typename MatrixType::Scalar,
+ MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose dimension is the number
+ // of rows of the original matrix
+ Dynamic, // we don't know at compile time the dimension of the image (the rank)
+ MatrixType::Options,
+ MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
+ MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns.
+ > ImageResultType;
+
+ typedef FullPivLU<MatrixType> Base;
+ LU() : Base() {}
+
+ template<typename T>
+ explicit LU(const T& t) : Base(t), m_originalMatrix(t) {}
+
+ template<typename OtherDerived, typename ResultType>
+ bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+ {
+ *result = static_cast<const Base*>(this)->solve(b);
+ return true;
+ }
+
+ template<typename ResultType>
+ inline void computeInverse(ResultType *result) const
+ {
+ solve(MatrixType::Identity(this->rows(), this->cols()), result);
+ }
+
+ template<typename KernelMatrixType>
+ void computeKernel(KernelMatrixType *result) const
+ {
+ *result = static_cast<const Base*>(this)->kernel();
+ }
+
+ template<typename ImageMatrixType>
+ void computeImage(ImageMatrixType *result) const
+ {
+ *result = static_cast<const Base*>(this)->image(m_originalMatrix);
+ }
+
+ const ImageResultType image() const
+ {
+ return static_cast<const Base*>(this)->image(m_originalMatrix);
+ }
+
+ const MatrixType& m_originalMatrix;
+};
+
+#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+/** \lu_module
+ *
+ * Synonym of partialPivLu().
+ *
+ * \return the partial-pivoting LU decomposition of \c *this.
+ *
+ * \sa class PartialPivLU
+ */
+template<typename Derived>
+inline const LU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::lu() const
+{
+ return LU<PlainObject>(eval());
+}
+#endif
+
+#ifdef EIGEN2_SUPPORT
+/** \lu_module
+ *
+ * Synonym of partialPivLu().
+ *
+ * \return the partial-pivoting LU decomposition of \c *this.
+ *
+ * \sa class PartialPivLU
+ */
+template<typename Derived>
+inline const LU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::eigen2_lu() const
+{
+ return LU<PlainObject>(eval());
+}
+#endif
+
+
+#endif // EIGEN2_LU_H
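The LU class above is a thin Eigen2-compatibility veneer over Eigen3's FullPivLU, preserving the old pointer-based solve/computeKernel/computeImage entry points; note that it stores a reference to the decomposed matrix, so that matrix must outlive the decomposition. A usage sketch, assuming EIGEN2_SUPPORT:

#define EIGEN2_SUPPORT
#include <Eigen/LU>
using namespace Eigen;

void lu_demo()
{
  Matrix3f A = Matrix3f::Random();
  Vector3f b = Vector3f::Random();
  LU<Matrix3f> lu(A);  // keeps a reference to A, so A must stay alive
  Vector3f x;
  lu.solve(b, &x);     // Eigen2 convention: result via pointer, returns true
}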
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Lazy.h b/extern/Eigen3/Eigen/src/Eigen2Support/Lazy.h
new file mode 100644
index 00000000000..c4288ede2ef
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Lazy.h
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_LAZY_H
+#define EIGEN_LAZY_H
+
+/** \deprecated it is only used by lazy() which is deprecated
+ *
+ * \returns an expression of *this with added flags
+ *
+ * Example: \include MatrixBase_marked.cpp
+ * Output: \verbinclude MatrixBase_marked.out
+ *
+ * \sa class Flagged, extract(), part()
+ */
+template<typename Derived>
+template<unsigned int Added>
+inline const Flagged<Derived, Added, 0>
+MatrixBase<Derived>::marked() const
+{
+ return derived();
+}
+
+/** \deprecated use MatrixBase::noalias()
+ *
+ * \returns an expression of *this with the EvalBeforeAssigningBit flag removed.
+ *
+ * Example: \include MatrixBase_lazy.cpp
+ * Output: \verbinclude MatrixBase_lazy.out
+ *
+ * \sa class Flagged, marked()
+ */
+template<typename Derived>
+inline const Flagged<Derived, 0, EvalBeforeAssigningBit>
+MatrixBase<Derived>::lazy() const
+{
+ return derived();
+}
+
+
+/** \internal
+ * Overloaded to perform an efficient C += (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::operator+=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+ EvalBeforeAssigningBit>& other)
+{
+ other._expression().derived().addTo(derived()); return derived();
+}
+
+/** \internal
+ * Overloaded to perform an efficient C -= (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::operator-=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+ EvalBeforeAssigningBit>& other)
+{
+ other._expression().derived().subTo(derived()); return derived();
+}
+
+#endif // EIGEN_LAZY_H
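These overloads keep Eigen2's C += (A*B).lazy() idiom compiling on top of Eigen3 by routing it to the same addTo/subTo paths that noalias() uses. A sketch of the old spelling next to its Eigen3 equivalent; whether the .lazy() line compiles as written depends on EIGEN2_SUPPORT being enabled:

#define EIGEN2_SUPPORT
#include <Eigen/Core>
using namespace Eigen;

void lazy_demo()
{
  MatrixXf A(4,4), B(4,4), C(4,4);
  A.setRandom(); B.setRandom(); C.setZero();
  C += (A * B).lazy();   // Eigen2 idiom, served by the operator+= above
  C.noalias() += A * B;  // the preferred Eigen3 spelling of the same update
}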
diff --git a/extern/Eigen2/Eigen/src/LeastSquares/LeastSquares.h b/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h
index b2595ede1fe..4b62ffa92c7 100644
--- a/extern/Eigen2/Eigen/src/LeastSquares/LeastSquares.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h
@@ -22,8 +22,8 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_LEASTSQUARES_H
-#define EIGEN_LEASTSQUARES_H
+#ifndef EIGEN2_LEASTSQUARES_H
+#define EIGEN2_LEASTSQUARES_H
/** \ingroup LeastSquares_Module
*
@@ -179,4 +179,4 @@ void fitHyperplane(int numPoints,
}
-#endif // EIGEN_LEASTSQUARES_H
+#endif // EIGEN2_LEASTSQUARES_H
diff --git a/extern/Eigen2/Eigen/src/Cholesky/CholeskyInstantiations.cpp b/extern/Eigen3/Eigen/src/Eigen2Support/Macros.h
index e7f40a2ce9c..77e85a41e3d 100644
--- a/extern/Eigen2/Eigen/src/Cholesky/CholeskyInstantiations.cpp
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Macros.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -22,14 +22,14 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_EXTERN_INSTANTIATIONS
-#define EIGEN_EXTERN_INSTANTIATIONS
-#endif
-#include "../../Core"
-#undef EIGEN_EXTERN_INSTANTIATIONS
+#ifndef EIGEN2_MACROS_H
+#define EIGEN2_MACROS_H
-#include "../../Cholesky"
+#define ei_assert eigen_assert
+#define ei_internal_assert eigen_internal_assert
-namespace Eigen {
- EIGEN_CHOLESKY_MODULE_INSTANTIATE();
-}
+#define EIGEN_ALIGN_128 EIGEN_ALIGN16
+
+#define EIGEN_ARCH_WANTS_ALIGNMENT EIGEN_ALIGN_STATICALLY
+
+#endif // EIGEN2_MACROS_H
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h b/extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h
new file mode 100644
index 00000000000..caa44e63f32
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/MathFunctions.h
@@ -0,0 +1,68 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN2_MATH_FUNCTIONS_H
+#define EIGEN2_MATH_FUNCTIONS_H
+
+template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return internal::real(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return internal::imag(x); }
+template<typename T> inline T ei_conj(const T& x) { return internal::conj(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { return internal::abs(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return internal::abs2(x); }
+template<typename T> inline T ei_sqrt(const T& x) { return internal::sqrt(x); }
+template<typename T> inline T ei_exp (const T& x) { return internal::exp(x); }
+template<typename T> inline T ei_log (const T& x) { return internal::log(x); }
+template<typename T> inline T ei_sin (const T& x) { return internal::sin(x); }
+template<typename T> inline T ei_cos (const T& x) { return internal::cos(x); }
+template<typename T> inline T ei_atan2(const T& x,const T& y) { return internal::atan2(x,y); }
+template<typename T> inline T ei_pow (const T& x,const T& y) { return internal::pow(x,y); }
+template<typename T> inline T ei_random () { return internal::random<T>(); }
+template<typename T> inline T ei_random (const T& x, const T& y) { return internal::random(x, y); }
+
+template<typename T> inline T precision () { return NumTraits<T>::dummy_precision(); }
+template<typename T> inline T machine_epsilon () { return NumTraits<T>::epsilon(); }
+
+
+template<typename Scalar, typename OtherScalar>
+inline bool ei_isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
+ typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+ return internal::isMuchSmallerThan(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool ei_isApprox(const Scalar& x, const Scalar& y,
+ typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+ return internal::isApprox(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool ei_isApproxOrLessThan(const Scalar& x, const Scalar& y,
+ typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+ return internal::isApproxOrLessThan(x, y, precision);
+}
+
+#endif // EIGEN2_MATH_FUNCTIONS_H
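These shims re-export Eigen3's internal:: math helpers under their Eigen2 ei_ names. Note the tolerance split: Eigen2's precision<T>() now maps to dummy_precision(), while the new machine_epsilon<T>() exposes the true epsilon() — the distinction the slerp change earlier in this diff relies on. A small usage sketch, assuming EIGEN2_SUPPORT and the Eigen namespace:

#define EIGEN2_SUPPORT
#include <Eigen/Core>
using namespace Eigen;

void math_demo()
{
  float r    = ei_sqrt(2.0f);                    // forwards to internal::sqrt
  bool close = ei_isApprox(1.0f, 1.0f + 1e-8f);  // tolerance: dummy_precision()
  float eps  = machine_epsilon<float>();         // NumTraits<float>::epsilon()
}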
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Memory.h b/extern/Eigen3/Eigen/src/Eigen2Support/Memory.h
new file mode 100644
index 00000000000..0283475419e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Memory.h
@@ -0,0 +1,58 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN2_MEMORY_H
+#define EIGEN2_MEMORY_H
+
+inline void* ei_aligned_malloc(size_t size) { return internal::aligned_malloc(size); }
+inline void ei_aligned_free(void *ptr) { internal::aligned_free(ptr); }
+inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size) { return internal::aligned_realloc(ptr, new_size, old_size); }
+inline void* ei_handmade_aligned_malloc(size_t size) { return internal::handmade_aligned_malloc(size); }
+inline void ei_handmade_aligned_free(void *ptr) { internal::handmade_aligned_free(ptr); }
+
+template<bool Align> inline void* ei_conditional_aligned_malloc(size_t size)
+{
+ return internal::conditional_aligned_malloc<Align>(size);
+}
+template<bool Align> inline void ei_conditional_aligned_free(void *ptr)
+{
+ internal::conditional_aligned_free<Align>(ptr);
+}
+template<bool Align> inline void* ei_conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
+{
+ return internal::conditional_aligned_realloc<Align>(ptr, new_size, old_size);
+}
+
+template<typename T> inline T* ei_aligned_new(size_t size)
+{
+ return internal::aligned_new<T>(size);
+}
+template<typename T> inline void ei_aligned_delete(T *ptr, size_t size)
+{
+ return internal::aligned_delete(ptr, size);
+}
+
+
+
+#endif // EIGEN2_MEMORY_H
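A usage sketch of the allocator shims above, assuming EIGEN2_SUPPORT and the Eigen namespace:

#define EIGEN2_SUPPORT
#include <Eigen/Core>
using namespace Eigen;

void memory_demo()
{
  // 16 floats in an aligned block, Eigen2-style:
  float* buf = static_cast<float*>(ei_aligned_malloc(16 * sizeof(float)));
  // ... fill and use buf ...
  ei_aligned_free(buf);
}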
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/Meta.h b/extern/Eigen3/Eigen/src/Eigen2Support/Meta.h
new file mode 100644
index 00000000000..6e500b79a2e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Meta.h
@@ -0,0 +1,86 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN2_META_H
+#define EIGEN2_META_H
+
+template<typename T>
+struct ei_traits : internal::traits<T>
+{};
+
+struct ei_meta_true { enum { ret = 1 }; };
+struct ei_meta_false { enum { ret = 0 }; };
+
+template<bool Condition, typename Then, typename Else>
+struct ei_meta_if { typedef Then ret; };
+
+template<typename Then, typename Else>
+struct ei_meta_if <false, Then, Else> { typedef Else ret; };
+
+template<typename T, typename U> struct ei_is_same_type { enum { ret = 0 }; };
+template<typename T> struct ei_is_same_type<T,T> { enum { ret = 1 }; };
+
+template<typename T> struct ei_unref { typedef T type; };
+template<typename T> struct ei_unref<T&> { typedef T type; };
+
+template<typename T> struct ei_unpointer { typedef T type; };
+template<typename T> struct ei_unpointer<T*> { typedef T type; };
+template<typename T> struct ei_unpointer<T*const> { typedef T type; };
+
+template<typename T> struct ei_unconst { typedef T type; };
+template<typename T> struct ei_unconst<const T> { typedef T type; };
+template<typename T> struct ei_unconst<T const &> { typedef T & type; };
+template<typename T> struct ei_unconst<T const *> { typedef T * type; };
+
+template<typename T> struct ei_cleantype { typedef T type; };
+template<typename T> struct ei_cleantype<const T> { typedef typename ei_cleantype<T>::type type; };
+template<typename T> struct ei_cleantype<const T&> { typedef typename ei_cleantype<T>::type type; };
+template<typename T> struct ei_cleantype<T&> { typedef typename ei_cleantype<T>::type type; };
+template<typename T> struct ei_cleantype<const T*> { typedef typename ei_cleantype<T>::type type; };
+template<typename T> struct ei_cleantype<T*> { typedef typename ei_cleantype<T>::type type; };
+
+/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer.
+ * Usage example: \code ei_meta_sqrt<1023>::ret \endcode
+ */
+template<int Y,
+ int InfX = 0,
+ int SupX = ((Y==1) ? 1 : Y/2),
+ bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >
+ // use ?: instead of || just to shut up a stupid gcc 4.3 warning
+class ei_meta_sqrt
+{
+ enum {
+ MidX = (InfX+SupX)/2,
+ TakeInf = MidX*MidX > Y ? 1 : 0,
+ NewInf = int(TakeInf) ? InfX : int(MidX),
+ NewSup = int(TakeInf) ? int(MidX) : SupX
+ };
+ public:
+ enum { ret = ei_meta_sqrt<Y,NewInf,NewSup>::ret };
+};
+
+template<int Y, int InfX, int SupX>
+class ei_meta_sqrt<Y, InfX, SupX, true> { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
+
+#endif // EIGEN2_META_H
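ei_meta_sqrt performs a compile-time binary search for floor(sqrt(Y)), which is exactly what the doc comment's usage example evaluates. A sketch, assuming EIGEN2_SUPPORT:

#define EIGEN2_SUPPORT
#include <Eigen/Core>
enum { Root = Eigen::ei_meta_sqrt<1023>::ret };
// Root == 31, since 31*31 = 961 <= 1023 while 32*32 = 1024 > 1023.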
diff --git a/extern/Eigen2/Eigen/src/Core/Minor.h b/extern/Eigen3/Eigen/src/Eigen2Support/Minor.h
index e2d47da79c2..eda91cc32be 100644
--- a/extern/Eigen2/Eigen/src/Core/Minor.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/Minor.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -25,7 +25,7 @@
#ifndef EIGEN_MINOR_H
#define EIGEN_MINOR_H
-/** \nonstableyet
+/**
* \class Minor
*
* \brief Expression of a minor
@@ -38,12 +38,15 @@
*
* \sa MatrixBase::minor()
*/
+
+namespace internal {
template<typename MatrixType>
-struct ei_traits<Minor<MatrixType> >
+struct traits<Minor<MatrixType> >
+ : traits<MatrixType>
{
- typedef typename MatrixType::Scalar Scalar;
- typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
- typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+ typedef typename MatrixType::StorageKind StorageKind;
enum {
RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ?
int(MatrixType::RowsAtCompileTime) - 1 : Dynamic,
@@ -53,47 +56,50 @@ struct ei_traits<Minor<MatrixType> >
int(MatrixType::MaxRowsAtCompileTime) - 1 : Dynamic,
MaxColsAtCompileTime = (MatrixType::MaxColsAtCompileTime != Dynamic) ?
int(MatrixType::MaxColsAtCompileTime) - 1 : Dynamic,
- Flags = _MatrixTypeNested::Flags & HereditaryBits,
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost
+ Flags = _MatrixTypeNested::Flags & (HereditaryBits | LvalueBit),
+ CoeffReadCost = _MatrixTypeNested::CoeffReadCost // minor is used typically on tiny matrices,
+ // where loops are unrolled and the 'if' evaluates at compile time
};
};
+}
template<typename MatrixType> class Minor
: public MatrixBase<Minor<MatrixType> >
{
public:
- EIGEN_GENERIC_PUBLIC_INTERFACE(Minor)
+ typedef MatrixBase<Minor> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Minor)
inline Minor(const MatrixType& matrix,
- int row, int col)
+ Index row, Index col)
: m_matrix(matrix), m_row(row), m_col(col)
{
- ei_assert(row >= 0 && row < matrix.rows()
+ eigen_assert(row >= 0 && row < matrix.rows()
&& col >= 0 && col < matrix.cols());
}
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Minor)
- inline int rows() const { return m_matrix.rows() - 1; }
- inline int cols() const { return m_matrix.cols() - 1; }
+ inline Index rows() const { return m_matrix.rows() - 1; }
+ inline Index cols() const { return m_matrix.cols() - 1; }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
return m_matrix.const_cast_derived().coeffRef(row + (row >= m_row), col + (col >= m_col));
}
- inline const Scalar coeff(int row, int col) const
+ inline const Scalar coeff(Index row, Index col) const
{
return m_matrix.coeff(row + (row >= m_row), col + (col >= m_col));
}
protected:
const typename MatrixType::Nested m_matrix;
- const int m_row, m_col;
+ const Index m_row, m_col;
};
-/** \nonstableyet
+/**
* \return an expression of the (\a row, \a col)-minor of *this,
* i.e. an expression constructed from *this by removing the specified
* row and column.
@@ -105,16 +111,16 @@ template<typename MatrixType> class Minor
*/
template<typename Derived>
inline Minor<Derived>
-MatrixBase<Derived>::minor(int row, int col)
+MatrixBase<Derived>::minor(Index row, Index col)
{
return Minor<Derived>(derived(), row, col);
}
-/** \nonstableyet
+/**
* This is the const version of minor(). */
template<typename Derived>
inline const Minor<Derived>
-MatrixBase<Derived>::minor(int row, int col) const
+MatrixBase<Derived>::minor(Index row, Index col) const
{
return Minor<Derived>(derived(), row, col);
}
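Minor survives as an Eigen2-support expression: it views the matrix with one row and one column skipped, via the (row >= m_row) / (col >= m_col) index shifts above. A usage sketch, assuming EIGEN2_SUPPORT:

#define EIGEN2_SUPPORT
#include <Eigen/Core>
using namespace Eigen;

void minor_demo()
{
  Matrix3f m = Matrix3f::Random();
  Matrix2f sub = m.minor(1, 2); // m with row 1 and column 2 removed
}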
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/QR.h b/extern/Eigen3/Eigen/src/Eigen2Support/QR.h
new file mode 100644
index 00000000000..64f5d5ccb30
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/QR.h
@@ -0,0 +1,79 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN2_QR_H
+#define EIGEN2_QR_H
+
+template<typename MatrixType>
+class QR : public HouseholderQR<MatrixType>
+{
+ public:
+
+ typedef HouseholderQR<MatrixType> Base;
+ typedef Block<const MatrixType, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixRBlockType;
+
+ QR() : Base() {}
+
+ template<typename T>
+ explicit QR(const T& t) : Base(t) {}
+
+ template<typename OtherDerived, typename ResultType>
+ bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+ {
+ *result = static_cast<const Base*>(this)->solve(b);
+ return true;
+ }
+
+ MatrixType matrixQ(void) const {
+ MatrixType ret = MatrixType::Identity(this->rows(), this->cols());
+ ret = this->householderQ() * ret;
+ return ret;
+ }
+
+ bool isFullRank() const {
+ return true;
+ }
+
+ const TriangularView<MatrixRBlockType, UpperTriangular>
+ matrixR(void) const
+ {
+ int cols = this->cols();
+ return MatrixRBlockType(this->matrixQR(), 0, 0, cols, cols).template triangularView<UpperTriangular>();
+ }
+};
+
+/** \return the QR decomposition of \c *this.
+ *
+ * \sa class QR
+ */
+template<typename Derived>
+const QR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::qr() const
+{
+ return QR<PlainObject>(eval());
+}
+
+
+#endif // EIGEN2_QR_H
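The Eigen2 QR class now rides on HouseholderQR: matrixQ() materializes a thin Q by applying householderQ() to a rows-by-cols identity, and isFullRank() is hardwired to true because a plain Householder QR is not rank-revealing. A usage sketch, assuming EIGEN2_SUPPORT:

#define EIGEN2_SUPPORT
#include <Eigen/QR>
using namespace Eigen;

void qr_demo()
{
  MatrixXf A = MatrixXf::Random(5, 3);
  QR<MatrixXf> qr(A);
  MatrixXf Q = qr.matrixQ(); // thin 5x3 Q: identity narrowed, then householderQ() applied
}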
diff --git a/extern/Eigen2/Eigen/src/SVD/SVD.h b/extern/Eigen3/Eigen/src/Eigen2Support/SVD.h
index d117c158397..16b4b488f0c 100644
--- a/extern/Eigen2/Eigen/src/SVD/SVD.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/SVD.h
@@ -22,8 +22,8 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_SVD_H
-#define EIGEN_SVD_H
+#ifndef EIGEN2_SVD_H
+#define EIGEN2_SVD_H
/** \ingroup SVD_Module
* \nonstableyet
@@ -47,9 +47,9 @@ template<typename MatrixType> class SVD
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
enum {
- PacketSize = ei_packet_traits<Scalar>::size,
+ PacketSize = internal::packet_traits<Scalar>::size,
AlignmentMask = int(PacketSize)-1,
- MinSize = EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime)
+ MinSize = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime)
};
typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVector;
@@ -61,10 +61,12 @@ template<typename MatrixType> class SVD
public:
+ SVD() {} // a user who relied on the compiler-generated default constructor reported problems with MSVC in 2.0.7
+
SVD(const MatrixType& matrix)
- : m_matU(matrix.rows(), std::min(matrix.rows(), matrix.cols())),
+ : m_matU(matrix.rows(), (std::min)(matrix.rows(), matrix.cols())),
m_matV(matrix.cols(),matrix.cols()),
- m_sigma(std::min(matrix.rows(),matrix.cols()))
+ m_sigma((std::min)(matrix.rows(),matrix.cols()))
{
compute(matrix);
}
@@ -106,12 +108,13 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
{
const int m = matrix.rows();
const int n = matrix.cols();
- const int nu = std::min(m,n);
+ const int nu = (std::min)(m,n);
ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!");
+ ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices");
m_matU.resize(m, nu);
m_matU.setZero();
- m_sigma.resize(std::min(m,n));
+ m_sigma.resize((std::min)(m,n));
m_matV.resize(n,n);
RowVector e(n);
@@ -123,9 +126,9 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
// Reduce A to bidiagonal form, storing the diagonal elements
// in s and the super-diagonal elements in e.
- int nct = std::min(m-1,n);
- int nrt = std::max(0,std::min(n-2,m));
- for (k = 0; k < std::max(nct,nrt); ++k)
+ int nct = (std::min)(m-1,n);
+ int nrt = (std::max)(0,(std::min)(n-2,m));
+ for (k = 0; k < (std::max)(nct,nrt); ++k)
{
if (k < nct)
{
@@ -147,7 +150,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
if ((k < nct) && (m_sigma[k] != 0.0))
{
// Apply the transformation.
- Scalar t = matA.col(k).end(m-k).dot(matA.col(j).end(m-k)); // FIXME dot product or cwise prod + .sum() ??
+ Scalar t = matA.col(k).end(m-k).eigen2_dot(matA.col(j).end(m-k)); // FIXME dot product or cwise prod + .sum() ??
t = -t/matA(k,k);
matA.col(j).end(m-k) += t * matA.col(k).end(m-k);
}
@@ -190,7 +193,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
// Set up the final bidiagonal matrix or order p.
- int p = std::min(n,m+1);
+ int p = (std::min)(n,m+1);
if (nct < n)
m_sigma[nct] = matA(nct,nct);
if (m < p)
@@ -213,7 +216,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
{
for (j = k+1; j < nu; ++j)
{
- Scalar t = m_matU.col(k).end(m-k).dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ?
+ Scalar t = m_matU.col(k).end(m-k).eigen2_dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ?
t = -t/m_matU(k,k);
m_matU.col(j).end(m-k) += t * m_matU.col(k).end(m-k);
}
@@ -239,7 +242,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
{
for (j = k+1; j < nu; ++j)
{
- Scalar t = m_matV.col(k).end(n-k-1).dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ?
+ Scalar t = m_matV.col(k).end(n-k-1).eigen2_dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ?
t = -t/m_matV(k+1,k);
m_matV.col(j).end(n-k-1) += t * m_matV.col(k).end(n-k-1);
}
@@ -325,7 +328,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
e[p-2] = 0.0;
for (j = p-2; j >= k; --j)
{
- Scalar t(ei_hypot(m_sigma[j],f));
+ Scalar t(internal::hypot(m_sigma[j],f));
Scalar cs(m_sigma[j]/t);
Scalar sn(f/t);
m_sigma[j] = t;
@@ -354,7 +357,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
e[k-1] = 0.0;
for (j = k; j < p; ++j)
{
- Scalar t(ei_hypot(m_sigma[j],f));
+ Scalar t(internal::hypot(m_sigma[j],f));
Scalar cs( m_sigma[j]/t);
Scalar sn(f/t);
m_sigma[j] = t;
@@ -377,7 +380,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
case 3:
{
// Calculate the shift.
- Scalar scale = std::max(std::max(std::max(std::max(
+ Scalar scale = (std::max)((std::max)((std::max)((std::max)(
ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])),
ei_abs(m_sigma[k])),ei_abs(e[k]));
Scalar sp = m_sigma[p-1]/scale;
@@ -402,7 +405,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
for (j = k; j < p-1; ++j)
{
- Scalar t = ei_hypot(f,g);
+ Scalar t = internal::hypot(f,g);
Scalar cs = f/t;
Scalar sn = g/t;
if (j != k)
@@ -420,7 +423,7 @@ void SVD<MatrixType>::compute(const MatrixType& matrix)
m_matV(i,j) = t;
}
}
- t = ei_hypot(f,g);
+ t = internal::hypot(f,g);
cs = f/t;
sn = g/t;
m_sigma[j] = t;
@@ -637,10 +640,10 @@ void SVD<MatrixType>::computeScalingRotation(ScalingType *scaling, RotationType
* \returns the SVD decomposition of \c *this
*/
template<typename Derived>
-inline SVD<typename MatrixBase<Derived>::PlainMatrixType>
+inline SVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::svd() const
{
- return SVD<PlainMatrixType>(derived());
+ return SVD<PlainObject>(derived());
}
-#endif // EIGEN_SVD_H
+#endif // EIGEN2_SVD_H
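The ported Eigen2 SVD keeps its old restriction to matrices with rows >= cols (and now also asserts against 1x1 inputs). A usage sketch, assuming EIGEN2_SUPPORT and the Eigen2-era singularValues() accessor:

#define EIGEN2_SUPPORT
#include <Eigen/SVD>
using namespace Eigen;

void svd_demo()
{
  MatrixXf A = MatrixXf::Random(6, 3); // rows >= cols is required here
  SVD<MatrixXf> svd = A.svd();
  VectorXf sigma = svd.singularValues();
}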
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/TriangularSolver.h b/extern/Eigen3/Eigen/src/Eigen2Support/TriangularSolver.h
new file mode 100644
index 00000000000..e94e47a5093
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/TriangularSolver.h
@@ -0,0 +1,53 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRIANGULAR_SOLVER2_H
+#define EIGEN_TRIANGULAR_SOLVER2_H
+
+const unsigned int UnitDiagBit = UnitDiag;
+const unsigned int SelfAdjointBit = SelfAdjoint;
+const unsigned int UpperTriangularBit = Upper;
+const unsigned int LowerTriangularBit = Lower;
+
+const unsigned int UpperTriangular = Upper;
+const unsigned int LowerTriangular = Lower;
+const unsigned int UnitUpperTriangular = UnitUpper;
+const unsigned int UnitLowerTriangular = UnitLower;
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed>
+template<typename OtherDerived>
+typename ExpressionType::PlainObject
+Flagged<ExpressionType,Added,Removed>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+ return m_matrix.template triangularView<Added>().solve(other.derived());
+}
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed>
+template<typename OtherDerived>
+void Flagged<ExpressionType,Added,Removed>::solveTriangularInPlace(const MatrixBase<OtherDerived>& other) const
+{
+ m_matrix.template triangularView<Added>().solveInPlace(other.derived());
+}
+
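+// A minimal usage sketch of the compatibility layer above (illustrative
+// only; assumes a dynamic-size double matrix):
+//   \code
+//   Eigen::MatrixXd L = Eigen::MatrixXd::Random(3,3);
+//   Eigen::VectorXd b = Eigen::VectorXd::Random(3);
+//   // Eigen2-style call, forwarded to the Eigen3 API by the code above:
+//   Eigen::VectorXd x = L.marked<Eigen::LowerTriangular>().solveTriangular(b);
+//   // Direct Eigen3 equivalent:
+//   Eigen::VectorXd y = L.triangularView<Eigen::Lower>().solve(b);
+//   \endcode
+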
+#endif // EIGEN_TRIANGULAR_SOLVER2_H
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/VectorBlock.h b/extern/Eigen3/Eigen/src/Eigen2Support/VectorBlock.h
new file mode 100644
index 00000000000..010031d1971
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/VectorBlock.h
@@ -0,0 +1,105 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN2_VECTORBLOCK_H
+#define EIGEN2_VECTORBLOCK_H
+
+/** \deprecated use DenseBase::head(Index) */
+template<typename Derived>
+inline VectorBlock<Derived>
+MatrixBase<Derived>::start(Index size)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return VectorBlock<Derived>(derived(), 0, size);
+}
+
+/** \deprecated use DenseBase::head(Index) */
+template<typename Derived>
+inline const VectorBlock<const Derived>
+MatrixBase<Derived>::start(Index size) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return VectorBlock<const Derived>(derived(), 0, size);
+}
+
+/** \deprecated use DenseBase::tail(Index) */
+template<typename Derived>
+inline VectorBlock<Derived>
+MatrixBase<Derived>::end(Index size)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return VectorBlock<Derived>(derived(), this->size() - size, size);
+}
+
+/** \deprecated use DenseBase::tail(Index) */
+template<typename Derived>
+inline const VectorBlock<const Derived>
+MatrixBase<Derived>::end(Index size) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return VectorBlock<const Derived>(derived(), this->size() - size, size);
+}
+
+/** \deprecated use DenseBase::head() */
+template<typename Derived>
+template<int Size>
+inline VectorBlock<Derived,Size>
+MatrixBase<Derived>::start()
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return VectorBlock<Derived,Size>(derived(), 0);
+}
+
+/** \deprecated use DenseBase::head() */
+template<typename Derived>
+template<int Size>
+inline const VectorBlock<const Derived,Size>
+MatrixBase<Derived>::start() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return VectorBlock<const Derived,Size>(derived(), 0);
+}
+
+/** \deprecated use DenseBase::tail() */
+template<typename Derived>
+template<int Size>
+inline VectorBlock<Derived,Size>
+MatrixBase<Derived>::end()
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return VectorBlock<Derived, Size>(derived(), size() - Size);
+}
+
+/** \deprecated use DenseBase::tail() */
+template<typename Derived>
+template<int Size>
+inline const VectorBlock<const Derived,Size>
+MatrixBase<Derived>::end() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return VectorBlock<const Derived, Size>(derived(), size() - Size);
+}
+
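+// A minimal sketch of the deprecated calls above next to their Eigen3
+// replacements (illustrative only; assumes a dynamic-size double vector):
+//   \code
+//   Eigen::VectorXd v = Eigen::VectorXd::Random(5);
+//   v.start(2);    // deprecated; same as v.head(2)
+//   v.end(3);      // deprecated; same as v.tail(3)
+//   v.start<2>();  // deprecated; same as v.head<2>()
+//   v.end<3>();    // deprecated; same as v.tail<3>()
+//   \endcode
+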
+#endif // EIGEN2_VECTORBLOCK_H
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h
new file mode 100644
index 00000000000..57e00227d72
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -0,0 +1,332 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Claire Maurice
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H
+#define EIGEN_COMPLEX_EIGEN_SOLVER_H
+
+#include "./EigenvaluesCommon.h"
+#include "./ComplexSchur.h"
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class ComplexEigenSolver
+ *
+ * \brief Computes eigenvalues and eigenvectors of general complex matrices
+ *
+ * \tparam _MatrixType the type of the matrix of which we are
+ * computing the eigendecomposition; this is expected to be an
+ * instantiation of the Matrix class template.
+ *
+ * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
+ * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v
+ * \f$. If \f$ D \f$ is a diagonal matrix with the eigenvalues on
+ * the diagonal, and \f$ V \f$ is a matrix with the eigenvectors as
+ * its columns, then \f$ A V = V D \f$. The matrix \f$ V \f$ is
+ * almost always invertible, in which case we have \f$ A = V D V^{-1}
+ * \f$. This is called the eigendecomposition.
+ *
+ * The main function in this class is compute(), which computes the
+ * eigenvalues and eigenvectors of a given matrix. The
+ * documentation for that function contains an example showing the
+ * main features of the class.
+ *
+ * \sa class EigenSolver, class SelfAdjointEigenSolver
+ */
+template<typename _MatrixType> class ComplexEigenSolver
+{
+ public:
+
+ /** \brief Synonym for the template parameter \p _MatrixType. */
+ typedef _MatrixType MatrixType;
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ /** \brief Scalar type for matrices of type #MatrixType. */
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \brief Complex scalar type for #MatrixType.
+ *
+ * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+ * \c float or \c double) and just \c Scalar if #Scalar is
+ * complex.
+ */
+ typedef std::complex<RealScalar> ComplexScalar;
+
+ /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+ *
+ * This is a column vector with entries of type #ComplexScalar.
+ * The length of the vector is the size of #MatrixType.
+ */
+ typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options&(~RowMajor), MaxColsAtCompileTime, 1> EigenvalueType;
+
+ /** \brief Type for matrix of eigenvectors as returned by eigenvectors().
+ *
+ * This is a square matrix with entries of type #ComplexScalar.
+ * The size is the same as the size of #MatrixType.
+ */
+ typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
+
+ /** \brief Default constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via compute().
+ */
+ ComplexEigenSolver()
+ : m_eivec(),
+ m_eivalues(),
+ m_schur(),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_matX()
+ {}
+
+    /** \brief Default constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa ComplexEigenSolver()
+ */
+ ComplexEigenSolver(Index size)
+ : m_eivec(size, size),
+ m_eivalues(size),
+ m_schur(size),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_matX(size, size)
+ {}
+
+ /** \brief Constructor; computes eigendecomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose eigendecomposition is to be computed.
+ * \param[in] computeEigenvectors If true, both the eigenvectors and the
+ * eigenvalues are computed; if false, only the eigenvalues are
+ * computed.
+ *
+ * This constructor calls compute() to compute the eigendecomposition.
+ */
+ ComplexEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
+ : m_eivec(matrix.rows(),matrix.cols()),
+ m_eivalues(matrix.cols()),
+ m_schur(matrix.rows()),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_matX(matrix.rows(),matrix.cols())
+ {
+ compute(matrix, computeEigenvectors);
+ }
+
+ /** \brief Returns the eigenvectors of given matrix.
+ *
+ * \returns A const reference to the matrix whose columns are the eigenvectors.
+ *
+ * \pre Either the constructor
+ * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
+ * function compute(const MatrixType& matrix, bool) has been called before
+ * to compute the eigendecomposition of a matrix, and
+ * \p computeEigenvectors was set to true (the default).
+ *
+ * This function returns a matrix whose columns are the eigenvectors. Column
+ * \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k
+ * \f$ as returned by eigenvalues(). The eigenvectors are normalized to
+ * have (Euclidean) norm equal to one. The matrix returned by this
+ * function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D
+ * V^{-1} \f$, if it exists.
+ *
+ * Example: \include ComplexEigenSolver_eigenvectors.cpp
+ * Output: \verbinclude ComplexEigenSolver_eigenvectors.out
+ */
+ const EigenvectorType& eigenvectors() const
+ {
+ eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec;
+ }
+
+ /** \brief Returns the eigenvalues of given matrix.
+ *
+ * \returns A const reference to the column vector containing the eigenvalues.
+ *
+ * \pre Either the constructor
+ * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
+ * function compute(const MatrixType& matrix, bool) has been called before
+ * to compute the eigendecomposition of a matrix.
+ *
+ * This function returns a column vector containing the
+ * eigenvalues. Eigenvalues are repeated according to their
+ * algebraic multiplicity, so there are as many eigenvalues as
+ * rows in the matrix. The eigenvalues are not sorted in any particular
+ * order.
+ *
+ * Example: \include ComplexEigenSolver_eigenvalues.cpp
+ * Output: \verbinclude ComplexEigenSolver_eigenvalues.out
+ */
+ const EigenvalueType& eigenvalues() const
+ {
+ eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+ return m_eivalues;
+ }
+
+ /** \brief Computes eigendecomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose eigendecomposition is to be computed.
+ * \param[in] computeEigenvectors If true, both the eigenvectors and the
+ * eigenvalues are computed; if false, only the eigenvalues are
+ * computed.
+ * \returns Reference to \c *this
+ *
+ * This function computes the eigenvalues of the complex matrix \p matrix.
+ * The eigenvalues() function can be used to retrieve them. If
+ * \p computeEigenvectors is true, then the eigenvectors are also computed
+ * and can be retrieved by calling eigenvectors().
+ *
+ * The matrix is first reduced to Schur form using the
+ * ComplexSchur class. The Schur decomposition is then used to
+ * compute the eigenvalues and eigenvectors.
+ *
+ * The cost of the computation is dominated by the cost of the
+ * Schur decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$
+ * is the size of the matrix.
+ *
+ * Example: \include ComplexEigenSolver_compute.cpp
+ * Output: \verbinclude ComplexEigenSolver_compute.out
+ */
+ ComplexEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
+
+ /** \brief Reports whether previous computation was successful.
+ *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+ return m_schur.info();
+ }
+
+ protected:
+ EigenvectorType m_eivec;
+ EigenvalueType m_eivalues;
+ ComplexSchur<MatrixType> m_schur;
+ bool m_isInitialized;
+ bool m_eigenvectorsOk;
+ EigenvectorType m_matX;
+
+ private:
+ void doComputeEigenvectors(RealScalar matrixnorm);
+ void sortEigenvalues(bool computeEigenvectors);
+};
+
+
+template<typename MatrixType>
+ComplexEigenSolver<MatrixType>& ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+{
+  // this code is inspired by Jampack
+ assert(matrix.cols() == matrix.rows());
+
+ // Do a complex Schur decomposition, A = U T U^*
+ // The eigenvalues are on the diagonal of T.
+ m_schur.compute(matrix, computeEigenvectors);
+
+ if(m_schur.info() == Success)
+ {
+ m_eivalues = m_schur.matrixT().diagonal();
+ if(computeEigenvectors)
+ doComputeEigenvectors(matrix.norm());
+ sortEigenvalues(computeEigenvectors);
+ }
+
+ m_isInitialized = true;
+ m_eigenvectorsOk = computeEigenvectors;
+ return *this;
+}
+
+
+template<typename MatrixType>
+void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)
+{
+ const Index n = m_eivalues.size();
+
+ // Compute X such that T = X D X^(-1), where D is the diagonal of T.
+ // The matrix X is unit triangular.
+ m_matX = EigenvectorType::Zero(n, n);
+ for(Index k=n-1 ; k>=0 ; k--)
+ {
+ m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
+ // Compute X(i,k) using the (i,k) entry of the equation X T = D X
+ for(Index i=k-1 ; i>=0 ; i--)
+ {
+ m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
+ if(k-i-1>0)
+ m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value();
+ ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k);
+ if(z==ComplexScalar(0))
+ {
+ // If the i-th and k-th eigenvalue are equal, then z equals 0.
+ // Use a small value instead, to prevent division by zero.
+ internal::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
+ }
+ m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
+ }
+ }
+
+ // Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
+ m_eivec.noalias() = m_schur.matrixU() * m_matX;
+ // .. and normalize the eigenvectors
+ for(Index k=0 ; k<n ; k++)
+ {
+ m_eivec.col(k).normalize();
+ }
+}
+
+
+template<typename MatrixType>
+void ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)
+{
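+  // Selection sort: at step i, swap the remaining eigenvalue of smallest
+  // modulus (and, if requested, the matching eigenvector column) into
+  // position i, so eigenvalues end up ordered by increasing absolute value.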
+ const Index n = m_eivalues.size();
+ for (Index i=0; i<n; i++)
+ {
+ Index k;
+ m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
+ if (k != 0)
+ {
+ k += i;
+ std::swap(m_eivalues[k],m_eivalues[i]);
+ if(computeEigenvectors)
+ m_eivec.col(i).swap(m_eivec.col(k));
+ }
+ }
+}
+
+
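+// A minimal usage sketch for the solver above (illustrative only; assumes a
+// dynamic-size complex float matrix):
+//   \code
+//   Eigen::MatrixXcf A = Eigen::MatrixXcf::Random(4,4);
+//   Eigen::ComplexEigenSolver<Eigen::MatrixXcf> ces(A);
+//   if (ces.info() == Eigen::Success)
+//     // Columns of eigenvectors() pair with entries of eigenvalues(),
+//     // so A * V - V * D should be small:
+//     std::cout << (A * ces.eigenvectors()
+//                   - ces.eigenvectors() * ces.eigenvalues().asDiagonal()).norm();
+//   \endcode
+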
+#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h
new file mode 100644
index 00000000000..ec93af2e58a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -0,0 +1,448 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Claire Maurice
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPLEX_SCHUR_H
+#define EIGEN_COMPLEX_SCHUR_H
+
+#include "./EigenvaluesCommon.h"
+#include "./HessenbergDecomposition.h"
+
+namespace internal {
+template<typename MatrixType, bool IsComplex> struct complex_schur_reduce_to_hessenberg;
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class ComplexSchur
+ *
+ * \brief Performs a complex Schur decomposition of a real or complex square matrix
+ *
+ * \tparam _MatrixType the type of the matrix of which we are
+ * computing the Schur decomposition; this is expected to be an
+ * instantiation of the Matrix class template.
+ *
+ * Given a real or complex square matrix A, this class computes the
+ * Schur decomposition: \f$ A = U T U^*\f$ where U is a unitary
+ * complex matrix, and T is a complex upper triangular matrix. The
+ * diagonal of the matrix T corresponds to the eigenvalues of the
+ * matrix A.
+ *
+ * Call the function compute() to compute the Schur decomposition of
+ * a given matrix. Alternatively, you can use the
+ * ComplexSchur(const MatrixType&, bool) constructor which computes
+ * the Schur decomposition at construction time. Once the
+ * decomposition is computed, you can use the matrixU() and matrixT()
+ * functions to retrieve the matrices U and T in the decomposition.
+ *
+ * \note This code is inspired by Jampack
+ *
+ * \sa class RealSchur, class EigenSolver, class ComplexEigenSolver
+ */
+template<typename _MatrixType> class ComplexSchur
+{
+ public:
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ /** \brief Scalar type for matrices of type \p _MatrixType. */
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \brief Complex scalar type for \p _MatrixType.
+ *
+ * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+ * \c float or \c double) and just \c Scalar if #Scalar is
+ * complex.
+ */
+ typedef std::complex<RealScalar> ComplexScalar;
+
+ /** \brief Type for the matrices in the Schur decomposition.
+ *
+ * This is a square matrix with entries of type #ComplexScalar.
+ * The size is the same as the size of \p _MatrixType.
+ */
+ typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrixType;
+
+ /** \brief Default constructor.
+ *
+ * \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed.
+ *
+ * The default constructor is useful in cases in which the user
+ * intends to perform decompositions via compute(). The \p size
+ * parameter is only used as a hint. It is not an error to give a
+ * wrong \p size, but it may impair performance.
+ *
+ * \sa compute() for an example.
+ */
+ ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+ : m_matT(size,size),
+ m_matU(size,size),
+ m_hess(size),
+ m_isInitialized(false),
+ m_matUisUptodate(false)
+ {}
+
+ /** \brief Constructor; computes Schur decomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose Schur decomposition is to be computed.
+ * \param[in] computeU If true, both T and U are computed; if false, only T is computed.
+ *
+ * This constructor calls compute() to compute the Schur decomposition.
+ *
+ * \sa matrixT() and matrixU() for examples.
+ */
+ ComplexSchur(const MatrixType& matrix, bool computeU = true)
+ : m_matT(matrix.rows(),matrix.cols()),
+ m_matU(matrix.rows(),matrix.cols()),
+ m_hess(matrix.rows()),
+ m_isInitialized(false),
+ m_matUisUptodate(false)
+ {
+ compute(matrix, computeU);
+ }
+
+ /** \brief Returns the unitary matrix in the Schur decomposition.
+ *
+ * \returns A const reference to the matrix U.
+ *
+ * It is assumed that either the constructor
+ * ComplexSchur(const MatrixType& matrix, bool computeU) or the
+ * member function compute(const MatrixType& matrix, bool computeU)
+ * has been called before to compute the Schur decomposition of a
+ * matrix, and that \p computeU was set to true (the default
+ * value).
+ *
+ * Example: \include ComplexSchur_matrixU.cpp
+ * Output: \verbinclude ComplexSchur_matrixU.out
+ */
+ const ComplexMatrixType& matrixU() const
+ {
+ eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+ eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition.");
+ return m_matU;
+ }
+
+ /** \brief Returns the triangular matrix in the Schur decomposition.
+ *
+ * \returns A const reference to the matrix T.
+ *
+ * It is assumed that either the constructor
+ * ComplexSchur(const MatrixType& matrix, bool computeU) or the
+ * member function compute(const MatrixType& matrix, bool computeU)
+ * has been called before to compute the Schur decomposition of a
+ * matrix.
+ *
+ * Note that this function returns a plain square matrix. If you want to reference
+ * only the upper triangular part, use:
+ * \code schur.matrixT().triangularView<Upper>() \endcode
+ *
+ * Example: \include ComplexSchur_matrixT.cpp
+ * Output: \verbinclude ComplexSchur_matrixT.out
+ */
+ const ComplexMatrixType& matrixT() const
+ {
+ eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+ return m_matT;
+ }
+
+ /** \brief Computes Schur decomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose Schur decomposition is to be computed.
+ * \param[in] computeU If true, both T and U are computed; if false, only T is computed.
+ * \returns Reference to \c *this
+ *
+ * The Schur decomposition is computed by first reducing the
+ * matrix to Hessenberg form using the class
+ * HessenbergDecomposition. The Hessenberg matrix is then reduced
+ * to triangular form by performing QR iterations with a single
+ * shift. The cost of computing the Schur decomposition depends
+      * on the number of iterations; as a rough guide, it may be taken
+ * to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops
+ * if \a computeU is false.
+ *
+ * Example: \include ComplexSchur_compute.cpp
+ * Output: \verbinclude ComplexSchur_compute.out
+ */
+ ComplexSchur& compute(const MatrixType& matrix, bool computeU = true);
+
+ /** \brief Reports whether previous computation was successful.
+ *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+ return m_info;
+ }
+
+ /** \brief Maximum number of iterations.
+ *
+ * Maximum number of iterations allowed for an eigenvalue to converge.
+ */
+ static const int m_maxIterations = 30;
+
+ protected:
+ ComplexMatrixType m_matT, m_matU;
+ HessenbergDecomposition<MatrixType> m_hess;
+ ComputationInfo m_info;
+ bool m_isInitialized;
+ bool m_matUisUptodate;
+
+ private:
+ bool subdiagonalEntryIsNeglegible(Index i);
+ ComplexScalar computeShift(Index iu, Index iter);
+ void reduceToTriangularForm(bool computeU);
+ friend struct internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
+};
+
+namespace internal {
+
+/** Computes the principal value of the square root of the complex number \a z. */
+template<typename RealScalar>
+std::complex<RealScalar> sqrt(const std::complex<RealScalar> &z)
+{
+ RealScalar t, tre, tim;
+
+ t = abs(z);
+
+ if (abs(real(z)) <= abs(imag(z)))
+ {
+ // No cancellation in these formulas
+ tre = sqrt(RealScalar(0.5)*(t + real(z)));
+ tim = sqrt(RealScalar(0.5)*(t - real(z)));
+ }
+ else
+ {
+ // Stable computation of the above formulas
+ if (z.real() > RealScalar(0))
+ {
+ tre = t + z.real();
+ tim = abs(imag(z))*sqrt(RealScalar(0.5)/tre);
+ tre = sqrt(RealScalar(0.5)*tre);
+ }
+ else
+ {
+ tim = t - z.real();
+ tre = abs(imag(z))*sqrt(RealScalar(0.5)/tim);
+ tim = sqrt(RealScalar(0.5)*tim);
+ }
+ }
+ if(z.imag() < RealScalar(0))
+ tim = -tim;
+
+ return (std::complex<RealScalar>(tre,tim));
+}
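+
+// Quick check of the branch taken when |Re z| <= |Im z| (illustrative):
+// for z = -3 + 4i, t = |z| = 5, tre = sqrt(0.5*(5-3)) = 1 and
+// tim = sqrt(0.5*(5+3)) = 2, giving 1 + 2i; indeed (1+2i)^2 = -3 + 4i.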
+} // end namespace internal
+
+
+/** If m_matT(i+1,i) is negligible in floating point arithmetic
+ * compared to m_matT(i,i) and m_matT(i+1,i+1), then set it to zero and
+ * return true, else return false. */
+template<typename MatrixType>
+inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
+{
+ RealScalar d = internal::norm1(m_matT.coeff(i,i)) + internal::norm1(m_matT.coeff(i+1,i+1));
+ RealScalar sd = internal::norm1(m_matT.coeff(i+1,i));
+ if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
+ {
+ m_matT.coeffRef(i+1,i) = ComplexScalar(0);
+ return true;
+ }
+ return false;
+}
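+
+// (internal::norm1 above is the inexpensive |Re| + |Im| estimate of the
+// modulus, so the subdiagonal entry is weighed against its two diagonal
+// neighbours rather than against an exact 2-norm.)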
+
+
+/** Compute the shift in the current QR iteration. */
+template<typename MatrixType>
+typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
+{
+ if (iter == 10 || iter == 20)
+ {
+ // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
+ return internal::abs(internal::real(m_matT.coeff(iu,iu-1))) + internal::abs(internal::real(m_matT.coeff(iu-1,iu-2)));
+ }
+
+ // compute the shift as one of the eigenvalues of t, the 2x2
+ // diagonal block on the bottom of the active submatrix
+ Matrix<ComplexScalar,2,2> t = m_matT.template block<2,2>(iu-1,iu-1);
+ RealScalar normt = t.cwiseAbs().sum();
+  t /= normt; // the normalization by normt is to avoid under/overflow
+
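+  // With b = t(0,1)*t(1,0) and c = t(0,0)-t(1,1), the eigenvalues of the
+  // 2x2 block are (trace +/- sqrt(c^2 + 4b))/2, i.e. the roots of its
+  // characteristic polynomial.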
+ ComplexScalar b = t.coeff(0,1) * t.coeff(1,0);
+ ComplexScalar c = t.coeff(0,0) - t.coeff(1,1);
+ ComplexScalar disc = internal::sqrt(c*c + RealScalar(4)*b);
+ ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b;
+ ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);
+ ComplexScalar eival1 = (trace + disc) / RealScalar(2);
+ ComplexScalar eival2 = (trace - disc) / RealScalar(2);
+
+ if(internal::norm1(eival1) > internal::norm1(eival2))
+ eival2 = det / eival1;
+ else
+ eival1 = det / eival2;
+
+ // choose the eigenvalue closest to the bottom entry of the diagonal
+ if(internal::norm1(eival1-t.coeff(1,1)) < internal::norm1(eival2-t.coeff(1,1)))
+ return normt * eival1;
+ else
+ return normt * eival2;
+}
+
+
+template<typename MatrixType>
+ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
+{
+ m_matUisUptodate = false;
+ eigen_assert(matrix.cols() == matrix.rows());
+
+ if(matrix.cols() == 1)
+ {
+ m_matT = matrix.template cast<ComplexScalar>();
+ if(computeU) m_matU = ComplexMatrixType::Identity(1,1);
+ m_info = Success;
+ m_isInitialized = true;
+ m_matUisUptodate = computeU;
+ return *this;
+ }
+
+ internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix, computeU);
+ reduceToTriangularForm(computeU);
+ return *this;
+}
+
+namespace internal {
+
+/* Reduce given matrix to Hessenberg form */
+template<typename MatrixType, bool IsComplex>
+struct complex_schur_reduce_to_hessenberg
+{
+ // this is the implementation for the case IsComplex = true
+ static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
+ {
+ _this.m_hess.compute(matrix);
+ _this.m_matT = _this.m_hess.matrixH();
+ if(computeU) _this.m_matU = _this.m_hess.matrixQ();
+ }
+};
+
+template<typename MatrixType>
+struct complex_schur_reduce_to_hessenberg<MatrixType, false>
+{
+ static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
+ {
+ typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
+ typedef typename ComplexSchur<MatrixType>::ComplexMatrixType ComplexMatrixType;
+
+    // Note: m_hess is over RealScalar; m_matT and m_matU are over ComplexScalar
+ _this.m_hess.compute(matrix);
+ _this.m_matT = _this.m_hess.matrixH().template cast<ComplexScalar>();
+ if(computeU)
+ {
+ // This may cause an allocation which seems to be avoidable
+ MatrixType Q = _this.m_hess.matrixQ();
+ _this.m_matU = Q.template cast<ComplexScalar>();
+ }
+ }
+};
+
+} // end namespace internal
+
+// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration.
+template<typename MatrixType>
+void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
+{
+ // The matrix m_matT is divided in three parts.
+ // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
+ // Rows il,...,iu is the part we are working on (the active submatrix).
+ // Rows iu+1,...,end are already brought in triangular form.
+ Index iu = m_matT.cols() - 1;
+ Index il;
+ Index iter = 0; // number of iterations we are working on the (iu,iu) element
+
+ while(true)
+ {
+ // find iu, the bottom row of the active submatrix
+ while(iu > 0)
+ {
+ if(!subdiagonalEntryIsNeglegible(iu-1)) break;
+ iter = 0;
+ --iu;
+ }
+
+ // if iu is zero then we are done; the whole matrix is triangularized
+ if(iu==0) break;
+
+ // if we spent too many iterations on the current element, we give up
+ iter++;
+ if(iter > m_maxIterations) break;
+
+ // find il, the top row of the active submatrix
+ il = iu-1;
+ while(il > 0 && !subdiagonalEntryIsNeglegible(il-1))
+ {
+ --il;
+ }
+
+ /* perform the QR step using Givens rotations. The first rotation
+ creates a bulge; the (il+2,il) element becomes nonzero. This
+ bulge is chased down to the bottom of the active submatrix. */
+
+ ComplexScalar shift = computeShift(iu, iter);
+ JacobiRotation<ComplexScalar> rot;
+ rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
+ m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
+ m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
+ if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
+
+ for(Index i=il+1 ; i<iu ; i++)
+ {
+ rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
+ m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
+ m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
+ m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
+ if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
+ }
+ }
+
+ if(iter <= m_maxIterations)
+ m_info = Success;
+ else
+ m_info = NoConvergence;
+
+ m_isInitialized = true;
+ m_matUisUptodate = computeU;
+}
+
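+// A minimal usage sketch for the class above (illustrative only; assumes a
+// dynamic-size complex float matrix):
+//   \code
+//   Eigen::MatrixXcf A = Eigen::MatrixXcf::Random(4,4);
+//   Eigen::ComplexSchur<Eigen::MatrixXcf> schur(A);
+//   if (schur.info() == Eigen::Success)
+//     // A == U * T * U^* up to rounding error:
+//     std::cout << (A - schur.matrixU() * schur.matrixT()
+//                       * schur.matrixU().adjoint()).norm();
+//   \endcode
+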
+#endif // EIGEN_COMPLEX_SCHUR_H
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h
new file mode 100644
index 00000000000..ac4c4242dd4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h
@@ -0,0 +1,588 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_EIGENSOLVER_H
+#define EIGEN_EIGENSOLVER_H
+
+#include "./EigenvaluesCommon.h"
+#include "./RealSchur.h"
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class EigenSolver
+ *
+ * \brief Computes eigenvalues and eigenvectors of general matrices
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the
+ * eigendecomposition; this is expected to be an instantiation of the Matrix
+ * class template. Currently, only real matrices are supported.
+ *
+ * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
+ * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$. If
+ * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
+ * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
+ * V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
+ * have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition.
+ *
+ * The eigenvalues and eigenvectors of a matrix may be complex, even when the
+ * matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D
+ * \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the
+ * matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to
+ * have blocks of the form
+ * \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f]
+ * (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal. These
+ * blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call
+ * this variant of the eigendecomposition the pseudo-eigendecomposition.
+ *
+ * Call the function compute() to compute the eigenvalues and eigenvectors of
+ * a given matrix. Alternatively, you can use the
+ * EigenSolver(const MatrixType&, bool) constructor which computes the
+ * eigenvalues and eigenvectors at construction time. Once the eigenvalues and
+ * eigenvectors are computed, they can be retrieved with the eigenvalues() and
+ * eigenvectors() functions. The pseudoEigenvalueMatrix() and
+ * pseudoEigenvectors() methods allow the construction of the
+ * pseudo-eigendecomposition.
+ *
+ * The documentation for EigenSolver(const MatrixType&, bool) contains an
+ * example of the typical use of this class.
+ *
+ * \note The implementation is adapted from
+ * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
+ * Their code is based on EISPACK.
+ *
+ * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
+ */
+template<typename _MatrixType> class EigenSolver
+{
+ public:
+
+ /** \brief Synonym for the template parameter \p _MatrixType. */
+ typedef _MatrixType MatrixType;
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ /** \brief Scalar type for matrices of type #MatrixType. */
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \brief Complex scalar type for #MatrixType.
+ *
+ * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+ * \c float or \c double) and just \c Scalar if #Scalar is
+ * complex.
+ */
+ typedef std::complex<RealScalar> ComplexScalar;
+
+ /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+ *
+ * This is a column vector with entries of type #ComplexScalar.
+ * The length of the vector is the size of #MatrixType.
+ */
+ typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+
+ /** \brief Type for matrix of eigenvectors as returned by eigenvectors().
+ *
+ * This is a square matrix with entries of type #ComplexScalar.
+ * The size is the same as the size of #MatrixType.
+ */
+ typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
+
+ /** \brief Default constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via EigenSolver::compute(const MatrixType&, bool).
+ *
+ * \sa compute() for an example.
+ */
+    EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(), m_matT(), m_tmp() {}
+
+ /** \brief Default constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa EigenSolver()
+ */
+ EigenSolver(Index size)
+ : m_eivec(size, size),
+ m_eivalues(size),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_realSchur(size),
+ m_matT(size, size),
+ m_tmp(size)
+ {}
+
+ /** \brief Constructor; computes eigendecomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose eigendecomposition is to be computed.
+ * \param[in] computeEigenvectors If true, both the eigenvectors and the
+ * eigenvalues are computed; if false, only the eigenvalues are
+ * computed.
+ *
+ * This constructor calls compute() to compute the eigenvalues
+ * and eigenvectors.
+ *
+ * Example: \include EigenSolver_EigenSolver_MatrixType.cpp
+ * Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out
+ *
+ * \sa compute()
+ */
+ EigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
+ : m_eivec(matrix.rows(), matrix.cols()),
+ m_eivalues(matrix.cols()),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_realSchur(matrix.cols()),
+ m_matT(matrix.rows(), matrix.cols()),
+ m_tmp(matrix.cols())
+ {
+ compute(matrix, computeEigenvectors);
+ }
+
+ /** \brief Returns the eigenvectors of given matrix.
+ *
+ * \returns %Matrix whose columns are the (possibly complex) eigenvectors.
+ *
+ * \pre Either the constructor
+ * EigenSolver(const MatrixType&,bool) or the member function
+ * compute(const MatrixType&, bool) has been called before, and
+ * \p computeEigenvectors was set to true (the default).
+ *
+ * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+ * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
+ * eigenvectors are normalized to have (Euclidean) norm equal to one. The
+ * matrix returned by this function is the matrix \f$ V \f$ in the
+ * eigendecomposition \f$ A = V D V^{-1} \f$, if it exists.
+ *
+ * Example: \include EigenSolver_eigenvectors.cpp
+ * Output: \verbinclude EigenSolver_eigenvectors.out
+ *
+ * \sa eigenvalues(), pseudoEigenvectors()
+ */
+ EigenvectorsType eigenvectors() const;
+
+ /** \brief Returns the pseudo-eigenvectors of given matrix.
+ *
+ * \returns Const reference to matrix whose columns are the pseudo-eigenvectors.
+ *
+ * \pre Either the constructor
+ * EigenSolver(const MatrixType&,bool) or the member function
+ * compute(const MatrixType&, bool) has been called before, and
+ * \p computeEigenvectors was set to true (the default).
+ *
+ * The real matrix \f$ V \f$ returned by this function and the
+ * block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix()
+ * satisfy \f$ AV = VD \f$.
+ *
+ * Example: \include EigenSolver_pseudoEigenvectors.cpp
+ * Output: \verbinclude EigenSolver_pseudoEigenvectors.out
+ *
+ * \sa pseudoEigenvalueMatrix(), eigenvectors()
+ */
+ const MatrixType& pseudoEigenvectors() const
+ {
+ eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec;
+ }
+
+ /** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition.
+ *
+ * \returns A block-diagonal matrix.
+ *
+ * \pre Either the constructor
+ * EigenSolver(const MatrixType&,bool) or the member function
+ * compute(const MatrixType&, bool) has been called before.
+ *
+ * The matrix \f$ D \f$ returned by this function is real and
+ * block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2
+ * blocks of the form
+ * \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$.
+ * These blocks are not sorted in any particular order.
+ * The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by
+ * pseudoEigenvectors() satisfy \f$ AV = VD \f$.
+ *
+ * \sa pseudoEigenvectors() for an example, eigenvalues()
+ */
+ MatrixType pseudoEigenvalueMatrix() const;
+
+ /** \brief Returns the eigenvalues of given matrix.
+ *
+ * \returns A const reference to the column vector containing the eigenvalues.
+ *
+ * \pre Either the constructor
+ * EigenSolver(const MatrixType&,bool) or the member function
+ * compute(const MatrixType&, bool) has been called before.
+ *
+ * The eigenvalues are repeated according to their algebraic multiplicity,
+ * so there are as many eigenvalues as rows in the matrix. The eigenvalues
+ * are not sorted in any particular order.
+ *
+ * Example: \include EigenSolver_eigenvalues.cpp
+ * Output: \verbinclude EigenSolver_eigenvalues.out
+ *
+ * \sa eigenvectors(), pseudoEigenvalueMatrix(),
+ * MatrixBase::eigenvalues()
+ */
+ const EigenvalueType& eigenvalues() const
+ {
+ eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+ return m_eivalues;
+ }
+
+ /** \brief Computes eigendecomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose eigendecomposition is to be computed.
+ * \param[in] computeEigenvectors If true, both the eigenvectors and the
+ * eigenvalues are computed; if false, only the eigenvalues are
+ * computed.
+ * \returns Reference to \c *this
+ *
+ * This function computes the eigenvalues of the real matrix \p matrix.
+ * The eigenvalues() function can be used to retrieve them. If
+ * \p computeEigenvectors is true, then the eigenvectors are also computed
+ * and can be retrieved by calling eigenvectors().
+ *
+ * The matrix is first reduced to real Schur form using the RealSchur
+ * class. The Schur decomposition is then used to compute the eigenvalues
+ * and eigenvectors.
+ *
+ * The cost of the computation is dominated by the cost of the
+ * Schur decomposition, which is very approximately \f$ 25n^3 \f$
+ * (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors
+ * is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false.
+ *
+      * This method reuses the allocated data in the EigenSolver object.
+ *
+ * Example: \include EigenSolver_compute.cpp
+ * Output: \verbinclude EigenSolver_compute.out
+ */
+ EigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
+
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+ return m_realSchur.info();
+ }
+
+ private:
+ void doComputeEigenvectors();
+
+ protected:
+ MatrixType m_eivec;
+ EigenvalueType m_eivalues;
+ bool m_isInitialized;
+ bool m_eigenvectorsOk;
+ RealSchur<MatrixType> m_realSchur;
+ MatrixType m_matT;
+
+ typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+ ColumnVectorType m_tmp;
+};
+
+template<typename MatrixType>
+MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
+{
+ eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+ Index n = m_eivalues.rows();
+ MatrixType matD = MatrixType::Zero(n,n);
+ for (Index i=0; i<n; ++i)
+ {
+ if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i))))
+ matD.coeffRef(i,i) = internal::real(m_eivalues.coeff(i));
+ else
+ {
+ matD.template block<2,2>(i,i) << internal::real(m_eivalues.coeff(i)), internal::imag(m_eivalues.coeff(i)),
+ -internal::imag(m_eivalues.coeff(i)), internal::real(m_eivalues.coeff(i));
+ ++i;
+ }
+ }
+ return matD;
+}
+
+template<typename MatrixType>
+typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
+{
+ eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ Index n = m_eivec.cols();
+ EigenvectorsType matV(n,n);
+ for (Index j=0; j<n; ++j)
+ {
+ if (internal::isMuchSmallerThan(internal::imag(m_eivalues.coeff(j)), internal::real(m_eivalues.coeff(j))))
+ {
+      // we have a real eigenvalue
+ matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
+ matV.col(j).normalize();
+ }
+ else
+ {
+      // we have a pair of complex eigenvalues
+ for (Index i=0; i<n; ++i)
+ {
+ matV.coeffRef(i,j) = ComplexScalar(m_eivec.coeff(i,j), m_eivec.coeff(i,j+1));
+ matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
+ }
+ matV.col(j).normalize();
+ matV.col(j+1).normalize();
+ ++j;
+ }
+ }
+ return matV;
+}
+
+template<typename MatrixType>
+EigenSolver<MatrixType>& EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+{
+ assert(matrix.cols() == matrix.rows());
+
+ // Reduce to real Schur form.
+ m_realSchur.compute(matrix, computeEigenvectors);
+ if (m_realSchur.info() == Success)
+ {
+ m_matT = m_realSchur.matrixT();
+ if (computeEigenvectors)
+ m_eivec = m_realSchur.matrixU();
+
+ // Compute eigenvalues from matT
+ m_eivalues.resize(matrix.cols());
+ Index i = 0;
+ while (i < matrix.cols())
+ {
+ if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0))
+ {
+ m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
+ ++i;
+ }
+ else
+ {
+ Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
+ Scalar z = internal::sqrt(internal::abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
+ m_eivalues.coeffRef(i) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
+ m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
+ i += 2;
+ }
+ }
+
+ // Compute eigenvectors.
+ if (computeEigenvectors)
+ doComputeEigenvectors();
+ }
+
+ m_isInitialized = true;
+ m_eigenvectorsOk = computeEigenvectors;
+
+ return *this;
+}
+
+// Complex scalar division.
+template<typename Scalar>
+std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
+{
+ Scalar r,d;
+ if (internal::abs(yr) > internal::abs(yi))
+ {
+ r = yi/yr;
+ d = yr + r*yi;
+ return std::complex<Scalar>((xr + r*xi)/d, (xi - r*xr)/d);
+ }
+ else
+ {
+ r = yr/yi;
+ d = yi + r*yr;
+ return std::complex<Scalar>((r*xr + xi)/d, (r*xi - xr)/d);
+ }
+}
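+
+// Quick check of the |yr| <= |yi| branch (illustrative): for (1+2i)/(3+4i),
+// r = 3/4 and d = 4 + (3/4)*3 = 25/4, giving ((3/4*1 + 2)/d, (3/4*2 - 1)/d)
+// = 0.44 + 0.08i, the exact quotient (11+2i)/25; the scaling by r avoids
+// overflow in computing yr*yr + yi*yi directly.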
+
+
+template<typename MatrixType>
+void EigenSolver<MatrixType>::doComputeEigenvectors()
+{
+ const Index size = m_eivec.cols();
+ const Scalar eps = NumTraits<Scalar>::epsilon();
+
+ // inefficient! this is already computed in RealSchur
+ Scalar norm = 0.0;
+ for (Index j = 0; j < size; ++j)
+ {
+ norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
+ }
+
+ // Backsubstitute to find vectors of upper triangular form
+ if (norm == 0.0)
+ {
+ return;
+ }
+
+ for (Index n = size-1; n >= 0; n--)
+ {
+ Scalar p = m_eivalues.coeff(n).real();
+ Scalar q = m_eivalues.coeff(n).imag();
+
+    // Real eigenvalue (q == 0): back-substitute for a real eigenvector
+ if (q == Scalar(0))
+ {
+ Scalar lastr=0, lastw=0;
+ Index l = n;
+
+ m_matT.coeffRef(n,n) = 1.0;
+ for (Index i = n-1; i >= 0; i--)
+ {
+ Scalar w = m_matT.coeff(i,i) - p;
+ Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
+
+ if (m_eivalues.coeff(i).imag() < 0.0)
+ {
+ lastw = w;
+ lastr = r;
+ }
+ else
+ {
+ l = i;
+ if (m_eivalues.coeff(i).imag() == 0.0)
+ {
+ if (w != 0.0)
+ m_matT.coeffRef(i,n) = -r / w;
+ else
+ m_matT.coeffRef(i,n) = -r / (eps * norm);
+ }
+ else // Solve real equations
+ {
+ Scalar x = m_matT.coeff(i,i+1);
+ Scalar y = m_matT.coeff(i+1,i);
+ Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
+ Scalar t = (x * lastr - lastw * r) / denom;
+ m_matT.coeffRef(i,n) = t;
+ if (internal::abs(x) > internal::abs(lastw))
+ m_matT.coeffRef(i+1,n) = (-r - w * t) / x;
+ else
+ m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;
+ }
+
+ // Overflow control
+ Scalar t = internal::abs(m_matT.coeff(i,n));
+ if ((eps * t) * t > Scalar(1))
+ m_matT.col(n).tail(size-i) /= t;
+ }
+ }
+ }
+ else if (q < Scalar(0) && n > 0) // Complex vector
+ {
+ Scalar lastra=0, lastsa=0, lastw=0;
+ Index l = n-1;
+
+      // Last vector component is imaginary, so the matrix is triangular
+ if (internal::abs(m_matT.coeff(n,n-1)) > internal::abs(m_matT.coeff(n-1,n)))
+ {
+ m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);
+ m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);
+ }
+ else
+ {
+ std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q);
+ m_matT.coeffRef(n-1,n-1) = internal::real(cc);
+ m_matT.coeffRef(n-1,n) = internal::imag(cc);
+ }
+ m_matT.coeffRef(n,n-1) = 0.0;
+ m_matT.coeffRef(n,n) = 1.0;
+ for (Index i = n-2; i >= 0; i--)
+ {
+ Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));
+ Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
+ Scalar w = m_matT.coeff(i,i) - p;
+
+ if (m_eivalues.coeff(i).imag() < 0.0)
+ {
+ lastw = w;
+ lastra = ra;
+ lastsa = sa;
+ }
+ else
+ {
+ l = i;
+ if (m_eivalues.coeff(i).imag() == RealScalar(0))
+ {
+ std::complex<Scalar> cc = cdiv(-ra,-sa,w,q);
+ m_matT.coeffRef(i,n-1) = internal::real(cc);
+ m_matT.coeffRef(i,n) = internal::imag(cc);
+ }
+ else
+ {
+ // Solve complex equations
+ Scalar x = m_matT.coeff(i,i+1);
+ Scalar y = m_matT.coeff(i+1,i);
+ Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
+ Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
+ if ((vr == 0.0) && (vi == 0.0))
+ vr = eps * norm * (internal::abs(w) + internal::abs(q) + internal::abs(x) + internal::abs(y) + internal::abs(lastw));
+
+ std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
+ m_matT.coeffRef(i,n-1) = internal::real(cc);
+ m_matT.coeffRef(i,n) = internal::imag(cc);
+ if (internal::abs(x) > (internal::abs(lastw) + internal::abs(q)))
+ {
+ m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
+ m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;
+ }
+ else
+ {
+ cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q);
+ m_matT.coeffRef(i+1,n-1) = internal::real(cc);
+ m_matT.coeffRef(i+1,n) = internal::imag(cc);
+ }
+ }
+
+ // Overflow control
+ using std::max;
+ Scalar t = (max)(internal::abs(m_matT.coeff(i,n-1)),internal::abs(m_matT.coeff(i,n)));
+ if ((eps * t) * t > Scalar(1))
+ m_matT.block(i, n-1, size-i, 2) /= t;
+
+ }
+ }
+ }
+ else
+ {
+ eigen_assert("Internal bug in EigenSolver"); // this should not happen
+ }
+ }
+
+ // Back transformation to get eigenvectors of original matrix
+ for (Index j = size-1; j >= 0; j--)
+ {
+ m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);
+ m_eivec.col(j) = m_tmp;
+ }
+}
+
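+// A minimal usage sketch for the class above (illustrative only; assumes a
+// dynamic-size double matrix):
+//   \code
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
+//   Eigen::EigenSolver<Eigen::MatrixXd> es(A);
+//   // The real pseudo-eigendecomposition satisfies A*V == V*D up to
+//   // rounding error, even when some eigenvalues are complex:
+//   Eigen::MatrixXd V = es.pseudoEigenvectors();
+//   Eigen::MatrixXd D = es.pseudoEigenvalueMatrix();
+//   std::cout << (A*V - V*D).norm();
+//   \endcode
+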
+#endif // EIGEN_EIGENSOLVER_H
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/EigenvaluesCommon.h b/extern/Eigen3/Eigen/src/Eigenvalues/EigenvaluesCommon.h
new file mode 100644
index 00000000000..749bea79500
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/EigenvaluesCommon.h
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_EIGENVALUES_COMMON_H
+#define EIGEN_EIGENVALUES_COMMON_H
+
+
+
+#endif // EIGEN_EIGENVALUES_COMMON_H
+
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
new file mode 100644
index 00000000000..980af14ce71
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
@@ -0,0 +1,239 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
+#define EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
+
+#include "./EigenvaluesCommon.h"
+#include "./Tridiagonalization.h"
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class GeneralizedSelfAdjointEigenSolver
+ *
+ * \brief Computes eigenvalues and eigenvectors of the generalized selfadjoint eigen problem
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the
+ * eigendecomposition; this is expected to be an instantiation of the Matrix
+ * class template.
+ *
+ * This class solves the generalized eigenvalue problem
+ * \f$ Av = \lambda Bv \f$. In this case, the matrix \f$ A \f$ should be
+ * selfadjoint and the matrix \f$ B \f$ should be positive definite.
+ *
+ * Only the \b lower \b triangular \b part of the input matrix is referenced.
+ *
+ * Call the function compute() to compute the eigenvalues and eigenvectors of
+ * a given matrix. Alternatively, you can use the
+ * GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
+ * constructor which computes the eigenvalues and eigenvectors at construction time.
+ * Once the eigenvalues and eigenvectors are computed, they can be retrieved with the eigenvalues()
+ * and eigenvectors() functions.
+ *
+ * The documentation for GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
+ * contains an example of the typical use of this class.
+ *
+ * \sa class SelfAdjointEigenSolver, class EigenSolver, class ComplexEigenSolver
+ */
+template<typename _MatrixType>
+class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixType>
+{
+ typedef SelfAdjointEigenSolver<_MatrixType> Base;
+ public:
+
+ typedef typename Base::Index Index;
+ typedef _MatrixType MatrixType;
+
+ /** \brief Default constructor for fixed-size matrices.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via compute(). This constructor
+ * can only be used if \p _MatrixType is a fixed-size matrix; use
+ * GeneralizedSelfAdjointEigenSolver(Index) for dynamic-size matrices.
+ */
+ GeneralizedSelfAdjointEigenSolver() : Base() {}
+
+ /** \brief Constructor, pre-allocates memory for dynamic-size matrices.
+ *
+ * \param [in] size Positive integer, size of the matrix whose
+ * eigenvalues and eigenvectors will be computed.
+ *
+ * This constructor is useful for dynamic-size matrices, when the user
+ * intends to perform decompositions via compute(). The \p size
+ * parameter is only used as a hint. It is not an error to give a wrong
+ * \p size, but it may impair performance.
+ *
+ * \sa compute() for an example
+ */
+ GeneralizedSelfAdjointEigenSolver(Index size)
+ : Base(size)
+ {}
+
+ /** \brief Constructor; computes generalized eigendecomposition of given matrix pencil.
+ *
+ * \param[in] matA Selfadjoint matrix in matrix pencil.
+ * Only the lower triangular part of the matrix is referenced.
+ * \param[in] matB Positive-definite matrix in matrix pencil.
+ * Only the lower triangular part of the matrix is referenced.
+ * \param[in] options An or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.
+ * Default is #ComputeEigenvectors|#Ax_lBx.
+ *
+ * This constructor calls compute(const MatrixType&, const MatrixType&, int)
+ * to compute the eigenvalues and (if requested) the eigenvectors of the
+ * generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA the
+ * selfadjoint matrix \f$ A \f$ and \a matB the positive definite matrix
+ * \f$ B \f$. Each eigenvector \f$ x \f$ satisfies the property
+ * \f$ x^* B x = 1 \f$. The eigenvectors are computed if
+ * \a options contains ComputeEigenvectors.
+ *
+ * In addition, the two following variants can be solved via \p options:
+ * - \c ABx_lx: \f$ ABx = \lambda x \f$
+ * - \c BAx_lx: \f$ BAx = \lambda x \f$
+ *
+ * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out
+ *
+ * \sa compute(const MatrixType&, const MatrixType&, int)
+ */
+ GeneralizedSelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB,
+ int options = ComputeEigenvectors|Ax_lBx)
+ : Base(matA.cols())
+ {
+ compute(matA, matB, options);
+ }
+
+ /** \brief Computes generalized eigendecomposition of given matrix pencil.
+ *
+ * \param[in] matA Selfadjoint matrix in matrix pencil.
+ * Only the lower triangular part of the matrix is referenced.
+ * \param[in] matB Positive-definite matrix in matrix pencil.
+ * Only the lower triangular part of the matrix is referenced.
+ * \param[in] options An or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.
+ * Default is #ComputeEigenvectors|#Ax_lBx.
+ *
+ * \returns Reference to \c *this
+ *
+ * According to \p options, this function computes eigenvalues and (if requested)
+ * the eigenvectors of one of the following three generalized eigenproblems:
+ * - \c Ax_lBx: \f$ Ax = \lambda B x \f$
+ * - \c ABx_lx: \f$ ABx = \lambda x \f$
+ * - \c BAx_lx: \f$ BAx = \lambda x \f$
+ * with \a matA the selfadjoint matrix \f$ A \f$ and \a matB the positive definite
+ * matrix \f$ B \f$.
+ * In addition, each eigenvector \f$ x \f$ satisfies the property \f$ x^* B x = 1 \f$.
+ *
+ * The eigenvalues() function can be used to retrieve
+ * the eigenvalues. If \p options contains ComputeEigenvectors, then the
+ * eigenvectors are also computed and can be retrieved by calling
+ * eigenvectors().
+ *
+ * The implementation uses LLT to compute the Cholesky decomposition
+ * \f$ B = LL^* \f$ and computes the classical eigendecomposition
+ * of the selfadjoint matrix \f$ L^{-1} A (L^*)^{-1} \f$ if \p options contains Ax_lBx
+ * and of \f$ L^{*} A L \f$ otherwise. This solves the
+ * generalized eigenproblem, because any solution of the generalized
+ * eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution
+ * \f$ L^{-1} A (L^*)^{-1} (L^* x) = \lambda (L^* x) \f$ of the
+ * eigenproblem for \f$ L^{-1} A (L^*)^{-1} \f$. Similar statements
+ * can be made for the two other variants.
+ *
+ * Example: \include SelfAdjointEigenSolver_compute_MatrixType2.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType2.out
+ *
+ * \sa GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
+ */
+ GeneralizedSelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB,
+ int options = ComputeEigenvectors|Ax_lBx);
+
+ protected:
+
+};
+
+
+template<typename MatrixType>
+GeneralizedSelfAdjointEigenSolver<MatrixType>& GeneralizedSelfAdjointEigenSolver<MatrixType>::
+compute(const MatrixType& matA, const MatrixType& matB, int options)
+{
+ eigen_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows());
+ eigen_assert((options&~(EigVecMask|GenEigMask))==0
+ && (options&EigVecMask)!=EigVecMask
+ && ((options&GenEigMask)==0 || (options&GenEigMask)==Ax_lBx
+ || (options&GenEigMask)==ABx_lx || (options&GenEigMask)==BAx_lx)
+ && "invalid option parameter");
+
+ bool computeEigVecs = ((options&EigVecMask)==0) || ((options&EigVecMask)==ComputeEigenvectors);
+
+ // Compute the Cholesky decomposition of matB = L L' = U'U
+ LLT<MatrixType> cholB(matB);
+
+ int type = (options&GenEigMask);
+ if(type==0)
+ type = Ax_lBx;
+
+ if(type==Ax_lBx)
+ {
+ // compute C = inv(L) A inv(L')
+ MatrixType matC = matA.template selfadjointView<Lower>();
+ cholB.matrixL().template solveInPlace<OnTheLeft>(matC);
+ cholB.matrixU().template solveInPlace<OnTheRight>(matC);
+
+ Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly );
+
+ // transform back the eigenvectors: evecs = inv(U) * evecs
+ if(computeEigVecs)
+ cholB.matrixU().solveInPlace(Base::m_eivec);
+ }
+ else if(type==ABx_lx)
+ {
+ // compute C = L' A L
+ MatrixType matC = matA.template selfadjointView<Lower>();
+ matC = matC * cholB.matrixL();
+ matC = cholB.matrixU() * matC;
+
+ Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);
+
+ // transform back the eigenvectors: evecs = inv(U) * evecs
+ if(computeEigVecs)
+ cholB.matrixU().solveInPlace(Base::m_eivec);
+ }
+ else if(type==BAx_lx)
+ {
+ // compute C = L' A L
+ MatrixType matC = matA.template selfadjointView<Lower>();
+ matC = matC * cholB.matrixL();
+ matC = cholB.matrixU() * matC;
+
+ Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);
+
+ // transform back the eigenvectors: evecs = L * evecs
+ if(computeEigVecs)
+ Base::m_eivec = cholB.matrixL() * Base::m_eivec;
+ }
+
+ return *this;
+}
+
+#endif // EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
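
As a quick illustration of the solver added above, here is a minimal usage sketch; it is illustrative only and not part of the patch, and it assumes Eigen 3 is reachable through <Eigen/Dense>. The matrices and values are made up for demonstration:

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      // A is selfadjoint, B positive definite; the solver only reads
      // the lower triangular part of each.
      Eigen::MatrixXd A(2,2), B(2,2);
      A << 2, 1,
           1, 3;
      B << 4, 1,
           1, 2;

      // Default variant Ax_lBx: solves A x = lambda B x.
      Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, B);
      std::cout << "eigenvalues:\n" << es.eigenvalues() << "\n";
      // Each eigenvector x is normalized so that x^T B x = 1.
      std::cout << "eigenvectors:\n" << es.eigenvectors() << "\n";

      // The other variants are selected via the options flag:
      es.compute(A, B, Eigen::ComputeEigenvectors | Eigen::ABx_lx);
      std::cout << "ABx_lx eigenvalues:\n" << es.eigenvalues() << "\n";
      return 0;
    }
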
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
new file mode 100644
index 00000000000..c17f155a59b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -0,0 +1,384 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_HESSENBERGDECOMPOSITION_H
+#define EIGEN_HESSENBERGDECOMPOSITION_H
+
+namespace internal {
+
+template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType;
+template<typename MatrixType>
+struct traits<HessenbergDecompositionMatrixHReturnType<MatrixType> >
+{
+ typedef MatrixType ReturnType;
+};
+
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class HessenbergDecomposition
+ *
+ * \brief Reduces a square matrix to Hessenberg form by an orthogonal similarity transformation
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the Hessenberg decomposition
+ *
+ * This class performs a Hessenberg decomposition of a matrix \f$ A \f$. In
+ * the real case, the Hessenberg decomposition consists of an orthogonal
+ * matrix \f$ Q \f$ and a Hessenberg matrix \f$ H \f$ such that \f$ A = Q H
+ * Q^T \f$. An orthogonal matrix is a matrix whose inverse equals its
+ * transpose (\f$ Q^{-1} = Q^T \f$). A Hessenberg matrix has zeros below the
+ * subdiagonal, so it is almost upper triangular. The Hessenberg decomposition
+ * of a complex matrix is \f$ A = Q H Q^* \f$ with \f$ Q \f$ unitary (that is,
+ * \f$ Q^{-1} = Q^* \f$).
+ *
+ * Call the function compute() to compute the Hessenberg decomposition of a
+ * given matrix. Alternatively, you can use the
+ * HessenbergDecomposition(const MatrixType&) constructor which computes the
+ * Hessenberg decomposition at construction time. Once the decomposition is
+ * computed, you can use the matrixH() and matrixQ() functions to construct
+ * the matrices H and Q in the decomposition.
+ *
+ * The documentation for matrixH() contains an example of the typical use of
+ * this class.
+ *
+ * \sa class ComplexSchur, class Tridiagonalization, \ref QR_Module "QR Module"
+ */
+template<typename _MatrixType> class HessenbergDecomposition
+{
+ public:
+
+ /** \brief Synonym for the template parameter \p _MatrixType. */
+ typedef _MatrixType MatrixType;
+
+ enum {
+ Size = MatrixType::RowsAtCompileTime,
+ SizeMinusOne = Size == Dynamic ? Dynamic : Size - 1,
+ Options = MatrixType::Options,
+ MaxSize = MatrixType::MaxRowsAtCompileTime,
+ MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : MaxSize - 1
+ };
+
+ /** \brief Scalar type for matrices of type #MatrixType. */
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \brief Type for vector of Householder coefficients.
+ *
+ * This is a column vector with entries of type #Scalar. The length of the
+ * vector is one less than the size of #MatrixType, if it is a fixed-size
+ * type.
+ */
+ typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
+
+ /** \brief Return type of matrixQ() */
+ typedef typename HouseholderSequence<MatrixType,CoeffVectorType>::ConjugateReturnType HouseholderSequenceType;
+
+ typedef internal::HessenbergDecompositionMatrixHReturnType<MatrixType> MatrixHReturnType;
+
+ /** \brief Default constructor; the decomposition will be computed later.
+ *
+ * \param [in] size The size of the matrix whose Hessenberg decomposition will be computed.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via compute(). The \p size parameter is only
+ * used as a hint. It is not an error to give a wrong \p size, but it may
+ * impair performance.
+ *
+ * \sa compute() for an example.
+ */
+ HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size)
+ : m_matrix(size,size),
+ m_temp(size),
+ m_isInitialized(false)
+ {
+ if(size>1)
+ m_hCoeffs.resize(size-1);
+ }
+
+ /** \brief Constructor; computes Hessenberg decomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
+ *
+ * This constructor calls compute() to compute the Hessenberg
+ * decomposition.
+ *
+ * \sa matrixH() for an example.
+ */
+ HessenbergDecomposition(const MatrixType& matrix)
+ : m_matrix(matrix),
+ m_temp(matrix.rows()),
+ m_isInitialized(false)
+ {
+ if(matrix.rows()<2)
+ {
+ m_isInitialized = true;
+ return;
+ }
+ m_hCoeffs.resize(matrix.rows()-1,1);
+ _compute(m_matrix, m_hCoeffs, m_temp);
+ m_isInitialized = true;
+ }
+
+ /** \brief Computes Hessenberg decomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose Hessenberg decomposition is to be computed.
+ * \returns Reference to \c *this
+ *
+ * The Hessenberg decomposition is computed by bringing the columns of the
+ * matrix successively in the required form using Householder reflections
+ * (see, e.g., Algorithm 7.4.2 in Golub \& Van Loan, <i>%Matrix
+ * Computations</i>). The cost is \f$ 10n^3/3 \f$ flops, where \f$ n \f$
+ * denotes the size of the given matrix.
+ *
+ * This method reuses the data allocated in the HessenbergDecomposition
+ * object.
+ *
+ * Example: \include HessenbergDecomposition_compute.cpp
+ * Output: \verbinclude HessenbergDecomposition_compute.out
+ */
+ HessenbergDecomposition& compute(const MatrixType& matrix)
+ {
+ m_matrix = matrix;
+ if(matrix.rows()<2)
+ {
+ m_isInitialized = true;
+ return *this;
+ }
+ m_hCoeffs.resize(matrix.rows()-1,1);
+ _compute(m_matrix, m_hCoeffs, m_temp);
+ m_isInitialized = true;
+ return *this;
+ }
+
+ /** \brief Returns the Householder coefficients.
+ *
+ * \returns a const reference to the vector of Householder coefficients
+ *
+ * \pre Either the constructor HessenbergDecomposition(const MatrixType&)
+ * or the member function compute(const MatrixType&) has been called
+ * before to compute the Hessenberg decomposition of a matrix.
+ *
+ * The Householder coefficients allow the reconstruction of the matrix
+ * \f$ Q \f$ in the Hessenberg decomposition from the packed data.
+ *
+ * \sa packedMatrix(), \ref Householder_Module "Householder module"
+ */
+ const CoeffVectorType& householderCoefficients() const
+ {
+ eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
+ return m_hCoeffs;
+ }
+
+ /** \brief Returns the internal representation of the decomposition
+ *
+ * \returns a const reference to a matrix with the internal representation
+ * of the decomposition.
+ *
+ * \pre Either the constructor HessenbergDecomposition(const MatrixType&)
+ * or the member function compute(const MatrixType&) has been called
+ * before to compute the Hessenberg decomposition of a matrix.
+ *
+ * The returned matrix contains the following information:
+ * - the upper part and lower sub-diagonal represent the Hessenberg matrix H
+ * - the rest of the lower part contains the Householder vectors that, combined with
+ * the Householder coefficients returned by householderCoefficients(),
+ * allow the matrix Q to be reconstructed as
+ * \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
+ * Here, the matrices \f$ H_i \f$ are the Householder transformations
+ * \f$ H_i = (I - h_i v_i v_i^T) \f$
+ * where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
+ * \f$ v_i \f$ is the Householder vector defined by
+ * \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
+ * with M the matrix returned by this function.
+ *
+ * See LAPACK for further details on this packed storage.
+ *
+ * Example: \include HessenbergDecomposition_packedMatrix.cpp
+ * Output: \verbinclude HessenbergDecomposition_packedMatrix.out
+ *
+ * \sa householderCoefficients()
+ */
+ const MatrixType& packedMatrix() const
+ {
+ eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
+ return m_matrix;
+ }
+
+ /** \brief Reconstructs the orthogonal matrix Q in the decomposition
+ *
+ * \returns object representing the matrix Q
+ *
+ * \pre Either the constructor HessenbergDecomposition(const MatrixType&)
+ * or the member function compute(const MatrixType&) has been called
+ * before to compute the Hessenberg decomposition of a matrix.
+ *
+ * This function returns a light-weight object of template class
+ * HouseholderSequence. You can either apply it directly to a matrix or
+ * you can convert it to a matrix of type #MatrixType.
+ *
+ * \sa matrixH() for an example, class HouseholderSequence
+ */
+ HouseholderSequenceType matrixQ() const
+ {
+ eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
+ return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
+ .setLength(m_matrix.rows() - 1)
+ .setShift(1);
+ }
+
+ /** \brief Constructs the Hessenberg matrix H in the decomposition
+ *
+ * \returns expression object representing the matrix H
+ *
+ * \pre Either the constructor HessenbergDecomposition(const MatrixType&)
+ * or the member function compute(const MatrixType&) has been called
+ * before to compute the Hessenberg decomposition of a matrix.
+ *
+ * The object returned by this function constructs the Hessenberg matrix H
+ * when it is assigned to a matrix or otherwise evaluated. The matrix H is
+ * constructed from the packed matrix as returned by packedMatrix(): The
+ * upper part (including the subdiagonal) of the packed matrix contains
+ * the matrix H. It may sometimes be better to directly use the packed
+ * matrix instead of constructing the matrix H.
+ *
+ * Example: \include HessenbergDecomposition_matrixH.cpp
+ * Output: \verbinclude HessenbergDecomposition_matrixH.out
+ *
+ * \sa matrixQ(), packedMatrix()
+ */
+ MatrixHReturnType matrixH() const
+ {
+ eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
+ return MatrixHReturnType(*this);
+ }
+
+ private:
+
+ typedef Matrix<Scalar, 1, Size, Options | RowMajor, 1, MaxSize> VectorType;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp);
+
+ protected:
+ MatrixType m_matrix;
+ CoeffVectorType m_hCoeffs;
+ VectorType m_temp;
+ bool m_isInitialized;
+};
+
+/** \internal
+ * Performs a Hessenberg decomposition of \a matA in place.
+ *
+ * \param matA the input matrix
+ * \param hCoeffs returned Householder coefficients
+ *
+ * The result is written back into \a matA in the packed storage format
+ * documented in packedMatrix().
+ *
+ * Implemented from Golub \& Van Loan, "%Matrix Computations", Algorithm 7.4.2.
+ *
+ * \sa packedMatrix()
+ */
+template<typename MatrixType>
+void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
+{
+ assert(matA.rows()==matA.cols());
+ Index n = matA.rows();
+ temp.resize(n);
+ for (Index i = 0; i<n-1; ++i)
+ {
+ // let's consider the vector v = i-th column starting at position i+1
+ Index remainingSize = n-i-1;
+ RealScalar beta;
+ Scalar h;
+ matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
+ matA.col(i).coeffRef(i+1) = beta;
+ hCoeffs.coeffRef(i) = h;
+
+ // Apply similarity transformation to remaining columns,
+ // i.e., compute A = H A H'
+
+ // A = H A
+ matA.bottomRightCorner(remainingSize, remainingSize)
+ .applyHouseholderOnTheLeft(matA.col(i).tail(remainingSize-1), h, &temp.coeffRef(0));
+
+ // A = A H'
+ matA.rightCols(remainingSize)
+ .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), internal::conj(h), &temp.coeffRef(0));
+ }
+}
+
+namespace internal {
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \brief Expression type for return value of HessenbergDecomposition::matrixH()
+ *
+ * \tparam MatrixType type of matrix in the Hessenberg decomposition
+ *
+ * Objects of this type represent the Hessenberg matrix in the Hessenberg
+ * decomposition of some matrix. The object holds a reference to the
+ * HessenbergDecomposition class until it is assigned or evaluated for
+ * some other reason (the reference should remain valid during the life time
+ * of this object). This class is the return type of
+ * HessenbergDecomposition::matrixH(); there is probably no other use for this
+ * class.
+ */
+template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType
+: public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> >
+{
+ typedef typename MatrixType::Index Index;
+ public:
+ /** \brief Constructor.
+ *
+ * \param[in] hess Hessenberg decomposition
+ */
+ HessenbergDecompositionMatrixHReturnType(const HessenbergDecomposition<MatrixType>& hess) : m_hess(hess) { }
+
+ /** \brief Hessenberg matrix in decomposition.
+ *
+ * \param[out] result Hessenberg matrix in decomposition \p hess which
+ * was passed to the constructor
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ result = m_hess.packedMatrix();
+ Index n = result.rows();
+ if (n>2)
+ result.bottomLeftCorner(n-2, n-2).template triangularView<Lower>().setZero();
+ }
+
+ Index rows() const { return m_hess.packedMatrix().rows(); }
+ Index cols() const { return m_hess.packedMatrix().cols(); }
+
+ protected:
+ const HessenbergDecomposition<MatrixType>& m_hess;
+};
+
+}
+
+#endif // EIGEN_HESSENBERGDECOMPOSITION_H
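
To make the packed-storage API above concrete, the following standalone sketch (illustrative only; it assumes Eigen 3 is reachable through <Eigen/Dense>) computes a Hessenberg decomposition and verifies the relation A = Q H Q^T:

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);

      Eigen::HessenbergDecomposition<Eigen::MatrixXd> hd(A);
      Eigen::MatrixXd H = hd.matrixH();   // Hessenberg matrix (zeros below subdiagonal)
      Eigen::MatrixXd Q = hd.matrixQ();   // orthogonal factor, built from the
                                          // Householder vectors and coefficients

      // A = Q H Q^T should hold up to rounding error.
      std::cout << "||Q H Q^T - A|| = "
                << (Q * H * Q.transpose() - A).norm() << "\n";
      return 0;
    }
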
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
new file mode 100644
index 00000000000..5591519fb75
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
@@ -0,0 +1,170 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MATRIXBASEEIGENVALUES_H
+#define EIGEN_MATRIXBASEEIGENVALUES_H
+
+namespace internal {
+
+template<typename Derived, bool IsComplex>
+struct eigenvalues_selector
+{
+ // this is the implementation for the case IsComplex = true
+ static inline typename MatrixBase<Derived>::EigenvaluesReturnType const
+ run(const MatrixBase<Derived>& m)
+ {
+ typedef typename Derived::PlainObject PlainObject;
+ PlainObject m_eval(m);
+ return ComplexEigenSolver<PlainObject>(m_eval, false).eigenvalues();
+ }
+};
+
+template<typename Derived>
+struct eigenvalues_selector<Derived, false>
+{
+ static inline typename MatrixBase<Derived>::EigenvaluesReturnType const
+ run(const MatrixBase<Derived>& m)
+ {
+ typedef typename Derived::PlainObject PlainObject;
+ PlainObject m_eval(m);
+ return EigenSolver<PlainObject>(m_eval, false).eigenvalues();
+ }
+};
+
+} // end namespace internal
+
+/** \brief Computes the eigenvalues of a matrix
+ * \returns Column vector containing the eigenvalues.
+ *
+ * \eigenvalues_module
+ * This function computes the eigenvalues with the help of the EigenSolver
+ * class (for real matrices) or the ComplexEigenSolver class (for complex
+ * matrices).
+ *
+ * The eigenvalues are repeated according to their algebraic multiplicity,
+ * so there are as many eigenvalues as rows in the matrix.
+ *
+ * The SelfAdjointView class provides a better algorithm for selfadjoint
+ * matrices.
+ *
+ * Example: \include MatrixBase_eigenvalues.cpp
+ * Output: \verbinclude MatrixBase_eigenvalues.out
+ *
+ * \sa EigenSolver::eigenvalues(), ComplexEigenSolver::eigenvalues(),
+ * SelfAdjointView::eigenvalues()
+ */
+template<typename Derived>
+inline typename MatrixBase<Derived>::EigenvaluesReturnType
+MatrixBase<Derived>::eigenvalues() const
+{
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ return internal::eigenvalues_selector<Derived, NumTraits<Scalar>::IsComplex>::run(derived());
+}
+
+/** \brief Computes the eigenvalues of a matrix
+ * \returns Column vector containing the eigenvalues.
+ *
+ * \eigenvalues_module
+ * This function computes the eigenvalues with the help of the
+ * SelfAdjointEigenSolver class. The eigenvalues are repeated according to
+ * their algebraic multiplicity, so there are as many eigenvalues as rows in
+ * the matrix.
+ *
+ * Example: \include SelfAdjointView_eigenvalues.cpp
+ * Output: \verbinclude SelfAdjointView_eigenvalues.out
+ *
+ * \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues()
+ */
+template<typename MatrixType, unsigned int UpLo>
+inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
+SelfAdjointView<MatrixType, UpLo>::eigenvalues() const
+{
+ typedef typename SelfAdjointView<MatrixType, UpLo>::PlainObject PlainObject;
+ PlainObject thisAsMatrix(*this);
+ return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues();
+}
+
+
+
+/** \brief Computes the L2 operator norm
+ * \returns Operator norm of the matrix.
+ *
+ * \eigenvalues_module
+ * This function computes the L2 operator norm of a matrix, which is also
+ * known as the spectral norm. The norm of a matrix \f$ A \f$ is defined to be
+ * \f[ \|A\|_2 = \max_x \frac{\|Ax\|_2}{\|x\|_2} \f]
+ * where the maximum is over all vectors and the norm on the right is the
+ * Euclidean vector norm. The norm equals the largest singular value, which is
+ * the square root of the largest eigenvalue of the positive semi-definite
+ * matrix \f$ A^*A \f$.
+ *
+ * The current implementation uses the eigenvalues of \f$ A^*A \f$, as computed
+ * by SelfAdjointView::eigenvalues(), to compute the operator norm of a
+ * matrix. The SelfAdjointView class provides a better algorithm for
+ * selfadjoint matrices.
+ *
+ * Example: \include MatrixBase_operatorNorm.cpp
+ * Output: \verbinclude MatrixBase_operatorNorm.out
+ *
+ * \sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm()
+ */
+template<typename Derived>
+inline typename MatrixBase<Derived>::RealScalar
+MatrixBase<Derived>::operatorNorm() const
+{
+ typename Derived::PlainObject m_eval(derived());
+ // FIXME if it is really guaranteed that the eigenvalues are already sorted,
+ // then we don't need to compute a maxCoeff() here; comparing the 1st and last ones is enough.
+ return internal::sqrt((m_eval*m_eval.adjoint())
+ .eval()
+ .template selfadjointView<Lower>()
+ .eigenvalues()
+ .maxCoeff()
+ );
+}
+
+/** \brief Computes the L2 operator norm
+ * \returns Operator norm of the matrix.
+ *
+ * \eigenvalues_module
+ * This function computes the L2 operator norm of a self-adjoint matrix. For a
+ * self-adjoint matrix, the operator norm is the largest eigenvalue.
+ *
+ * The current implementation uses the eigenvalues of the matrix, as computed
+ * by eigenvalues(), to compute the operator norm of the matrix.
+ *
+ * Example: \include SelfAdjointView_operatorNorm.cpp
+ * Output: \verbinclude SelfAdjointView_operatorNorm.out
+ *
+ * \sa eigenvalues(), MatrixBase::operatorNorm()
+ */
+template<typename MatrixType, unsigned int UpLo>
+inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar
+SelfAdjointView<MatrixType, UpLo>::operatorNorm() const
+{
+ return eigenvalues().cwiseAbs().maxCoeff();
+}
+
+#endif // EIGEN_MATRIXBASEEIGENVALUES_H
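
A short sketch of the conveniences defined in this file (illustrative only, not part of the patch; assumes Eigen 3 via <Eigen/Dense>):

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A(2,2);
      A << 1, 2,
           2, 1;

      // General path: uses EigenSolver (real) or ComplexEigenSolver (complex);
      // the result is a complex column vector even for real input.
      std::cout << "eigenvalues:\n" << A.eigenvalues() << "\n";

      // Selfadjoint path: cheaper and more accurate when A is symmetric.
      std::cout << "selfadjoint eigenvalues:\n"
                << A.selfadjointView<Eigen::Lower>().eigenvalues() << "\n";

      // L2 operator norm, i.e. the largest singular value of A.
      std::cout << "operator norm: " << A.operatorNorm() << "\n";
      return 0;
    }
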
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h
new file mode 100644
index 00000000000..cc9af11c117
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h
@@ -0,0 +1,474 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_REAL_SCHUR_H
+#define EIGEN_REAL_SCHUR_H
+
+#include "./EigenvaluesCommon.h"
+#include "./HessenbergDecomposition.h"
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class RealSchur
+ *
+ * \brief Performs a real Schur decomposition of a square matrix
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the
+ * real Schur decomposition; this is expected to be an instantiation of the
+ * Matrix class template.
+ *
+ * Given a real square matrix A, this class computes the real Schur
+ * decomposition: \f$ A = U T U^T \f$ where U is a real orthogonal matrix and
+ * T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose
+ * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
+ * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
+ * blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the
+ * blocks on the diagonal of T are the same as the eigenvalues of the matrix
+ * A, and thus the real Schur decomposition is used in EigenSolver to compute
+ * the eigendecomposition of a matrix.
+ *
+ * Call the function compute() to compute the real Schur decomposition of a
+ * given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool)
+ * constructor which computes the real Schur decomposition at construction
+ * time. Once the decomposition is computed, you can use the matrixU() and
+ * matrixT() functions to retrieve the matrices U and T in the decomposition.
+ *
+ * The documentation of RealSchur(const MatrixType&, bool) contains an example
+ * of the typical use of this class.
+ *
+ * \note The implementation is adapted from
+ * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
+ * Their code is based on EISPACK.
+ *
+ * \sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver
+ */
+template<typename _MatrixType> class RealSchur
+{
+ public:
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+ typedef typename MatrixType::Index Index;
+
+ typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+ typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+
+ /** \brief Default constructor.
+ *
+ * \param [in] size Positive integer, size of the matrix whose Schur decomposition will be computed.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via compute(). The \p size parameter is only
+ * used as a hint. It is not an error to give a wrong \p size, but it may
+ * impair performance.
+ *
+ * \sa compute() for an example.
+ */
+ RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+ : m_matT(size, size),
+ m_matU(size, size),
+ m_workspaceVector(size),
+ m_hess(size),
+ m_isInitialized(false),
+ m_matUisUptodate(false)
+ { }
+
+ /** \brief Constructor; computes real Schur decomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose Schur decomposition is to be computed.
+ * \param[in] computeU If true, both T and U are computed; if false, only T is computed.
+ *
+ * This constructor calls compute() to compute the Schur decomposition.
+ *
+ * Example: \include RealSchur_RealSchur_MatrixType.cpp
+ * Output: \verbinclude RealSchur_RealSchur_MatrixType.out
+ */
+ RealSchur(const MatrixType& matrix, bool computeU = true)
+ : m_matT(matrix.rows(),matrix.cols()),
+ m_matU(matrix.rows(),matrix.cols()),
+ m_workspaceVector(matrix.rows()),
+ m_hess(matrix.rows()),
+ m_isInitialized(false),
+ m_matUisUptodate(false)
+ {
+ compute(matrix, computeU);
+ }
+
+ /** \brief Returns the orthogonal matrix in the Schur decomposition.
+ *
+ * \returns A const reference to the matrix U.
+ *
+ * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
+ * member function compute(const MatrixType&, bool) has been called before
+ * to compute the Schur decomposition of a matrix, and \p computeU was set
+ * to true (the default value).
+ *
+ * \sa RealSchur(const MatrixType&, bool) for an example
+ */
+ const MatrixType& matrixU() const
+ {
+ eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+ eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition.");
+ return m_matU;
+ }
+
+ /** \brief Returns the quasi-triangular matrix in the Schur decomposition.
+ *
+ * \returns A const reference to the matrix T.
+ *
+ * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
+ * member function compute(const MatrixType&, bool) has been called before
+ * to compute the Schur decomposition of a matrix.
+ *
+ * \sa RealSchur(const MatrixType&, bool) for an example
+ */
+ const MatrixType& matrixT() const
+ {
+ eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+ return m_matT;
+ }
+
+ /** \brief Computes Schur decomposition of given matrix.
+ *
+ * \param[in] matrix Square matrix whose Schur decomposition is to be computed.
+ * \param[in] computeU If true, both T and U are computed; if false, only T is computed.
+ * \returns Reference to \c *this
+ *
+ * The Schur decomposition is computed by first reducing the matrix to
+ * Hessenberg form using the class HessenbergDecomposition. The Hessenberg
+ * matrix is then reduced to triangular form by performing Francis QR
+ * iterations with implicit double shift. The cost of computing the Schur
+ * decomposition depends on the number of iterations; as a rough guide, it
+ * may be taken to be \f$25n^3\f$ flops if \a computeU is true and
+ * \f$10n^3\f$ flops if \a computeU is false.
+ *
+ * Example: \include RealSchur_compute.cpp
+ * Output: \verbinclude RealSchur_compute.out
+ */
+ RealSchur& compute(const MatrixType& matrix, bool computeU = true);
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+ return m_info;
+ }
+
+ /** \brief Maximum number of iterations.
+ *
+ * Maximum number of iterations allowed for an eigenvalue to converge.
+ */
+ static const int m_maxIterations = 40;
+
+ private:
+
+ MatrixType m_matT;
+ MatrixType m_matU;
+ ColumnVectorType m_workspaceVector;
+ HessenbergDecomposition<MatrixType> m_hess;
+ ComputationInfo m_info;
+ bool m_isInitialized;
+ bool m_matUisUptodate;
+
+ typedef Matrix<Scalar,3,1> Vector3s;
+
+ Scalar computeNormOfT();
+ Index findSmallSubdiagEntry(Index iu, Scalar norm);
+ void splitOffTwoRows(Index iu, bool computeU, Scalar exshift);
+ void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
+ void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
+ void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
+};
+
+
+template<typename MatrixType>
+RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
+{
+ assert(matrix.cols() == matrix.rows());
+
+ // Step 1. Reduce to Hessenberg form
+ m_hess.compute(matrix);
+ m_matT = m_hess.matrixH();
+ if (computeU)
+ m_matU = m_hess.matrixQ();
+
+ // Step 2. Reduce to real Schur form
+ m_workspaceVector.resize(m_matT.cols());
+ Scalar* workspace = &m_workspaceVector.coeffRef(0);
+
+ // The matrix m_matT is divided in three parts.
+ // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
+ // Rows il,...,iu are the part we are working on (the active window).
+ // Rows iu+1,...,end have already been brought into triangular form.
+ Index iu = m_matT.cols() - 1;
+ Index iter = 0; // iteration count
+ Scalar exshift = 0.0; // sum of exceptional shifts
+ Scalar norm = computeNormOfT();
+
+ while (iu >= 0)
+ {
+ Index il = findSmallSubdiagEntry(iu, norm);
+
+ // Check for convergence
+ if (il == iu) // One root found
+ {
+ m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift;
+ if (iu > 0)
+ m_matT.coeffRef(iu, iu-1) = Scalar(0);
+ iu--;
+ iter = 0;
+ }
+ else if (il == iu-1) // Two roots found
+ {
+ splitOffTwoRows(iu, computeU, exshift);
+ iu -= 2;
+ iter = 0;
+ }
+ else // No convergence yet
+ {
+ // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG)
+ Vector3s firstHouseholderVector(0,0,0), shiftInfo;
+ computeShift(iu, iter, exshift, shiftInfo);
+ iter = iter + 1;
+ if (iter > m_maxIterations) break;
+ Index im;
+ initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
+ performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
+ }
+ }
+
+ if(iter <= m_maxIterations)
+ m_info = Success;
+ else
+ m_info = NoConvergence;
+
+ m_isInitialized = true;
+ m_matUisUptodate = computeU;
+ return *this;
+}
+
+/** \internal Computes and returns the vector L1 norm of T */
+template<typename MatrixType>
+inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
+{
+ const Index size = m_matT.cols();
+ // FIXME to be efficient the following would require a triangular reduction code
+ // Scalar norm = m_matT.upper().cwiseAbs().sum()
+ // + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
+ Scalar norm = 0.0;
+ for (Index j = 0; j < size; ++j)
+ norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
+ return norm;
+}
+
+/** \internal Look for a single small sub-diagonal element and return its index */
+template<typename MatrixType>
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, Scalar norm)
+{
+ Index res = iu;
+ while (res > 0)
+ {
+ Scalar s = internal::abs(m_matT.coeff(res-1,res-1)) + internal::abs(m_matT.coeff(res,res));
+ if (s == 0.0)
+ s = norm;
+ if (internal::abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+ break;
+ res--;
+ }
+ return res;
+}
+
+/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, Scalar exshift)
+{
+ const Index size = m_matT.cols();
+
+ // The eigenvalues of the 2x2 matrix [a b; c d] are
+ // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
+ Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu));
+ Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu); // q = tr^2 / 4 - det = discr/4
+ m_matT.coeffRef(iu,iu) += exshift;
+ m_matT.coeffRef(iu-1,iu-1) += exshift;
+
+ if (q >= Scalar(0)) // Two real eigenvalues
+ {
+ Scalar z = internal::sqrt(internal::abs(q));
+ JacobiRotation<Scalar> rot;
+ if (p >= Scalar(0))
+ rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));
+ else
+ rot.makeGivens(p - z, m_matT.coeff(iu, iu-1));
+
+ m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint());
+ m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot);
+ m_matT.coeffRef(iu, iu-1) = Scalar(0);
+ if (computeU)
+ m_matU.applyOnTheRight(iu-1, iu, rot);
+ }
+
+ if (iu > 1)
+ m_matT.coeffRef(iu-1, iu-2) = Scalar(0);
+}
+
+/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
+{
+ shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
+ shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
+ shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);
+
+ // Wilkinson's original ad hoc shift
+ if (iter == 10)
+ {
+ exshift += shiftInfo.coeff(0);
+ for (Index i = 0; i <= iu; ++i)
+ m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
+ Scalar s = internal::abs(m_matT.coeff(iu,iu-1)) + internal::abs(m_matT.coeff(iu-1,iu-2));
+ shiftInfo.coeffRef(0) = Scalar(0.75) * s;
+ shiftInfo.coeffRef(1) = Scalar(0.75) * s;
+ shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;
+ }
+
+ // MATLAB's new ad hoc shift
+ if (iter == 30)
+ {
+ Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
+ s = s * s + shiftInfo.coeff(2);
+ if (s > Scalar(0))
+ {
+ s = internal::sqrt(s);
+ if (shiftInfo.coeff(1) < shiftInfo.coeff(0))
+ s = -s;
+ s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
+ s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;
+ exshift += s;
+ for (Index i = 0; i <= iu; ++i)
+ m_matT.coeffRef(i,i) -= s;
+ shiftInfo.setConstant(Scalar(0.964));
+ }
+ }
+}
+
+/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
+{
+ Vector3s& v = firstHouseholderVector; // alias to save typing
+
+ for (im = iu-2; im >= il; --im)
+ {
+ const Scalar Tmm = m_matT.coeff(im,im);
+ const Scalar r = shiftInfo.coeff(0) - Tmm;
+ const Scalar s = shiftInfo.coeff(1) - Tmm;
+ v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1);
+ v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s;
+ v.coeffRef(2) = m_matT.coeff(im+2,im+1);
+ if (im == il) {
+ break;
+ }
+ const Scalar lhs = m_matT.coeff(im,im-1) * (internal::abs(v.coeff(1)) + internal::abs(v.coeff(2)));
+ const Scalar rhs = v.coeff(0) * (internal::abs(m_matT.coeff(im-1,im-1)) + internal::abs(Tmm) + internal::abs(m_matT.coeff(im+1,im+1)));
+ if (internal::abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
+ {
+ break;
+ }
+ }
+}
+
+/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
+{
+ assert(im >= il);
+ assert(im <= iu-2);
+
+ const Index size = m_matT.cols();
+
+ for (Index k = im; k <= iu-2; ++k)
+ {
+ bool firstIteration = (k == im);
+
+ Vector3s v;
+ if (firstIteration)
+ v = firstHouseholderVector;
+ else
+ v = m_matT.template block<3,1>(k,k-1);
+
+ Scalar tau, beta;
+ Matrix<Scalar, 2, 1> ess;
+ v.makeHouseholder(ess, tau, beta);
+
+ if (beta != Scalar(0)) // if v is not zero
+ {
+ if (firstIteration && k > il)
+ m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1);
+ else if (!firstIteration)
+ m_matT.coeffRef(k,k-1) = beta;
+
+ // These Householder transformations form the O(n^3) part of the algorithm
+ m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
+ m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
+ if (computeU)
+ m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
+ }
+ }
+
+ Matrix<Scalar, 2, 1> v = m_matT.template block<2,1>(iu-1, iu-2);
+ Scalar tau, beta;
+ Matrix<Scalar, 1, 1> ess;
+ v.makeHouseholder(ess, tau, beta);
+
+ if (beta != Scalar(0)) // if v is not zero
+ {
+ m_matT.coeffRef(iu-1, iu-2) = beta;
+ m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace);
+ m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace);
+ if (computeU)
+ m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);
+ }
+
+ // clean up pollution due to round-off errors
+ for (Index i = im+2; i <= iu; ++i)
+ {
+ m_matT.coeffRef(i,i-2) = Scalar(0);
+ if (i > im+2)
+ m_matT.coeffRef(i,i-3) = Scalar(0);
+ }
+}
+
+#endif // EIGEN_REAL_SCHUR_H
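
In the same spirit, a minimal sketch exercising RealSchur (illustrative only; assumes Eigen 3 via <Eigen/Dense>), checking the decomposition A = U T U^T and the convergence report:

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);

      Eigen::RealSchur<Eigen::MatrixXd> schur(A);   // computes both U and T
      if (schur.info() != Eigen::Success)
        return 1;                                   // QR iteration did not converge

      Eigen::MatrixXd U = schur.matrixU();
      Eigen::MatrixXd T = schur.matrixT();          // quasi-triangular

      // The decomposition A = U T U^T should hold up to rounding error.
      std::cout << "||U T U^T - A|| = "
                << (U * T * U.transpose() - A).norm() << "\n";
      return 0;
    }
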
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
new file mode 100644
index 00000000000..965dda88bda
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -0,0 +1,520 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
+#define EIGEN_SELFADJOINTEIGENSOLVER_H
+
+#include "./EigenvaluesCommon.h"
+#include "./Tridiagonalization.h"
+
+template<typename _MatrixType>
+class GeneralizedSelfAdjointEigenSolver;
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class SelfAdjointEigenSolver
+ *
+ * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the
+ * eigendecomposition; this is expected to be an instantiation of the Matrix
+ * class template.
+ *
+ * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
+ * matrices, this means that the matrix is symmetric: it equals its
+ * transpose. This class computes the eigenvalues and eigenvectors of a
+ * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
+ * \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a
+ * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
+ * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
+ * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
+ * matrices, the matrix \f$ V \f$ is always invertible). This is called the
+ * eigendecomposition.
+ *
+ * The algorithm exploits the fact that the matrix is selfadjoint, making it
+ * faster and more accurate than the general purpose eigenvalue algorithms
+ * implemented in EigenSolver and ComplexEigenSolver.
+ *
+ * Only the \b lower \b triangular \b part of the input matrix is referenced.
+ *
+ * Call the function compute() to compute the eigenvalues and eigenvectors of
+ * a given matrix. Alternatively, you can use the
+ * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes
+ * the eigenvalues and eigenvectors at construction time. Once the eigenvalues
+ * and eigenvectors are computed, they can be retrieved with the eigenvalues()
+ * and eigenvectors() functions.
+ *
+ * The documentation for SelfAdjointEigenSolver(const MatrixType&, int)
+ * contains an example of the typical use of this class.
+ *
+ * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and
+ * the likes, see the class GeneralizedSelfAdjointEigenSolver.
+ *
+ * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
+ */
+template<typename _MatrixType> class SelfAdjointEigenSolver
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ enum {
+ Size = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+
+ /** \brief Scalar type for matrices of type \p _MatrixType. */
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \brief Real scalar type for \p _MatrixType.
+ *
+ * This is just \c Scalar if #Scalar is real (e.g., \c float or
+ * \c double), and the type of the real part of \c Scalar if #Scalar is
+ * complex.
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+ *
+ * This is a column vector with entries of type #RealScalar.
+ * The length of the vector is the size of \p _MatrixType.
+ */
+ typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
+ typedef Tridiagonalization<MatrixType> TridiagonalizationType;
+
+ /** \brief Default constructor for fixed-size matrices.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via compute(). This constructor
+ * can only be used if \p _MatrixType is a fixed-size matrix; use
+ * SelfAdjointEigenSolver(Index) for dynamic-size matrices.
+ *
+ * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
+ */
+ SelfAdjointEigenSolver()
+ : m_eivec(),
+ m_eivalues(),
+ m_subdiag(),
+ m_isInitialized(false)
+ { }
+
+ /** \brief Constructor, pre-allocates memory for dynamic-size matrices.
+ *
+ * \param [in] size Positive integer, size of the matrix whose
+ * eigenvalues and eigenvectors will be computed.
+ *
+ * This constructor is useful for dynamic-size matrices, when the user
+ * intends to perform decompositions via compute(). The \p size
+ * parameter is only used as a hint. It is not an error to give a wrong
+ * \p size, but it may impair performance.
+ *
+ * \sa compute() for an example
+ */
+ SelfAdjointEigenSolver(Index size)
+ : m_eivec(size, size),
+ m_eivalues(size),
+ m_subdiag(size > 1 ? size - 1 : 1),
+ m_isInitialized(false)
+ {}
+
+ /** \brief Constructor; computes eigendecomposition of given matrix.
+ *
+ * \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
+ * be computed. Only the lower triangular part of the matrix is referenced.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ *
+ * This constructor calls compute(const MatrixType&, int) to compute the
+ * eigenvalues of the matrix \p matrix. The eigenvectors are computed if
+ * \p options equals #ComputeEigenvectors.
+ *
+ * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
+ *
+ * \sa compute(const MatrixType&, int)
+ */
+ SelfAdjointEigenSolver(const MatrixType& matrix, int options = ComputeEigenvectors)
+ : m_eivec(matrix.rows(), matrix.cols()),
+ m_eivalues(matrix.cols()),
+ m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
+ m_isInitialized(false)
+ {
+ compute(matrix, options);
+ }
+
+ /** \brief Computes eigendecomposition of given matrix.
+ *
+ * \param[in] matrix Selfadjoint matrix whose eigendecomposition is to
+ * be computed. Only the lower triangular part of the matrix is referenced.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \returns Reference to \c *this
+ *
+ * This function computes the eigenvalues of \p matrix. The eigenvalues()
+ * function can be used to retrieve them. If \p options equals #ComputeEigenvectors,
+ * then the eigenvectors are also computed and can be retrieved by
+ * calling eigenvectors().
+ *
+ * This implementation uses a symmetric QR algorithm. The matrix is first
+ * reduced to tridiagonal form using the Tridiagonalization class. The
+ * tridiagonal matrix is then brought to diagonal form with implicit
+ * symmetric QR steps with Wilkinson shift. Details can be found in
+ * Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
+ *
+ * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
+ * are required and \f$ 4n^3/3 \f$ if they are not required.
+ *
+ * This method reuses the memory in the SelfAdjointEigenSolver object that
+ * was allocated when the object was constructed, if the size of the
+ * matrix does not change.
+ *
+ * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
+ *
+ * \sa SelfAdjointEigenSolver(const MatrixType&, int)
+ */
+ SelfAdjointEigenSolver& compute(const MatrixType& matrix, int options = ComputeEigenvectors);
+
+ /** \brief Returns the eigenvectors of given matrix.
+ *
+ * \returns A const reference to the matrix whose columns are the eigenvectors.
+ *
+ * \pre The eigenvectors have been computed before.
+ *
+ * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+ * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
+ * eigenvectors are normalized to have (Euclidean) norm equal to one. If
+ * this object was used to solve the eigenproblem for the selfadjoint
+ * matrix \f$ A \f$, then the matrix returned by this function is the
+ * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
+ *
+ * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
+ *
+ * \sa eigenvalues()
+ */
+ const MatrixType& eigenvectors() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec;
+ }
+
+ /** \brief Returns the eigenvalues of given matrix.
+ *
+ * \returns A const reference to the column vector containing the eigenvalues.
+ *
+ * \pre The eigenvalues have been computed before.
+ *
+ * The eigenvalues are repeated according to their algebraic multiplicity,
+ * so there are as many eigenvalues as rows in the matrix. The eigenvalues
+ * are sorted in increasing order.
+ *
+ * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
+ *
+ * \sa eigenvectors(), MatrixBase::eigenvalues()
+ */
+ const RealVectorType& eigenvalues() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ return m_eivalues;
+ }
+
+ /** \brief Computes the positive-definite square root of the matrix.
+ *
+ * \returns the positive-definite square root of the matrix
+ *
+ * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+ * have been computed before.
+ *
+ * The square root of a positive-definite matrix \f$ A \f$ is the
+ * positive-definite matrix whose square equals \f$ A \f$. This function
+ * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
+ * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
+ *
+ * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
+ *
+ * \sa operatorInverseSqrt(),
+ * \ref MatrixFunctions_Module "MatrixFunctions Module"
+ */
+ MatrixType operatorSqrt() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+ }
+
+ /** \brief Computes the inverse square root of the matrix.
+ *
+ * \returns the inverse positive-definite square root of the matrix
+ *
+ * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+ * have been computed before.
+ *
+ * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
+ * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
+ * cheaper than first computing the square root with operatorSqrt() and
+ * then its inverse with MatrixBase::inverse().
+ *
+ * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
+ *
+ * \sa operatorSqrt(), MatrixBase::inverse(),
+ * \ref MatrixFunctions_Module "MatrixFunctions Module"
+ */
+ MatrixType operatorInverseSqrt() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+ }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ return m_info;
+ }
+
+ /** \brief Maximum number of iterations.
+ *
+ * Maximum number of iterations allowed for an eigenvalue to converge.
+ */
+ static const int m_maxIterations = 30;
+
+ #ifdef EIGEN2_SUPPORT
+ SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors)
+ : m_eivec(matrix.rows(), matrix.cols()),
+ m_eivalues(matrix.cols()),
+ m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
+ m_isInitialized(false)
+ {
+ compute(matrix, computeEigenvectors);
+ }
+
+ SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
+ : m_eivec(matA.cols(), matA.cols()),
+ m_eivalues(matA.cols()),
+ m_subdiag(matA.cols() > 1 ? matA.cols() - 1 : 1),
+ m_isInitialized(false)
+ {
+ static_cast<GeneralizedSelfAdjointEigenSolver<MatrixType>*>(this)->compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
+ }
+
+ void compute(const MatrixType& matrix, bool computeEigenvectors)
+ {
+ compute(matrix, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
+ }
+
+ void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
+ {
+ compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
+ }
+ #endif // EIGEN2_SUPPORT
+
+ protected:
+ MatrixType m_eivec;
+ RealVectorType m_eivalues;
+ typename TridiagonalizationType::SubDiagonalType m_subdiag;
+ ComputationInfo m_info;
+ bool m_isInitialized;
+ bool m_eigenvectorsOk;
+};
+
+/** \internal
+ *
+ * \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ * Performs a QR step on a tridiagonal symmetric matrix represented as a
+ * pair of two vectors \a diag and \a subdiag.
+ *
+ * \param diag the diagonal vector of the input selfadjoint tridiagonal matrix
+ * \param subdiag the sub-diagonal vector of the input matrix
+ *
+ * For compilation efficiency reasons, this procedure does not use Eigen expressions
+ * for its arguments.
+ *
+ * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
+ * "implicit symmetric QR step with Wilkinson shift"
+ */
+namespace internal {
+template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
+static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
+}
+
+template<typename MatrixType>
+SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
+::compute(const MatrixType& matrix, int options)
+{
+ eigen_assert(matrix.cols() == matrix.rows());
+ eigen_assert((options&~(EigVecMask|GenEigMask))==0
+ && (options&EigVecMask)!=EigVecMask
+ && "invalid option parameter");
+ bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
+ Index n = matrix.cols();
+ m_eivalues.resize(n,1);
+
+ if(n==1)
+ {
+ m_eivalues.coeffRef(0,0) = internal::real(matrix.coeff(0,0));
+ if(computeEigenvectors)
+ m_eivec.setOnes(n,n);
+ m_info = Success;
+ m_isInitialized = true;
+ m_eigenvectorsOk = computeEigenvectors;
+ return *this;
+ }
+
+ // declare some aliases
+ RealVectorType& diag = m_eivalues;
+ MatrixType& mat = m_eivec;
+
+ // map the matrix coefficients to [-1:1] to avoid over- and underflow.
+ RealScalar scale = matrix.cwiseAbs().maxCoeff();
+ if(scale==Scalar(0)) scale = 1;
+ mat = matrix / scale;
+ m_subdiag.resize(n-1);
+ internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
+
+ Index end = n-1;
+ Index start = 0;
+ Index iter = 0; // number of iterations we are working on one element
+
+ while (end>0)
+ {
+ for (Index i = start; i<end; ++i)
+ if (internal::isMuchSmallerThan(internal::abs(m_subdiag[i]),(internal::abs(diag[i])+internal::abs(diag[i+1]))))
+ m_subdiag[i] = 0;
+
+ // find the largest unreduced block
+ while (end>0 && m_subdiag[end-1]==0)
+ {
+ iter = 0;
+ end--;
+ }
+ if (end<=0)
+ break;
+
+ // if we spent too many iterations on the current element, we give up
+ iter++;
+ if(iter > m_maxIterations) break;
+
+ start = end - 1;
+ while (start>0 && m_subdiag[start-1]!=0)
+ start--;
+
+ internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
+ }
+
+ if (iter <= m_maxIterations)
+ m_info = Success;
+ else
+ m_info = NoConvergence;
+
+ // Sort eigenvalues and corresponding vectors.
+ // TODO make the sort optional ?
+ // TODO use a better sort algorithm !!
+ if (m_info == Success)
+ {
+ for (Index i = 0; i < n-1; ++i)
+ {
+ Index k;
+ m_eivalues.segment(i,n-i).minCoeff(&k);
+ if (k > 0)
+ {
+ std::swap(m_eivalues[i], m_eivalues[k+i]);
+ if(computeEigenvectors)
+ m_eivec.col(i).swap(m_eivec.col(k+i));
+ }
+ }
+ }
+
+ // scale back the eigenvalues
+ m_eivalues *= scale;
+
+ m_isInitialized = true;
+ m_eigenvectorsOk = computeEigenvectors;
+ return *this;
+}
+
+namespace internal {
+template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
+static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
+{
+ // NOTE this version avoids over- and underflow; however, since the matrix is prescaled, overflow cannot occur,
+ // and underflows should be meaningless anyway. So I don't see any reason to enable this version, but I keep
+ // it here for reference:
+// RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
+// RealScalar e = subdiag[end-1];
+// RealScalar mu = diag[end] - (e / (td + (td>0 ? 1 : -1))) * (e / hypot(td,e));
+ RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
+ RealScalar e2 = abs2(subdiag[end-1]);
+ RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
+ RealScalar x = diag[start] - mu;
+ RealScalar z = subdiag[start];
+ for (Index k = start; k < end; ++k)
+ {
+ JacobiRotation<RealScalar> rot;
+ rot.makeGivens(x, z);
+
+ // do T = G' T G
+ RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
+ RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];
+
+ diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
+ diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
+ subdiag[k] = rot.c() * sdk - rot.s() * dkp1;
+
+ if (k > start)
+ subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;
+
+ x = subdiag[k];
+
+ if (k < end - 1)
+ {
+ z = -rot.s() * subdiag[k+1];
+ subdiag[k + 1] = rot.c() * subdiag[k+1];
+ }
+
+ // apply the givens rotation to the unit matrix Q = Q * G
+ if (matrixQ)
+ {
+ // FIXME if StorageOrder == RowMajor this operation is not very efficient
+ Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
+ q.applyOnTheRight(k,k+1,rot);
+ }
+ }
+}
+} // end namespace internal
+
+#endif // EIGEN_SELFADJOINTEIGENSOLVER_H
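A minimal usage sketch of the solver defined above (assuming the Eigen3 headers added by this patch are on the include path; the matrix values are illustrative only):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d A;
  A << 2, 1, 0,
       1, 2, 1,
       0, 1, 2;                                  // symmetric, hence selfadjoint
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);
  if (es.info() != Eigen::Success) return 1;     // NoConvergence is possible in general
  std::cout << "eigenvalues (increasing order):\n" << es.eigenvalues() << "\n";
  std::cout << "eigenvectors (columns):\n" << es.eigenvectors() << "\n";
  // This particular A is positive-definite, so operatorSqrt() is well defined
  // and es.operatorSqrt() * es.operatorSqrt() reproduces A up to rounding.
  std::cout << "A^{1/2}:\n" << es.operatorSqrt() << "\n";
  return 0;
}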
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h b/extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
new file mode 100644
index 00000000000..ae4cdce7aeb
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -0,0 +1,568 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRIDIAGONALIZATION_H
+#define EIGEN_TRIDIAGONALIZATION_H
+
+namespace internal {
+
+template<typename MatrixType> struct TridiagonalizationMatrixTReturnType;
+template<typename MatrixType>
+struct traits<TridiagonalizationMatrixTReturnType<MatrixType> >
+{
+ typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename MatrixType, typename CoeffVectorType>
+void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs);
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ *
+ * \class Tridiagonalization
+ *
+ * \brief Tridiagonal decomposition of a selfadjoint matrix
+ *
+ * \tparam _MatrixType the type of the matrix of which we are computing the
+ * tridiagonal decomposition; this is expected to be an instantiation of the
+ * Matrix class template.
+ *
+ * This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that:
+ * \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix.
+ *
+ * A tridiagonal matrix is a matrix which has nonzero elements only on the
+ * main diagonal and the first diagonal below and above it. The Hessenberg
+ * decomposition of a selfadjoint matrix is in fact a tridiagonal
+ * decomposition. This class is used in SelfAdjointEigenSolver to compute the
+ * eigenvalues and eigenvectors of a selfadjoint matrix.
+ *
+ * Call the function compute() to compute the tridiagonal decomposition of a
+ * given matrix. Alternatively, you can use the Tridiagonalization(const MatrixType&)
+ * constructor which computes the tridiagonal decomposition at
+ * construction time. Once the decomposition is computed, you can use the
+ * matrixQ() and matrixT() functions to retrieve the matrices Q and T in the
+ * decomposition.
+ *
+ * The documentation of Tridiagonalization(const MatrixType&) contains an
+ * example of the typical use of this class.
+ *
+ * \sa class HessenbergDecomposition, class SelfAdjointEigenSolver
+ */
+template<typename _MatrixType> class Tridiagonalization
+{
+ public:
+
+ /** \brief Synonym for the template parameter \p _MatrixType. */
+ typedef _MatrixType MatrixType;
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ enum {
+ Size = MatrixType::RowsAtCompileTime,
+ SizeMinusOne = Size == Dynamic ? Dynamic : (Size > 1 ? Size - 1 : 1),
+ Options = MatrixType::Options,
+ MaxSize = MatrixType::MaxRowsAtCompileTime,
+ MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : (MaxSize > 1 ? MaxSize - 1 : 1)
+ };
+
+ typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
+ typedef typename internal::plain_col_type<MatrixType, RealScalar>::type DiagonalType;
+ typedef Matrix<RealScalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> SubDiagonalType;
+ typedef typename internal::remove_all<typename MatrixType::RealReturnType>::type MatrixTypeRealView;
+ typedef internal::TridiagonalizationMatrixTReturnType<MatrixTypeRealView> MatrixTReturnType;
+
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ const typename Diagonal<const MatrixType>::RealReturnType,
+ const Diagonal<const MatrixType>
+ >::type DiagonalReturnType;
+
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ const typename Diagonal<
+ Block<const MatrixType,SizeMinusOne,SizeMinusOne> >::RealReturnType,
+ const Diagonal<
+ Block<const MatrixType,SizeMinusOne,SizeMinusOne> >
+ >::type SubDiagonalReturnType;
+
+ /** \brief Return type of matrixQ() */
+ typedef typename HouseholderSequence<MatrixType,CoeffVectorType>::ConjugateReturnType HouseholderSequenceType;
+
+ /** \brief Default constructor.
+ *
+ * \param [in] size Positive integer, size of the matrix whose tridiagonal
+ * decomposition will be computed.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via compute(). The \p size parameter is only
+ * used as a hint. It is not an error to give a wrong \p size, but it may
+ * impair performance.
+ *
+ * \sa compute() for an example.
+ */
+ Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)
+ : m_matrix(size,size),
+ m_hCoeffs(size > 1 ? size-1 : 1),
+ m_isInitialized(false)
+ {}
+
+ /** \brief Constructor; computes the tridiagonal decomposition of the given matrix.
+ *
+ * \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
+ * is to be computed.
+ *
+ * This constructor calls compute() to compute the tridiagonal decomposition.
+ *
+ * Example: \include Tridiagonalization_Tridiagonalization_MatrixType.cpp
+ * Output: \verbinclude Tridiagonalization_Tridiagonalization_MatrixType.out
+ */
+ Tridiagonalization(const MatrixType& matrix)
+ : m_matrix(matrix),
+ m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1),
+ m_isInitialized(false)
+ {
+ internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
+ m_isInitialized = true;
+ }
+
+ /** \brief Computes the tridiagonal decomposition of the given matrix.
+ *
+ * \param[in] matrix Selfadjoint matrix whose tridiagonal decomposition
+ * is to be computed.
+ * \returns Reference to \c *this
+ *
+ * The tridiagonal decomposition is computed by bringing the columns of
+ * the matrix successively in the required form using Householder
+ * reflections. The cost is \f$ 4n^3/3 \f$ flops, where \f$ n \f$ denotes
+ * the size of the given matrix.
+ *
+ * This method reuses the allocated data in the Tridiagonalization
+ * object, if the size of the matrix does not change.
+ *
+ * Example: \include Tridiagonalization_compute.cpp
+ * Output: \verbinclude Tridiagonalization_compute.out
+ */
+ Tridiagonalization& compute(const MatrixType& matrix)
+ {
+ m_matrix = matrix;
+ m_hCoeffs.resize(matrix.rows()-1, 1);
+ internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
+ m_isInitialized = true;
+ return *this;
+ }
+
+ /** \brief Returns the Householder coefficients.
+ *
+ * \returns a const reference to the vector of Householder coefficients
+ *
+ * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+ * the member function compute(const MatrixType&) has been called before
+ * to compute the tridiagonal decomposition of a matrix.
+ *
+ * The Householder coefficients allow the reconstruction of the matrix
+ * \f$ Q \f$ in the tridiagonal decomposition from the packed data.
+ *
+ * Example: \include Tridiagonalization_householderCoefficients.cpp
+ * Output: \verbinclude Tridiagonalization_householderCoefficients.out
+ *
+ * \sa packedMatrix(), \ref Householder_Module "Householder module"
+ */
+ inline CoeffVectorType householderCoefficients() const
+ {
+ eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+ return m_hCoeffs;
+ }
+
+ /** \brief Returns the internal representation of the decomposition
+ *
+ * \returns a const reference to a matrix with the internal representation
+ * of the decomposition.
+ *
+ * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+ * the member function compute(const MatrixType&) has been called before
+ * to compute the tridiagonal decomposition of a matrix.
+ *
+ * The returned matrix contains the following information:
+ * - the strict upper triangular part is equal to the input matrix A.
+ * - the diagonal and lower sub-diagonal represent the real tridiagonal
+ * symmetric matrix T.
+ * - the rest of the lower part contains the Householder vectors that,
+ * combined with the Householder coefficients returned by
+ * householderCoefficients(), allow the matrix Q to be reconstructed as
+ * \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
+ * Here, the matrices \f$ H_i \f$ are the Householder transformations
+ * \f$ H_i = (I - h_i v_i v_i^T) \f$
+ * where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
+ * \f$ v_i \f$ is the Householder vector defined by
+ * \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
+ * with M the matrix returned by this function.
+ *
+ * See LAPACK for further details on this packed storage.
+ *
+ * Example: \include Tridiagonalization_packedMatrix.cpp
+ * Output: \verbinclude Tridiagonalization_packedMatrix.out
+ *
+ * \sa householderCoefficients()
+ */
+ inline const MatrixType& packedMatrix() const
+ {
+ eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+ return m_matrix;
+ }
+
+ /** \brief Returns the unitary matrix Q in the decomposition
+ *
+ * \returns object representing the matrix Q
+ *
+ * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+ * the member function compute(const MatrixType&) has been called before
+ * to compute the tridiagonal decomposition of a matrix.
+ *
+ * This function returns a light-weight object of template class
+ * HouseholderSequence. You can either apply it directly to a matrix or
+ * you can convert it to a matrix of type #MatrixType.
+ *
+ * \sa Tridiagonalization(const MatrixType&) for an example,
+ * matrixT(), class HouseholderSequence
+ */
+ HouseholderSequenceType matrixQ() const
+ {
+ eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+ return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
+ .setLength(m_matrix.rows() - 1)
+ .setShift(1);
+ }
+
+ /** \brief Returns an expression of the tridiagonal matrix T in the decomposition
+ *
+ * \returns expression object representing the matrix T
+ *
+ * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+ * the member function compute(const MatrixType&) has been called before
+ * to compute the tridiagonal decomposition of a matrix.
+ *
+ * Currently, this function can be used to extract the matrix T from internal
+ * data and copy it to a dense matrix object. In most cases, it may be
+ * sufficient to directly use the packed matrix or the vector expressions
+ * returned by diagonal() and subDiagonal() instead of creating a new
+ * dense copy of the matrix with this function.
+ *
+ * \sa Tridiagonalization(const MatrixType&) for an example,
+ * matrixQ(), packedMatrix(), diagonal(), subDiagonal()
+ */
+ MatrixTReturnType matrixT() const
+ {
+ eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+ return MatrixTReturnType(m_matrix.real());
+ }
+
+ /** \brief Returns the diagonal of the tridiagonal matrix T in the decomposition.
+ *
+ * \returns expression representing the diagonal of T
+ *
+ * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+ * the member function compute(const MatrixType&) has been called before
+ * to compute the tridiagonal decomposition of a matrix.
+ *
+ * Example: \include Tridiagonalization_diagonal.cpp
+ * Output: \verbinclude Tridiagonalization_diagonal.out
+ *
+ * \sa matrixT(), subDiagonal()
+ */
+ DiagonalReturnType diagonal() const;
+
+ /** \brief Returns the subdiagonal of the tridiagonal matrix T in the decomposition.
+ *
+ * \returns expression representing the subdiagonal of T
+ *
+ * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+ * the member function compute(const MatrixType&) has been called before
+ * to compute the tridiagonal decomposition of a matrix.
+ *
+ * \sa diagonal() for an example, matrixT()
+ */
+ SubDiagonalReturnType subDiagonal() const;
+
+ protected:
+
+ MatrixType m_matrix;
+ CoeffVectorType m_hCoeffs;
+ bool m_isInitialized;
+};
+
+template<typename MatrixType>
+typename Tridiagonalization<MatrixType>::DiagonalReturnType
+Tridiagonalization<MatrixType>::diagonal() const
+{
+ eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+ return m_matrix.diagonal();
+}
+
+template<typename MatrixType>
+typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
+Tridiagonalization<MatrixType>::subDiagonal() const
+{
+ eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+ Index n = m_matrix.rows();
+ return Block<const MatrixType,SizeMinusOne,SizeMinusOne>(m_matrix, 1, 0, n-1,n-1).diagonal();
+}
+
+namespace internal {
+
+/** \internal
+ * Performs a tridiagonal decomposition of the selfadjoint matrix \a matA in-place.
+ *
+ * \param[in,out] matA On input the selfadjoint matrix. Only the \b lower triangular part is referenced.
+ * On output, the strict upper part is left unchanged, and the lower triangular part
+ * represents the T and Q matrices in packed format as detailed below.
+ * \param[out] hCoeffs returned Householder coefficients (see below)
+ *
+ * On output, the tridiagonal selfadjoint matrix T is stored in the diagonal
+ * and lower sub-diagonal of the matrix \a matA.
+ * The unitary matrix Q is represented in a compact way as a product of
+ * Householder reflectors \f$ H_i \f$ such that:
+ * \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
+ * The Householder reflectors are defined as
+ * \f$ H_i = (I - h_i v_i v_i^T) \f$
+ * where \f$ h_i = hCoeffs[i]\f$ is the \f$ i \f$th Householder coefficient and
+ * \f$ v_i \f$ is the Householder vector defined by
+ * \f$ v_i = [ 0, \ldots, 0, 1, matA(i+2,i), \ldots, matA(N-1,i) ]^T \f$.
+ *
+ * Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
+ *
+ * \sa Tridiagonalization::packedMatrix()
+ */
+template<typename MatrixType, typename CoeffVectorType>
+void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ Index n = matA.rows();
+ eigen_assert(n==matA.cols());
+ eigen_assert(n==hCoeffs.size()+1 || n==1);
+
+ for (Index i = 0; i<n-1; ++i)
+ {
+ Index remainingSize = n-i-1;
+ RealScalar beta;
+ Scalar h;
+ matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
+
+ // Apply similarity transformation to remaining columns,
+ // i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
+ matA.col(i).coeffRef(i+1) = 1;
+
+ hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()
+ * (conj(h) * matA.col(i).tail(remainingSize)));
+
+ hCoeffs.tail(n-i-1) += (conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);
+
+ matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()
+ .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), -1);
+
+ matA.col(i).coeffRef(i+1) = beta;
+ hCoeffs.coeffRef(i) = h;
+ }
+}
+
+// forward declaration, implementation at the end of this file
+template<typename MatrixType,
+ int Size=MatrixType::ColsAtCompileTime,
+ bool IsComplex=NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct tridiagonalization_inplace_selector;
+
+/** \brief Performs a full tridiagonalization in place
+ *
+ * \param[in,out] mat On input, the selfadjoint matrix whose tridiagonal
+ * decomposition is to be computed. Only the lower triangular part is referenced.
+ * The rest is left unchanged. On output, it holds the orthogonal matrix Q
+ * in the decomposition if \p extractQ is true.
+ * \param[out] diag The diagonal of the tridiagonal matrix T in the
+ * decomposition.
+ * \param[out] subdiag The subdiagonal of the tridiagonal matrix T in
+ * the decomposition.
+ * \param[in] extractQ If true, the orthogonal matrix Q in the
+ * decomposition is computed and stored in \p mat.
+ *
+ * Computes the tridiagonal decomposition of the selfadjoint matrix \p mat in place
+ * such that \f$ mat = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real
+ * symmetric tridiagonal matrix.
+ *
+ * The tridiagonal matrix T is passed to the output parameters \p diag and \p subdiag. If
+ * \p extractQ is true, then the orthogonal matrix Q is passed to \p mat. Otherwise the lower
+ * part of the matrix \p mat is destroyed.
+ *
+ * The vectors \p diag and \p subdiag are not resized. The function
+ * assumes that they are already of the correct size. The length of the
+ * vector \p diag should equal the number of rows in \p mat, and the
+ * length of the vector \p subdiag should be one less.
+ *
+ * This implementation contains an optimized path for 3-by-3 matrices
+ * which is especially useful for plane fitting.
+ *
+ * \note Currently, it requires two temporary vectors to hold the intermediate
+ * Householder coefficients, and to reconstruct the matrix Q from the Householder
+ * reflectors.
+ *
+ * Example (this uses the same matrix as the example in
+ * Tridiagonalization::Tridiagonalization(const MatrixType&)):
+ * \include Tridiagonalization_decomposeInPlace.cpp
+ * Output: \verbinclude Tridiagonalization_decomposeInPlace.out
+ *
+ * \sa class Tridiagonalization
+ */
+template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
+void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+{
+ typedef typename MatrixType::Index Index;
+ eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
+ tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
+}
+
+/** \internal
+ * General full tridiagonalization
+ */
+template<typename MatrixType, int Size, bool IsComplex>
+struct tridiagonalization_inplace_selector
+{
+ typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;
+ typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;
+ typedef typename MatrixType::Index Index;
+ template<typename DiagonalType, typename SubDiagonalType>
+ static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+ {
+ CoeffVectorType hCoeffs(mat.cols()-1);
+ tridiagonalization_inplace(mat,hCoeffs);
+ diag = mat.diagonal().real();
+ subdiag = mat.template diagonal<-1>().real();
+ if(extractQ)
+ mat = HouseholderSequenceType(mat, hCoeffs.conjugate())
+ .setLength(mat.rows() - 1)
+ .setShift(1);
+ }
+};
+
+/** \internal
+ * Specialization for 3x3 real matrices.
+ * Especially useful for plane fitting.
+ */
+template<typename MatrixType>
+struct tridiagonalization_inplace_selector<MatrixType,3,false>
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+
+ template<typename DiagonalType, typename SubDiagonalType>
+ static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+ {
+ diag[0] = mat(0,0);
+ RealScalar v1norm2 = abs2(mat(2,0));
+ if(v1norm2 == RealScalar(0))
+ {
+ diag[1] = mat(1,1);
+ diag[2] = mat(2,2);
+ subdiag[0] = mat(1,0);
+ subdiag[1] = mat(2,1);
+ if (extractQ)
+ mat.setIdentity();
+ }
+ else
+ {
+ RealScalar beta = sqrt(abs2(mat(1,0)) + v1norm2);
+ RealScalar invBeta = RealScalar(1)/beta;
+ Scalar m01 = mat(1,0) * invBeta;
+ Scalar m02 = mat(2,0) * invBeta;
+ Scalar q = RealScalar(2)*m01*mat(2,1) + m02*(mat(2,2) - mat(1,1));
+ diag[1] = mat(1,1) + m02*q;
+ diag[2] = mat(2,2) - m02*q;
+ subdiag[0] = beta;
+ subdiag[1] = mat(2,1) - m01 * q;
+ if (extractQ)
+ {
+ mat << 1, 0, 0,
+ 0, m01, m02,
+ 0, m02, -m01;
+ }
+ }
+ }
+};
+
+/** \internal
+ * Trivial specialization for 1x1 matrices
+ */
+template<typename MatrixType, bool IsComplex>
+struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
+{
+ typedef typename MatrixType::Scalar Scalar;
+
+ template<typename DiagonalType, typename SubDiagonalType>
+ static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
+ {
+ diag(0,0) = real(mat(0,0));
+ if(extractQ)
+ mat(0,0) = Scalar(1);
+ }
+};
+
+/** \internal
+ * \eigenvalues_module \ingroup Eigenvalues_Module
+ *
+ * \brief Expression type for return value of Tridiagonalization::matrixT()
+ *
+ * \tparam MatrixType type of underlying dense matrix
+ */
+template<typename MatrixType> struct TridiagonalizationMatrixTReturnType
+: public ReturnByValue<TridiagonalizationMatrixTReturnType<MatrixType> >
+{
+ typedef typename MatrixType::Index Index;
+ public:
+ /** \brief Constructor.
+ *
+ * \param[in] mat The underlying dense matrix
+ */
+ TridiagonalizationMatrixTReturnType(const MatrixType& mat) : m_matrix(mat) { }
+
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ result.setZero();
+ result.template diagonal<1>() = m_matrix.template diagonal<-1>().conjugate();
+ result.diagonal() = m_matrix.diagonal();
+ result.template diagonal<-1>() = m_matrix.template diagonal<-1>();
+ }
+
+ Index rows() const { return m_matrix.rows(); }
+ Index cols() const { return m_matrix.cols(); }
+
+ protected:
+ const typename MatrixType::Nested m_matrix;
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TRIDIAGONALIZATION_H
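A short sketch of the decomposition API above, reconstructing the input from Q and T (assumes the Eigen3 headers from this patch; Random() uses its default seeding):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix4d A = Eigen::Matrix4d::Random();
  Eigen::Matrix4d S = A + A.transpose();          // selfadjoint by construction
  Eigen::Tridiagonalization<Eigen::Matrix4d> tri(S);
  Eigen::Matrix4d Q = tri.matrixQ();              // HouseholderSequence evaluated to a dense matrix
  Eigen::Matrix4d T = tri.matrixT();              // real symmetric tridiagonal matrix
  // The reconstruction error should be on the order of machine precision.
  std::cout << "||Q T Q^* - S|| = " << (Q * T * Q.adjoint() - S).norm() << "\n";
  return 0;
}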
diff --git a/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h b/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h
new file mode 100644
index 00000000000..b51deb3f3c3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h
@@ -0,0 +1,352 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ALIGNEDBOX_H
+#define EIGEN_ALIGNEDBOX_H
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ *
+ * \class AlignedBox
+ *
+ * \brief An axis aligned box
+ *
+ * \param _Scalar the type of the scalar coefficients
+ * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+ *
+ * This class represents an axis aligned box as a pair of the minimal and maximal corners.
+ */
+template <typename _Scalar, int _AmbientDim>
+class AlignedBox
+{
+public:
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+ enum { AmbientDimAtCompileTime = _AmbientDim };
+ typedef _Scalar Scalar;
+ typedef NumTraits<Scalar> ScalarTraits;
+ typedef DenseIndex Index;
+ typedef typename ScalarTraits::Real RealScalar;
+ typedef typename ScalarTraits::NonInteger NonInteger;
+ typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+
+ /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
+ enum CornerType
+ {
+ /** 1D names */
+ Min=0, Max=1,
+
+ /** Added names for 2D */
+ BottomLeft=0, BottomRight=1,
+ TopLeft=2, TopRight=3,
+
+ /** Added names for 3D */
+ BottomLeftFloor=0, BottomRightFloor=1,
+ TopLeftFloor=2, TopRightFloor=3,
+ BottomLeftCeil=4, BottomRightCeil=5,
+ TopLeftCeil=6, TopRightCeil=7
+ };
+
+
+ /** Default constructor initializing a null box. */
+ inline explicit AlignedBox()
+ { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
+
+ /** Constructs a null box with \a _dim the dimension of the ambient space. */
+ inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
+ { setEmpty(); }
+
+ /** Constructs a box with extremities \a _min and \a _max. */
+ template<typename OtherVectorType1, typename OtherVectorType2>
+ inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
+
+ /** Constructs a box containing a single point \a p. */
+ template<typename Derived>
+ inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
+ {
+ const typename internal::nested<Derived,2>::type p(a_p.derived());
+ m_min = p;
+ m_max = p;
+ }
+
+ ~AlignedBox() {}
+
+ /** \returns the dimension in which the box lives, i.e., the dimension of the ambient space */
+ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : Index(AmbientDimAtCompileTime); }
+
+ /** \deprecated use isEmpty */
+ inline bool isNull() const { return isEmpty(); }
+
+ /** \deprecated use setEmpty */
+ inline void setNull() { setEmpty(); }
+
+ /** \returns true if the box is empty. */
+ inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
+
+ /** Makes \c *this an empty box. */
+ inline void setEmpty()
+ {
+ m_min.setConstant( ScalarTraits::highest() );
+ m_max.setConstant( ScalarTraits::lowest() );
+ }
+
+ /** \returns the minimal corner */
+ inline const VectorType& (min)() const { return m_min; }
+ /** \returns a non const reference to the minimal corner */
+ inline VectorType& (min)() { return m_min; }
+ /** \returns the maximal corner */
+ inline const VectorType& (max)() const { return m_max; }
+ /** \returns a non const reference to the maximal corner */
+ inline VectorType& (max)() { return m_max; }
+
+ /** \returns the center of the box */
+ inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>,
+ const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> >
+ center() const
+ { return (m_min+m_max)/2; }
+
+ /** \returns the lengths of the sides of the bounding box.
+ * Note that this function does not return the same
+ * result for integral and floating-point scalar types.
+ */
+ inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> sizes() const
+ { return m_max - m_min; }
+
+ /** \returns the volume of the bounding box */
+ inline Scalar volume() const
+ { return sizes().prod(); }
+
+ /** \returns an expression for the bounding box diagonal vector;
+ * if the length of the diagonal is needed, diagonal().norm()
+ * will provide it.
+ */
+ inline CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> diagonal() const
+ { return sizes(); }
+
+ /** \returns the vertex of the bounding box at the corner defined by
+ * the corner-id \a corner. It works only for a 1D, 2D or 3D bounding box.
+ * For 1D bounding boxes corners are named by 2 enum constants:
+ * Min and Max.
+ * For 2D bounding boxes, corners are named by 4 enum constants:
+ * BottomLeft, BottomRight, TopLeft, TopRight.
+ * For 3D bounding boxes, the following names are added:
+ * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil.
+ */
+ inline VectorType corner(CornerType corner) const
+ {
+ EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE);
+
+ VectorType res;
+
+ Index mult = 1;
+ for(Index d=0; d<dim(); ++d)
+ {
+ if( mult & corner ) res[d] = m_max[d];
+ else res[d] = m_min[d];
+ mult *= 2;
+ }
+ return res;
+ }
+
+ /** \returns a random point inside the bounding box sampled with
+ * a uniform distribution */
+ inline VectorType sample() const
+ {
+ VectorType r;
+ for(Index d=0; d<dim(); ++d)
+ {
+ if(!ScalarTraits::IsInteger)
+ {
+ r[d] = m_min[d] + (m_max[d]-m_min[d])
+ * internal::random<Scalar>(Scalar(0), Scalar(1));
+ }
+ else
+ r[d] = internal::random(m_min[d], m_max[d]);
+ }
+ return r;
+ }
+
+ /** \returns true if the point \a p is inside the box \c *this. */
+ template<typename Derived>
+ inline bool contains(const MatrixBase<Derived>& a_p) const
+ {
+ const typename internal::nested<Derived,2>::type p(a_p.derived());
+ return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
+ }
+
+ /** \returns true if the box \a b is entirely inside the box \c *this. */
+ inline bool contains(const AlignedBox& b) const
+ { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
+
+ /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
+ template<typename Derived>
+ inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
+ {
+ const typename internal::nested<Derived,2>::type p(a_p.derived());
+ m_min = m_min.cwiseMin(p);
+ m_max = m_max.cwiseMax(p);
+ return *this;
+ }
+
+ /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
+ inline AlignedBox& extend(const AlignedBox& b)
+ {
+ m_min = m_min.cwiseMin(b.m_min);
+ m_max = m_max.cwiseMax(b.m_max);
+ return *this;
+ }
+
+ /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
+ inline AlignedBox& clamp(const AlignedBox& b)
+ {
+ m_min = m_min.cwiseMax(b.m_min);
+ m_max = m_max.cwiseMin(b.m_max);
+ return *this;
+ }
+
+ /** Returns an AlignedBox that is the intersection of \a b and \c *this */
+ inline AlignedBox intersection(const AlignedBox& b) const
+ {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
+
+ /** Returns an AlignedBox that is the union of \a b and \c *this */
+ inline AlignedBox merged(const AlignedBox& b) const
+ { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
+
+ /** Translate \c *this by the vector \a t and returns a reference to \c *this. */
+ template<typename Derived>
+ inline AlignedBox& translate(const MatrixBase<Derived>& a_t)
+ {
+ const typename internal::nested<Derived,2>::type t(a_t.derived());
+ m_min += t;
+ m_max += t;
+ return *this;
+ }
+
+ /** \returns the squared distance between the point \a p and the box \c *this,
+ * and zero if \a p is inside the box.
+ * \sa exteriorDistance()
+ */
+ template<typename Derived>
+ inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
+
+ /** \returns the squared distance between the boxes \a b and \c *this,
+ * and zero if the boxes intersect.
+ * \sa exteriorDistance()
+ */
+ inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
+
+ /** \returns the distance between the point \a p and the box \c *this,
+ * and zero if \a p is inside the box.
+ * \sa squaredExteriorDistance()
+ */
+ template<typename Derived>
+ inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
+ { return internal::sqrt(NonInteger(squaredExteriorDistance(p))); }
+
+ /** \returns the distance between the boxes \a b and \c *this,
+ * and zero if the boxes intersect.
+ * \sa squaredExteriorDistance()
+ */
+ inline NonInteger exteriorDistance(const AlignedBox& b) const
+ { return internal::sqrt(NonInteger(squaredExteriorDistance(b))); }
+
+ /** \returns \c *this with scalar type casted to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline typename internal::cast_return_type<AlignedBox,
+ AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+ {
+ return typename internal::cast_return_type<AlignedBox,
+ AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+ }
+
+ /** Copy constructor with scalar type conversion */
+ template<typename OtherScalarType>
+ inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
+ {
+ m_min = (other.min)().template cast<Scalar>();
+ m_max = (other.max)().template cast<Scalar>();
+ }
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ bool isApprox(const AlignedBox& other, RealScalar prec = ScalarTraits::dummy_precision()) const
+ { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
+
+protected:
+
+ VectorType m_min, m_max;
+};
+
+
+
+template<typename Scalar,int AmbientDim>
+template<typename Derived>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const
+{
+ const typename internal::nested<Derived,2*AmbientDim>::type p(a_p.derived());
+ Scalar dist2 = 0.;
+ Scalar aux;
+ for (Index k=0; k<dim(); ++k)
+ {
+ if( m_min[k] > p[k] )
+ {
+ aux = m_min[k] - p[k];
+ dist2 += aux*aux;
+ }
+ else if( p[k] > m_max[k] )
+ {
+ aux = p[k] - m_max[k];
+ dist2 += aux*aux;
+ }
+ }
+ return dist2;
+}
+
+template<typename Scalar,int AmbientDim>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const
+{
+ Scalar dist2 = 0.;
+ Scalar aux;
+ for (Index k=0; k<dim(); ++k)
+ {
+ if( m_min[k] > b.m_max[k] )
+ {
+ aux = m_min[k] - b.m_max[k];
+ dist2 += aux*aux;
+ }
+ else if( b.m_min[k] > m_max[k] )
+ {
+ aux = b.m_min[k] - m_max[k];
+ dist2 += aux*aux;
+ }
+ }
+ return dist2;
+}
+
+#endif // EIGEN_ALIGNEDBOX_H
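A small sketch of the AlignedBox interface above (illustrative values, assuming the headers from this patch):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  Eigen::AlignedBox<double,3> box;                // default-constructed boxes start empty
  box.extend(Eigen::Vector3d(0, 0, 0));
  box.extend(Eigen::Vector3d(1, 2, 3));
  Eigen::Vector3d p(2, 2, 2);
  std::cout << "contains p: " << box.contains(p) << "\n";          // 0, since p.x() > max.x()
  std::cout << "exterior distance: " << box.exteriorDistance(p) << "\n"; // 1
  std::cout << "sizes: " << box.sizes().transpose() << "\n";       // 1 2 3
  return 0;
}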
diff --git a/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h b/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
new file mode 100644
index 00000000000..0ec4624cf98
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
@@ -0,0 +1,241 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ANGLEAXIS_H
+#define EIGEN_ANGLEAXIS_H
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class AngleAxis
+ *
+ * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
+ *
+ * \param _Scalar the scalar type, i.e., the type of the coefficients.
+ *
+ * \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized.
+ *
+ * The following two typedefs are provided for convenience:
+ * \li \c AngleAxisf for \c float
+ * \li \c AngleAxisd for \c double
+ *
+ * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
+ * mimic Euler-angles. Here is an example:
+ * \include AngleAxis_mimic_euler.cpp
+ * Output: \verbinclude AngleAxis_mimic_euler.out
+ *
+ * \note This class is not meant to be used to store a rotation transformation,
+ * but rather to make it easier to create other rotation objects (Quaternion,
+ * rotation Matrix) and transformation objects.
+ *
+ * \sa class Quaternion, class Transform, MatrixBase::UnitX()
+ */
+
+namespace internal {
+template<typename _Scalar> struct traits<AngleAxis<_Scalar> >
+{
+ typedef _Scalar Scalar;
+};
+}
+
+template<typename _Scalar>
+class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
+{
+ typedef RotationBase<AngleAxis<_Scalar>,3> Base;
+
+public:
+
+ using Base::operator*;
+
+ enum { Dim = 3 };
+ /** the scalar type of the coefficients */
+ typedef _Scalar Scalar;
+ typedef Matrix<Scalar,3,3> Matrix3;
+ typedef Matrix<Scalar,3,1> Vector3;
+ typedef Quaternion<Scalar> QuaternionType;
+
+protected:
+
+ Vector3 m_axis;
+ Scalar m_angle;
+
+public:
+
+ /** Default constructor without initialization. */
+ AngleAxis() {}
+ /** Constructs and initializes the angle-axis rotation from an \a angle in radians
+ * and an \a axis which \b must \b be \b normalized.
+ *
+ * \warning If the \a axis vector is not normalized, then the angle-axis object
+ * represents an invalid rotation. */
+ template<typename Derived>
+ inline AngleAxis(Scalar angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
+ /** Constructs and initializes the angle-axis rotation from a quaternion \a q. */
+ template<typename QuatDerived> inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; }
+ /** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
+ template<typename Derived>
+ inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
+
+ Scalar angle() const { return m_angle; }
+ Scalar& angle() { return m_angle; }
+
+ const Vector3& axis() const { return m_axis; }
+ Vector3& axis() { return m_axis; }
+
+ /** Concatenates two rotations */
+ inline QuaternionType operator* (const AngleAxis& other) const
+ { return QuaternionType(*this) * QuaternionType(other); }
+
+ /** Concatenates two rotations */
+ inline QuaternionType operator* (const QuaternionType& other) const
+ { return QuaternionType(*this) * other; }
+
+ /** Concatenates two rotations */
+ friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
+ { return a * QuaternionType(b); }
+
+ /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
+ AngleAxis inverse() const
+ { return AngleAxis(-m_angle, m_axis); }
+
+ template<class QuatDerived>
+ AngleAxis& operator=(const QuaternionBase<QuatDerived>& q);
+ template<typename Derived>
+ AngleAxis& operator=(const MatrixBase<Derived>& m);
+
+ template<typename Derived>
+ AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
+ Matrix3 toRotationMatrix(void) const;
+
+ /** \returns \c *this with scalar type casted to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
+ { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
+
+ /** Copy constructor with scalar type conversion */
+ template<typename OtherScalarType>
+ inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
+ {
+ m_axis = other.axis().template cast<Scalar>();
+ m_angle = Scalar(other.angle());
+ }
+
+ inline static const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); }
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ bool isApprox(const AngleAxis& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+ * single precision angle-axis type */
+typedef AngleAxis<float> AngleAxisf;
+/** \ingroup Geometry_Module
+ * double precision angle-axis type */
+typedef AngleAxis<double> AngleAxisd;
+
+/** Set \c *this from a \b unit quaternion.
+ * The axis is normalized.
+ *
+ * \warning As with any other method dealing with quaternions, if the input quaternion
+ * is not normalized then the result is undefined.
+ */
+template<typename Scalar>
+template<typename QuatDerived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)
+{
+ using std::acos;
+ using std::min;
+ using std::max;
+ Scalar n2 = q.vec().squaredNorm();
+ if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
+ {
+ m_angle = 0;
+ m_axis << 1, 0, 0;
+ }
+ else
+ {
+ m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1)));
+ m_axis = q.vec() / internal::sqrt(n2);
+ }
+ return *this;
+}
+
+/** Set \c *this from a 3x3 rotation matrix \a mat.
+ */
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
+{
+ // Since a direct conversion would not really be faster,
+ // let's use the robust Quaternion implementation:
+ return *this = QuaternionType(mat);
+}
+
+/**
+* \brief Sets \c *this from a 3x3 rotation matrix.
+**/
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+ return *this = QuaternionType(mat);
+}
+
+/** Constructs and \returns an equivalent 3x3 rotation matrix.
+ */
+template<typename Scalar>
+typename AngleAxis<Scalar>::Matrix3
+AngleAxis<Scalar>::toRotationMatrix(void) const
+{
+ Matrix3 res;
+ Vector3 sin_axis = internal::sin(m_angle) * m_axis;
+ Scalar c = internal::cos(m_angle);
+ Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
+
+ Scalar tmp;
+ tmp = cos1_axis.x() * m_axis.y();
+ res.coeffRef(0,1) = tmp - sin_axis.z();
+ res.coeffRef(1,0) = tmp + sin_axis.z();
+
+ tmp = cos1_axis.x() * m_axis.z();
+ res.coeffRef(0,2) = tmp + sin_axis.y();
+ res.coeffRef(2,0) = tmp - sin_axis.y();
+
+ tmp = cos1_axis.y() * m_axis.z();
+ res.coeffRef(1,2) = tmp - sin_axis.x();
+ res.coeffRef(2,1) = tmp + sin_axis.x();
+
+ res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c;
+
+ return res;
+}
+
+#endif // EIGEN_ANGLEAXIS_H
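A brief sketch of composing rotations with AngleAxis, along the lines suggested by the class documentation above (illustrative angles; assumes the headers from this patch):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  const float pi = 3.14159265f;
  Matrix3f R;
  R = AngleAxisf(0.25f * pi, Vector3f::UnitX())   // products of AngleAxis objects yield a Quaternionf,
    * AngleAxisf(0.50f * pi, Vector3f::UnitY())   // which is then converted to a rotation matrix
    * AngleAxisf(0.33f * pi, Vector3f::UnitZ());
  AngleAxisf aa(R);                               // round-trip back to angle-axis form
  std::cout << "angle: " << aa.angle() << "\n";
  std::cout << "axis:  " << aa.axis().transpose() << "\n";
  return 0;
}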
diff --git a/extern/Eigen2/Eigen/src/Geometry/EulerAngles.h b/extern/Eigen3/Eigen/src/Geometry/EulerAngles.h
index 204118ac94d..d246a6ebf4a 100644
--- a/extern/Eigen2/Eigen/src/Geometry/EulerAngles.h
+++ b/extern/Eigen3/Eigen/src/Geometry/EulerAngles.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -26,7 +26,7 @@
#define EIGEN_EULERANGLES_H
/** \geometry_module \ingroup Geometry_Module
- * \nonstableyet
+ *
*
* \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2)
*
@@ -43,48 +43,48 @@
*/
template<typename Derived>
inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
-MatrixBase<Derived>::eulerAngles(int a0, int a1, int a2) const
+MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
{
/* Implemented from Graphics Gems IV */
EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
Matrix<Scalar,3,1> res;
typedef Matrix<typename Derived::Scalar,2,1> Vector2;
- const Scalar epsilon = precision<Scalar>();
+ const Scalar epsilon = NumTraits<Scalar>::dummy_precision();
- const int odd = ((a0+1)%3 == a1) ? 0 : 1;
- const int i = a0;
- const int j = (a0 + 1 + odd)%3;
- const int k = (a0 + 2 - odd)%3;
+ const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
+ const Index i = a0;
+ const Index j = (a0 + 1 + odd)%3;
+ const Index k = (a0 + 2 - odd)%3;
if (a0==a2)
{
Scalar s = Vector2(coeff(j,i) , coeff(k,i)).norm();
- res[1] = ei_atan2(s, coeff(i,i));
+ res[1] = internal::atan2(s, coeff(i,i));
if (s > epsilon)
{
- res[0] = ei_atan2(coeff(j,i), coeff(k,i));
- res[2] = ei_atan2(coeff(i,j),-coeff(i,k));
+ res[0] = internal::atan2(coeff(j,i), coeff(k,i));
+ res[2] = internal::atan2(coeff(i,j),-coeff(i,k));
}
else
{
res[0] = Scalar(0);
- res[2] = (coeff(i,i)>0?1:-1)*ei_atan2(-coeff(k,j), coeff(j,j));
+ res[2] = (coeff(i,i)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j));
}
}
else
{
Scalar c = Vector2(coeff(i,i) , coeff(i,j)).norm();
- res[1] = ei_atan2(-coeff(i,k), c);
+ res[1] = internal::atan2(-coeff(i,k), c);
if (c > epsilon)
{
- res[0] = ei_atan2(coeff(j,k), coeff(k,k));
- res[2] = ei_atan2(coeff(i,j), coeff(i,i));
+ res[0] = internal::atan2(coeff(j,k), coeff(k,k));
+ res[2] = internal::atan2(coeff(i,j), coeff(i,i));
}
else
{
res[0] = Scalar(0);
- res[2] = (coeff(i,k)>0?1:-1)*ei_atan2(-coeff(k,j), coeff(j,j));
+ res[2] = (coeff(i,k)>0?1:-1)*internal::atan2(-coeff(k,j), coeff(j,j));
}
}
if (!odd)
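A quick sketch of the eulerAngles() member updated in the hunk above (illustrative angles; assumes the Eigen3 headers from this patch):

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  Matrix3d R;
  R = AngleAxisd(0.1, Vector3d::UnitZ())
    * AngleAxisd(0.2, Vector3d::UnitY())
    * AngleAxisd(0.3, Vector3d::UnitX());
  // The triplet (2,1,0) requests the Z-Y-X (yaw, pitch, roll) convention.
  Vector3d ea = R.eulerAngles(2, 1, 0);
  std::cout << ea.transpose() << "\n";            // approximately 0.1 0.2 0.3
  return 0;
}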
diff --git a/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h b/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h
new file mode 100644
index 00000000000..2bc4f7e87e3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h
@@ -0,0 +1,318 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_HOMOGENEOUS_H
+#define EIGEN_HOMOGENEOUS_H
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class Homogeneous
+ *
+ * \brief Expression of one (or a set of) homogeneous vector(s)
+ *
+ * \param MatrixType the type of the object in which we are making homogeneous
+ *
+ * This class represents an expression of one (or a set of) homogeneous vector(s).
+ * It is the return type of MatrixBase::homogeneous() and most of the time
+ * this is the only way it is used.
+ *
+ * \sa MatrixBase::homogeneous()
+ */
+
+namespace internal {
+
+template<typename MatrixType,int Direction>
+struct traits<Homogeneous<MatrixType,Direction> >
+ : traits<MatrixType>
+{
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+ enum {
+ RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ?
+ int(MatrixType::RowsAtCompileTime) + 1 : Dynamic,
+ ColsPlusOne = (MatrixType::ColsAtCompileTime != Dynamic) ?
+ int(MatrixType::ColsAtCompileTime) + 1 : Dynamic,
+ RowsAtCompileTime = Direction==Vertical ? RowsPlusOne : MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = Direction==Horizontal ? ColsPlusOne : MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ TmpFlags = _MatrixTypeNested::Flags & HereditaryBits,
+ Flags = ColsAtCompileTime==1 ? (TmpFlags & ~RowMajorBit)
+ : RowsAtCompileTime==1 ? (TmpFlags | RowMajorBit)
+ : TmpFlags,
+ CoeffReadCost = _MatrixTypeNested::CoeffReadCost
+ };
+};
+
+template<typename MatrixType,typename Lhs> struct homogeneous_left_product_impl;
+template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl;
+
+} // end namespace internal
+
+template<typename MatrixType,int _Direction> class Homogeneous
+ : public MatrixBase<Homogeneous<MatrixType,_Direction> >
+{
+ public:
+
+ enum { Direction = _Direction };
+
+ typedef MatrixBase<Homogeneous> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous)
+
+ inline Homogeneous(const MatrixType& matrix)
+ : m_matrix(matrix)
+ {}
+
+ inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical ? 1 : 0); }
+ inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ if( (int(Direction)==Vertical && row==m_matrix.rows())
+ || (int(Direction)==Horizontal && col==m_matrix.cols()))
+ return 1;
+ return m_matrix.coeff(row, col);
+ }
+
+ template<typename Rhs>
+ inline const internal::homogeneous_right_product_impl<Homogeneous,Rhs>
+ operator* (const MatrixBase<Rhs>& rhs) const
+ {
+ eigen_assert(int(Direction)==Horizontal);
+ return internal::homogeneous_right_product_impl<Homogeneous,Rhs>(m_matrix,rhs.derived());
+ }
+
+ template<typename Lhs> friend
+ inline const internal::homogeneous_left_product_impl<Homogeneous,Lhs>
+ operator* (const MatrixBase<Lhs>& lhs, const Homogeneous& rhs)
+ {
+ eigen_assert(int(Direction)==Vertical);
+ return internal::homogeneous_left_product_impl<Homogeneous,Lhs>(lhs.derived(),rhs.m_matrix);
+ }
+
+ template<typename Scalar, int Dim, int Mode, int Options> friend
+ inline const internal::homogeneous_left_product_impl<Homogeneous,Transform<Scalar,Dim,Mode,Options> >
+ operator* (const Transform<Scalar,Dim,Mode,Options>& lhs, const Homogeneous& rhs)
+ {
+ eigen_assert(int(Direction)==Vertical);
+ return internal::homogeneous_left_product_impl<Homogeneous,Transform<Scalar,Dim,Mode,Options> >(lhs,rhs.m_matrix);
+ }
+
+ protected:
+ const typename MatrixType::Nested m_matrix;
+};
+
+/** \geometry_module
+ *
+ * \return an expression of the equivalent homogeneous vector
+ *
+ * \only_for_vectors
+ *
+ * Example: \include MatrixBase_homogeneous.cpp
+ * Output: \verbinclude MatrixBase_homogeneous.out
+ *
+ * \sa class Homogeneous
+ */
+template<typename Derived>
+inline typename MatrixBase<Derived>::HomogeneousReturnType
+MatrixBase<Derived>::homogeneous() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ return derived();
+}
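+
+// A minimal usage sketch, assuming the standard Vector3f/Vector4f typedefs:
+// homogeneous() simply appends a 1 to a vector.
+//   Eigen::Vector3f v(1.0f, 2.0f, 3.0f);
+//   Eigen::Vector4f h = v.homogeneous();   // h == (1, 2, 3, 1)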
+
+/** \geometry_module
+ *
+ * \returns a matrix expression of homogeneous column (or row) vectors
+ *
+ * Example: \include VectorwiseOp_homogeneous.cpp
+ * Output: \verbinclude VectorwiseOp_homogeneous.out
+ *
+ * \sa MatrixBase::homogeneous() */
+template<typename ExpressionType, int Direction>
+inline Homogeneous<ExpressionType,Direction>
+VectorwiseOp<ExpressionType,Direction>::homogeneous() const
+{
+ return _expression();
+}
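+
+// A minimal sketch of the vectorwise variant, assuming the Matrix3Xf/Matrix4Xf
+// convenience typedefs: a row of ones is appended to the whole point set.
+//   Eigen::Matrix3Xf pts = Eigen::Matrix3Xf::Random(3, 4);  // one 3D point per column
+//   Eigen::Matrix4Xf hom = pts.colwise().homogeneous();     // 4 x 4, last row all ones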
+
+/** \geometry_module
+ *
+ * \returns an expression of the homogeneous normalized vector of \c *this
+ *
+ * Example: \include MatrixBase_hnormalized.cpp
+ * Output: \verbinclude MatrixBase_hnormalized.out
+ *
+ * \sa VectorwiseOp::hnormalized() */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::HNormalizedReturnType
+MatrixBase<Derived>::hnormalized() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+ return ConstStartMinusOne(derived(),0,0,
+ ColsAtCompileTime==1?size()-1:1,
+ ColsAtCompileTime==1?1:size()-1) / coeff(size()-1);
+}
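+
+// A minimal numeric sketch, assuming the standard Vector4f/Vector3f typedefs:
+// hnormalized() drops the last coefficient and divides the rest by it.
+//   Eigen::Vector4f p(2.0f, 4.0f, 6.0f, 2.0f);
+//   Eigen::Vector3f e = p.hnormalized();    // e == (1, 2, 3)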
+
+/** \geometry_module
+ *
+ * \returns an expression of the homogeneous normalized vectors of \c *this
+ *
+ * Example: \include DirectionWise_hnormalized.cpp
+ * Output: \verbinclude DirectionWise_hnormalized.out
+ *
+ * \sa MatrixBase::hnormalized() */
+template<typename ExpressionType, int Direction>
+inline const typename VectorwiseOp<ExpressionType,Direction>::HNormalizedReturnType
+VectorwiseOp<ExpressionType,Direction>::hnormalized() const
+{
+ return HNormalized_Block(_expression(),0,0,
+ Direction==Vertical ? _expression().rows()-1 : _expression().rows(),
+ Direction==Horizontal ? _expression().cols()-1 : _expression().cols()).cwiseQuotient(
+ Replicate<HNormalized_Factors,
+ Direction==Vertical ? HNormalized_SizeMinusOne : 1,
+ Direction==Horizontal ? HNormalized_SizeMinusOne : 1>
+ (HNormalized_Factors(_expression(),
+ Direction==Vertical ? _expression().rows()-1:0,
+ Direction==Horizontal ? _expression().cols()-1:0,
+ Direction==Vertical ? 1 : _expression().rows(),
+ Direction==Horizontal ? 1 : _expression().cols()),
+ Direction==Vertical ? _expression().rows()-1 : 1,
+ Direction==Horizontal ? _expression().cols()-1 : 1));
+}
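+
+// A minimal sketch of the vectorwise variant (e.g. a perspective divide applied
+// to a whole point set), assuming the Matrix4Xf/Matrix3Xf typedefs:
+//   Eigen::Matrix4Xf clip = Eigen::Matrix4Xf::Random(4, 8);
+//   Eigen::Matrix3Xf ndc  = clip.colwise().hnormalized();  // each column divided by its last entry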
+
+namespace internal {
+
+template<typename MatrixOrTransformType>
+struct take_matrix_for_product
+{
+ typedef MatrixOrTransformType type;
+ static const type& run(const type &x) { return x; }
+};
+
+template<typename Scalar, int Dim, int Mode,int Options>
+struct take_matrix_for_product<Transform<Scalar, Dim, Mode, Options> >
+{
+ typedef Transform<Scalar, Dim, Mode, Options> TransformType;
+ typedef typename TransformType::ConstAffinePart type;
+ static const type run (const TransformType& x) { return x.affine(); }
+};
+
+template<typename Scalar, int Dim, int Options>
+struct take_matrix_for_product<Transform<Scalar, Dim, Projective, Options> >
+{
+ typedef Transform<Scalar, Dim, Projective, Options> TransformType;
+ typedef typename TransformType::MatrixType type;
+ static const type& run (const TransformType& x) { return x.matrix(); }
+};
+
+template<typename MatrixType,typename Lhs>
+struct traits<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
+{
+ typedef typename take_matrix_for_product<Lhs>::type LhsMatrixType;
+ typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
+ typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
+ typedef typename make_proper_matrix_type<
+ typename traits<MatrixTypeCleaned>::Scalar,
+ LhsMatrixTypeCleaned::RowsAtCompileTime,
+ MatrixTypeCleaned::ColsAtCompileTime,
+ MatrixTypeCleaned::PlainObject::Options,
+ LhsMatrixTypeCleaned::MaxRowsAtCompileTime,
+ MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType;
+};
+
+template<typename MatrixType,typename Lhs>
+struct homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs>
+ : public ReturnByValue<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
+{
+ typedef typename traits<homogeneous_left_product_impl>::LhsMatrixType LhsMatrixType;
+ typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
+ typedef typename remove_all<typename LhsMatrixTypeCleaned::Nested>::type LhsMatrixTypeNested;
+ typedef typename MatrixType::Index Index;
+ homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs)
+ : m_lhs(take_matrix_for_product<Lhs>::run(lhs)),
+ m_rhs(rhs)
+ {}
+
+ inline Index rows() const { return m_lhs.rows(); }
+ inline Index cols() const { return m_rhs.cols(); }
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ // FIXME investigate how to allow lazy evaluation of this product when possible
+ dst = Block<const LhsMatrixTypeNested,
+ LhsMatrixTypeNested::RowsAtCompileTime,
+ LhsMatrixTypeNested::ColsAtCompileTime==Dynamic?Dynamic:LhsMatrixTypeNested::ColsAtCompileTime-1>
+ (m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs;
+ dst += m_lhs.col(m_lhs.cols()-1).rowwise()
+ .template replicate<MatrixType::ColsAtCompileTime>(m_rhs.cols());
+ }
+
+ const typename LhsMatrixTypeCleaned::Nested m_lhs;
+ const typename MatrixType::Nested m_rhs;
+};
+
+template<typename MatrixType,typename Rhs>
+struct traits<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
+{
+ typedef typename make_proper_matrix_type<typename traits<MatrixType>::Scalar,
+ MatrixType::RowsAtCompileTime,
+ Rhs::ColsAtCompileTime,
+ MatrixType::PlainObject::Options,
+ MatrixType::MaxRowsAtCompileTime,
+ Rhs::MaxColsAtCompileTime>::type ReturnType;
+};
+
+template<typename MatrixType,typename Rhs>
+struct homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs>
+ : public ReturnByValue<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
+{
+ typedef typename remove_all<typename Rhs::Nested>::type RhsNested;
+ typedef typename MatrixType::Index Index;
+ homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {}
+
+ inline Index rows() const { return m_lhs.rows(); }
+ inline Index cols() const { return m_rhs.cols(); }
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ // FIXME investigate how to allow lazy evaluation of this product when possible
+ dst = m_lhs * Block<const RhsNested,
+ RhsNested::RowsAtCompileTime==Dynamic?Dynamic:RhsNested::RowsAtCompileTime-1,
+ RhsNested::ColsAtCompileTime>
+ (m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols());
+ dst += m_rhs.row(m_rhs.rows()-1).colwise()
+ .template replicate<MatrixType::RowsAtCompileTime>(m_lhs.rows());
+ }
+
+ const typename MatrixType::Nested m_lhs;
+ const typename Rhs::Nested m_rhs;
+};
+
+} // end namespace internal
+
+#endif // EIGEN_HOMOGENEOUS_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h b/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h
new file mode 100644
index 00000000000..eb0a5877168
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h
@@ -0,0 +1,280 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_HYPERPLANE_H
+#define EIGEN_HYPERPLANE_H
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class Hyperplane
+ *
+ * \brief A hyperplane
+ *
+ * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
+ * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
+ *
+ * \param _Scalar the scalar type, i.e., the type of the coefficients
+ * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+ * Notice that the dimension of the hyperplane is _AmbientDim-1.
+ *
+ * This class represents a hyperplane as the zero set of the implicit equation
+ * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
+ * and \f$ d \f$ is the distance (offset) to the origin.
+ */
+template <typename _Scalar, int _AmbientDim, int _Options>
+class Hyperplane
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+ enum {
+ AmbientDimAtCompileTime = _AmbientDim,
+ Options = _Options
+ };
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef DenseIndex Index;
+ typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+ typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
+ ? Dynamic
+ : Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients;
+ typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
+ typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;
+
+ /** Default constructor without initialization */
+ inline explicit Hyperplane() {}
+
+ template<int OtherOptions>
+ Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
+ : m_coeffs(other.coeffs())
+ {}
+
+ /** Constructs a dynamic-size hyperplane with \a _dim the dimension
+ * of the ambient space */
+ inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}
+
+ /** Constructs a plane from its normal \a n and a point \a e on the plane.
+ * \warning the normal vector \a n is assumed to be normalized.
+ */
+ inline Hyperplane(const VectorType& n, const VectorType& e)
+ : m_coeffs(n.size()+1)
+ {
+ normal() = n;
+ offset() = -n.dot(e);
+ }
+
+ /** Constructs a plane from its normal \a n and distance to the origin \a d
+ * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
+ * \warning the normal vector \a n is assumed to be normalized.
+ */
+ inline Hyperplane(const VectorType& n, Scalar d)
+ : m_coeffs(n.size()+1)
+ {
+ normal() = n;
+ offset() = d;
+ }
+
+ /** Constructs a hyperplane passing through the two points. If the dimension of the ambient
+ * space is greater than 2, the hyperplane is not uniquely determined, so an arbitrary one is chosen.
+ */
+ static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
+ {
+ Hyperplane result(p0.size());
+ result.normal() = (p1 - p0).unitOrthogonal();
+ result.offset() = -p0.dot(result.normal());
+ return result;
+ }
+
+ /** Constructs a hyperplane passing through the three points. The dimension of the ambient space
+ * is required to be exactly 3.
+ */
+ static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
+ Hyperplane result(p0.size());
+ result.normal() = (p2 - p0).cross(p1 - p0).normalized();
+ result.offset() = -p0.dot(result.normal());
+ return result;
+ }
+
+ /** Constructs a hyperplane passing through the parametrized line \a parametrized.
+ * If the dimension of the ambient space is greater than 2, the hyperplane is not
+ * uniquely determined, so an arbitrary one is chosen.
+ */
+ // FIXME to be consistent with the rest this could be implemented as a static Through function ??
+ explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
+ {
+ normal() = parametrized.direction().unitOrthogonal();
+ offset() = -parametrized.origin().dot(normal());
+ }
+
+ ~Hyperplane() {}
+
+ /** \returns the dimension of the ambient space in which the plane lives */
+ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }
+
+ /** normalizes \c *this */
+ void normalize(void)
+ {
+ m_coeffs /= normal().norm();
+ }
+
+ /** \returns the signed distance between the plane \c *this and a point \a p.
+ * \sa absDistance()
+ */
+ inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); }
+
+ /** \returns the absolute distance between the plane \c *this and a point \a p.
+ * \sa signedDistance()
+ */
+ inline Scalar absDistance(const VectorType& p) const { return internal::abs(signedDistance(p)); }
+
+ /** \returns the projection of a point \a p onto the plane \c *this.
+ */
+ inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
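+
+ // A minimal numeric sketch, assuming the standard Vector3f typedef and the
+ // defaulted _Options template parameter: for the plane z = 0 (unit normal
+ // (0,0,1), offset 0),
+ //   Eigen::Hyperplane<float,3> plane(Eigen::Vector3f::UnitZ(), 0.0f);
+ //   plane.signedDistance(Eigen::Vector3f(0,0,2));   // == 2
+ //   plane.projection(Eigen::Vector3f(1,1,2));       // == (1, 1, 0)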
+
+ /** \returns a constant reference to the unit normal vector of the plane, which corresponds
+ * to the linear part of the implicit equation.
+ */
+ inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); }
+
+ /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
+ * to the linear part of the implicit equation.
+ */
+ inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
+
+ /** \returns the distance to the origin, which is also the "constant term" of the implicit equation
+ * \warning the normal vector is assumed to be normalized.
+ */
+ inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
+
+ /** \returns a non-constant reference to the distance to the origin, which is also the constant part
+ * of the implicit equation */
+ inline Scalar& offset() { return m_coeffs(dim()); }
+
+ /** \returns a constant reference to the coefficients c_i of the plane equation:
+ * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+ */
+ inline const Coefficients& coeffs() const { return m_coeffs; }
+
+ /** \returns a non-constant reference to the coefficients c_i of the plane equation:
+ * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+ */
+ inline Coefficients& coeffs() { return m_coeffs; }
+
+ /** \returns the intersection of *this with \a other.
+ *
+ * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
+ *
+ * \note If \a other is approximately parallel to *this, this method will return any point on *this.
+ */
+ VectorType intersection(const Hyperplane& other) const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+ Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
+ // since the line equations ax+by+c=0 are normalized with a^2+b^2=1, the following tests
+ // whether the two lines are approximately parallel.
+ if(internal::isMuchSmallerThan(det, Scalar(1)))
+ { // special case where the two lines are approximately parallel. Pick any point on the first line.
+ if(internal::abs(coeffs().coeff(1))>internal::abs(coeffs().coeff(0)))
+ return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
+ else
+ return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
+ }
+ else
+ { // general case
+ Scalar invdet = Scalar(1) / det;
+ return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
+ invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
+ }
+ }
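+
+ // A minimal numeric sketch, assuming the standard Vector2f typedef: the lines
+ // x = 0 and y = 0 meet at the origin.
+ //   Eigen::Hyperplane<float,2> lx(Eigen::Vector2f(1,0), 0.0f);
+ //   Eigen::Hyperplane<float,2> ly(Eigen::Vector2f(0,1), 0.0f);
+ //   lx.intersection(ly);                             // == (0, 0)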
+
+ /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
+ *
+ * \param mat the Dim x Dim transformation matrix
+ * \param traits specifies whether the matrix \a mat represents an #Isometry
+ * or a more generic #Affine transformation. The default is #Affine.
+ */
+ template<typename XprType>
+ inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
+ {
+ if (traits==Affine)
+ normal() = mat.inverse().transpose() * normal();
+ else if (traits==Isometry)
+ normal() = mat * normal();
+ else
+ {
+ eigen_assert("invalid traits value in Hyperplane::transform()");
+ }
+ return *this;
+ }
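+
+ // A minimal numeric sketch: rotating the plane z = 1 by 90 degrees about the
+ // x-axis (an isometry) maps its normal (0,0,1) to (0,-1,0), i.e. the plane y = -1.
+ //   Eigen::Hyperplane<float,3> h(Eigen::Vector3f::UnitZ(), -1.0f);
+ //   Eigen::Matrix3f R = Eigen::AngleAxisf(1.5707963f, Eigen::Vector3f::UnitX()).toRotationMatrix();
+ //   h.transform(R, Eigen::Isometry);                 // normal ~ (0,-1,0), offset still -1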
+
+ /** Applies the transformation \a t to \c *this and returns a reference to \c *this.
+ *
+ * \param t the transformation of dimension Dim
+ * \param traits specifies whether the transformation \a t represents an #Isometry
+ * or a more generic #Affine transformation. The default is #Affine.
+ * Other kind of transformations are not supported.
+ */
+ template<int TrOptions>
+ inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t,
+ TransformTraits traits = Affine)
+ {
+ transform(t.linear(), traits);
+ offset() -= normal().dot(t.translation());
+ return *this;
+ }
+
+ /** \returns \c *this with scalar type casted to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline typename internal::cast_return_type<Hyperplane,
+ Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
+ {
+ return typename internal::cast_return_type<Hyperplane,
+ Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
+ }
+
+ /** Copy constructor with scalar type conversion */
+ template<typename OtherScalarType,int OtherOptions>
+ inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
+ { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ template<int OtherOptions>
+ bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+protected:
+
+ Coefficients m_coeffs;
+};
+
+#endif // EIGEN_HYPERPLANE_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h b/extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h
new file mode 100644
index 00000000000..52b46988196
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/OrthoMethods.h
@@ -0,0 +1,229 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ORTHOMETHODS_H
+#define EIGEN_ORTHOMETHODS_H
+
+/** \geometry_module
+ *
+ * \returns the cross product of \c *this and \a other
+ *
+ * Here is a very good explanation of cross-product: http://xkcd.com/199/
+ * \sa MatrixBase::cross3()
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
+MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3)
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
+
+ // Note that there is no need for an expression here since the compiler
+ // optimizes such a small temporary very well (even within a complex expression)
+ const typename internal::nested<Derived,2>::type lhs(derived());
+ const typename internal::nested<OtherDerived,2>::type rhs(other.derived());
+ return typename cross_product_return_type<OtherDerived>::type(
+ internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+ internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+ internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
+ );
+}
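+
+// A minimal numeric sketch, assuming the standard Vector3f typedef:
+//   Eigen::Vector3f x = Eigen::Vector3f::UnitX();
+//   Eigen::Vector3f y = Eigen::Vector3f::UnitY();
+//   x.cross(y);                                // == UnitZ()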
+
+namespace internal {
+
+template< int Arch,typename VectorLhs,typename VectorRhs,
+ typename Scalar = typename VectorLhs::Scalar,
+ bool Vectorizable = (VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit>
+struct cross3_impl {
+ inline static typename internal::plain_matrix_type<VectorLhs>::type
+ run(const VectorLhs& lhs, const VectorRhs& rhs)
+ {
+ return typename internal::plain_matrix_type<VectorLhs>::type(
+ internal::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+ internal::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+ internal::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
+ 0
+ );
+ }
+};
+
+}
+
+/** \geometry_module
+ *
+ * \returns the cross product of \c *this and \a other using only the x, y, and z coefficients
+ *
+ * The size of \c *this and \a other must be four. This function is especially useful
+ * when using 4D vectors instead of 3D ones to take advantage of SSE/AltiVec vectorization.
+ *
+ * \sa MatrixBase::cross()
+ */
+template<typename Derived>
+template<typename OtherDerived>
+inline typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4)
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4)
+
+ typedef typename internal::nested<Derived,2>::type DerivedNested;
+ typedef typename internal::nested<OtherDerived,2>::type OtherDerivedNested;
+ const DerivedNested lhs(derived());
+ const OtherDerivedNested rhs(other.derived());
+
+ return internal::cross3_impl<Architecture::Target,
+ typename internal::remove_all<DerivedNested>::type,
+ typename internal::remove_all<OtherDerivedNested>::type>::run(lhs,rhs);
+}
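+
+// A minimal sketch, assuming the standard Vector4f typedef: only the x, y, z
+// coefficients participate, and the w coefficient of the result is set to 0.
+//   Eigen::Vector4f a(1,0,0,0), b(0,1,0,0);
+//   a.cross3(b);                               // == (0, 0, 1, 0)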
+
+/** \returns a matrix expression of the cross product of each column or row
+ * of the referenced expression with the \a other vector.
+ *
+ * The referenced matrix must have one dimension equal to 3.
+ * The result matrix has the same dimensions as the referenced one.
+ *
+ * \geometry_module
+ *
+ * \sa MatrixBase::cross() */
+template<typename ExpressionType, int Direction>
+template<typename OtherDerived>
+const typename VectorwiseOp<ExpressionType,Direction>::CrossReturnType
+VectorwiseOp<ExpressionType,Direction>::cross(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ CrossReturnType res(_expression().rows(),_expression().cols());
+ if(Direction==Vertical)
+ {
+ eigen_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows");
+ res.row(0) = (_expression().row(1) * other.coeff(2) - _expression().row(2) * other.coeff(1)).conjugate();
+ res.row(1) = (_expression().row(2) * other.coeff(0) - _expression().row(0) * other.coeff(2)).conjugate();
+ res.row(2) = (_expression().row(0) * other.coeff(1) - _expression().row(1) * other.coeff(0)).conjugate();
+ }
+ else
+ {
+ eigen_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns");
+ res.col(0) = (_expression().col(1) * other.coeff(2) - _expression().col(2) * other.coeff(1)).conjugate();
+ res.col(1) = (_expression().col(2) * other.coeff(0) - _expression().col(0) * other.coeff(2)).conjugate();
+ res.col(2) = (_expression().col(0) * other.coeff(1) - _expression().col(1) * other.coeff(0)).conjugate();
+ }
+ return res;
+}
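+
+// A minimal sketch, assuming the Matrix3Xf convenience typedef: each column is
+// crossed with the same vector.
+//   Eigen::Matrix3Xf m = Eigen::Matrix3Xf::Random(3, 5);
+//   Eigen::Matrix3Xf c = m.colwise().cross(Eigen::Vector3f::UnitZ());
+//   // c.col(i) == m.col(i).cross(Eigen::Vector3f::UnitZ())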
+
+namespace internal {
+
+template<typename Derived, int Size = Derived::SizeAtCompileTime>
+struct unitOrthogonal_selector
+{
+ typedef typename plain_matrix_type<Derived>::type VectorType;
+ typedef typename traits<Derived>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Derived::Index Index;
+ typedef Matrix<Scalar,2,1> Vector2;
+ inline static VectorType run(const Derived& src)
+ {
+ VectorType perp = VectorType::Zero(src.size());
+ Index maxi = 0;
+ Index sndi = 0;
+ src.cwiseAbs().maxCoeff(&maxi);
+ if (maxi==0)
+ sndi = 1;
+ RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm();
+ perp.coeffRef(maxi) = -conj(src.coeff(sndi)) * invnm;
+ perp.coeffRef(sndi) = conj(src.coeff(maxi)) * invnm;
+
+ return perp;
+ }
+};
+
+template<typename Derived>
+struct unitOrthogonal_selector<Derived,3>
+{
+ typedef typename plain_matrix_type<Derived>::type VectorType;
+ typedef typename traits<Derived>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ inline static VectorType run(const Derived& src)
+ {
+ VectorType perp;
+ /* Let us compute the crossed product of *this with a vector
+ * that is not too close to being collinear with *this.
+ */
+
+ /* unless the x and y coords are both close to zero, we can
+ * simply take ( -y, x, 0 ) and normalize it.
+ */
+ if((!isMuchSmallerThan(src.x(), src.z()))
+ || (!isMuchSmallerThan(src.y(), src.z())))
+ {
+ RealScalar invnm = RealScalar(1)/src.template head<2>().norm();
+ perp.coeffRef(0) = -conj(src.y())*invnm;
+ perp.coeffRef(1) = conj(src.x())*invnm;
+ perp.coeffRef(2) = 0;
+ }
+ /* if both x and y are close to zero, then the vector is close
+ * to the z-axis, so it's far from collinear with the x-axis, for instance.
+ * So we take the crossed product with (1,0,0) and normalize it.
+ */
+ else
+ {
+ RealScalar invnm = RealScalar(1)/src.template tail<2>().norm();
+ perp.coeffRef(0) = 0;
+ perp.coeffRef(1) = -conj(src.z())*invnm;
+ perp.coeffRef(2) = conj(src.y())*invnm;
+ }
+
+ return perp;
+ }
+};
+
+template<typename Derived>
+struct unitOrthogonal_selector<Derived,2>
+{
+ typedef typename plain_matrix_type<Derived>::type VectorType;
+ inline static VectorType run(const Derived& src)
+ { return VectorType(-conj(src.y()), conj(src.x())).normalized(); }
+};
+
+} // end namespace internal
+
+/** \returns a unit vector which is orthogonal to \c *this
+ *
+ * The size of \c *this must be at least 2. If the size is exactly 2,
+ * then the returned vector is a counter-clockwise rotation of \c *this, i.e., (-y,x).normalized().
+ *
+ * \sa cross()
+ */
+template<typename Derived>
+typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::unitOrthogonal() const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ return internal::unitOrthogonal_selector<Derived>::run(derived());
+}
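+
+// A minimal numeric sketch, assuming the standard Vector2f/Vector3f typedefs:
+//   Eigen::Vector2f w(3.0f, 4.0f);
+//   w.unitOrthogonal();                        // == (-0.8, 0.6), i.e. (-y,x).normalized()
+//   Eigen::Vector3f v(1.0f, 2.0f, 2.0f);
+//   v.unitOrthogonal();                        // a unit vector u with u.dot(v) == 0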
+
+#endif // EIGEN_ORTHOMETHODS_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h b/extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h
new file mode 100644
index 00000000000..b90f9c088a2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/ParametrizedLine.h
@@ -0,0 +1,168 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PARAMETRIZEDLINE_H
+#define EIGEN_PARAMETRIZEDLINE_H
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class ParametrizedLine
+ *
+ * \brief A parametrized line
+ *
+ * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
+ * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
+ * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
+ *
+ * \param _Scalar the scalar type, i.e., the type of the coefficients
+ * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+ */
+template <typename _Scalar, int _AmbientDim, int _Options>
+class ParametrizedLine
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+ enum {
+ AmbientDimAtCompileTime = _AmbientDim,
+ Options = _Options
+ };
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef DenseIndex Index;
+ typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;
+
+ /** Default constructor without initialization */
+ inline explicit ParametrizedLine() {}
+
+ template<int OtherOptions>
+ ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
+ : m_origin(other.origin()), m_direction(other.direction())
+ {}
+
+ /** Constructs a dynamic-size line with \a _dim the dimension
+ * of the ambient space */
+ inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {}
+
+ /** Initializes a parametrized line of direction \a direction and origin \a origin.
+ * \warning the direction vector is assumed to be normalized.
+ */
+ ParametrizedLine(const VectorType& origin, const VectorType& direction)
+ : m_origin(origin), m_direction(direction) {}
+
+ template <int OtherOptions>
+ explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane);
+
+ /** Constructs a parametrized line going from \a p0 to \a p1. */
+ static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)
+ { return ParametrizedLine(p0, (p1-p0).normalized()); }
+
+ ~ParametrizedLine() {}
+
+ /** \returns the dimension of the ambient space in which the line lives */
+ inline Index dim() const { return m_direction.size(); }
+
+ const VectorType& origin() const { return m_origin; }
+ VectorType& origin() { return m_origin; }
+
+ const VectorType& direction() const { return m_direction; }
+ VectorType& direction() { return m_direction; }
+
+ /** \returns the squared distance of a point \a p to its projection onto the line \c *this.
+ * \sa distance()
+ */
+ RealScalar squaredDistance(const VectorType& p) const
+ {
+ VectorType diff = p - origin();
+ return (diff - direction().dot(diff) * direction()).squaredNorm();
+ }
+ /** \returns the distance of a point \a p to its projection onto the line \c *this.
+ * \sa squaredDistance()
+ */
+ RealScalar distance(const VectorType& p) const { return internal::sqrt(squaredDistance(p)); }
+
+ /** \returns the projection of a point \a p onto the line \c *this. */
+ VectorType projection(const VectorType& p) const
+ { return origin() + direction().dot(p-origin()) * direction(); }
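+
+ // A minimal numeric sketch, assuming the standard Vector3f typedef and the
+ // defaulted _Options template parameter: projecting onto the x-axis.
+ //   typedef Eigen::ParametrizedLine<float,3> Line3;
+ //   Line3 l = Line3::Through(Eigen::Vector3f::Zero(), Eigen::Vector3f::UnitX());
+ //   l.projection(Eigen::Vector3f(2.0f, 3.0f, 0.0f));   // == (2, 0, 0)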
+
+ template <int OtherOptions>
+ Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
+
+ /** \returns \c *this with scalar type casted to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline typename internal::cast_return_type<ParametrizedLine,
+ ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
+ {
+ return typename internal::cast_return_type<ParametrizedLine,
+ ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
+ }
+
+ /** Copy constructor with scalar type conversion */
+ template<typename OtherScalarType,int OtherOptions>
+ inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
+ {
+ m_origin = other.origin().template cast<Scalar>();
+ m_direction = other.direction().template cast<Scalar>();
+ }
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
+
+protected:
+
+ VectorType m_origin, m_direction;
+};
+
+/** Constructs a parametrized line from a 2D hyperplane
+ *
+ * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line
+ */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+ direction() = hyperplane.normal().unitOrthogonal();
+ origin() = -hyperplane.normal()*hyperplane.offset();
+}
+
+/** \returns the parameter value of the intersection between \c *this and the given hyperplane
+ */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
+{
+ return -(hyperplane.offset()+hyperplane.normal().dot(origin()))
+ / hyperplane.normal().dot(direction());
+}
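+
+// A minimal numeric sketch: the z-axis meets the plane z = 2 at parameter t == 2.
+//   Eigen::ParametrizedLine<float,3> l(Eigen::Vector3f::Zero(), Eigen::Vector3f::UnitZ());
+//   Eigen::Hyperplane<float,3>       h(Eigen::Vector3f::UnitZ(), -2.0f);
+//   float t = l.intersection(h);                       // == 2
+//   Eigen::Vector3f p = l.origin() + t*l.direction();  // == (0, 0, 2)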
+
+#endif // EIGEN_PARAMETRIZEDLINE_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/Quaternion.h b/extern/Eigen3/Eigen/src/Geometry/Quaternion.h
new file mode 100644
index 00000000000..2662d60fed1
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/Quaternion.h
@@ -0,0 +1,751 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Mathieu Gautier <mathieu.gautier@cea.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_QUATERNION_H
+#define EIGEN_QUATERNION_H
+
+/***************************************************************************
+* Definition of QuaternionBase<Derived>
+* The implementation is at the end of the file
+***************************************************************************/
+
+namespace internal {
+template<typename Other,
+ int OtherRows=Other::RowsAtCompileTime,
+ int OtherCols=Other::ColsAtCompileTime>
+struct quaternionbase_assign_impl;
+}
+
+template<class Derived>
+class QuaternionBase : public RotationBase<Derived, 3>
+{
+ typedef RotationBase<Derived, 3> Base;
+public:
+ using Base::operator*;
+ using Base::derived;
+
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::traits<Derived>::Coefficients Coefficients;
+ enum {
+ Flags = Eigen::internal::traits<Derived>::Flags
+ };
+
+ // typedef typename Matrix<Scalar,4,1> Coefficients;
+ /** the type of a 3D vector */
+ typedef Matrix<Scalar,3,1> Vector3;
+ /** the equivalent rotation matrix type */
+ typedef Matrix<Scalar,3,3> Matrix3;
+ /** the equivalent angle-axis type */
+ typedef AngleAxis<Scalar> AngleAxisType;
+
+
+
+ /** \returns the \c x coefficient */
+ inline Scalar x() const { return this->derived().coeffs().coeff(0); }
+ /** \returns the \c y coefficient */
+ inline Scalar y() const { return this->derived().coeffs().coeff(1); }
+ /** \returns the \c z coefficient */
+ inline Scalar z() const { return this->derived().coeffs().coeff(2); }
+ /** \returns the \c w coefficient */
+ inline Scalar w() const { return this->derived().coeffs().coeff(3); }
+
+ /** \returns a reference to the \c x coefficient */
+ inline Scalar& x() { return this->derived().coeffs().coeffRef(0); }
+ /** \returns a reference to the \c y coefficient */
+ inline Scalar& y() { return this->derived().coeffs().coeffRef(1); }
+ /** \returns a reference to the \c z coefficient */
+ inline Scalar& z() { return this->derived().coeffs().coeffRef(2); }
+ /** \returns a reference to the \c w coefficient */
+ inline Scalar& w() { return this->derived().coeffs().coeffRef(3); }
+
+ /** \returns a read-only vector expression of the imaginary part (x,y,z) */
+ inline const VectorBlock<const Coefficients,3> vec() const { return coeffs().template head<3>(); }
+
+ /** \returns a vector expression of the imaginary part (x,y,z) */
+ inline VectorBlock<Coefficients,3> vec() { return coeffs().template head<3>(); }
+
+ /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
+ inline const typename internal::traits<Derived>::Coefficients& coeffs() const { return derived().coeffs(); }
+
+ /** \returns a vector expression of the coefficients (x,y,z,w) */
+ inline typename internal::traits<Derived>::Coefficients& coeffs() { return derived().coeffs(); }
+
+ EIGEN_STRONG_INLINE QuaternionBase<Derived>& operator=(const QuaternionBase<Derived>& other);
+ template<class OtherDerived> EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase<OtherDerived>& other);
+
+// disabled this copy operator as it is giving very strange compilation errors when compiling
+// test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's
+// useful; however notice that we already have the templated operator= above and e.g. in MatrixBase
+// we didn't have to add, in addition to templated operator=, such a non-templated copy operator.
+// Derived& operator=(const QuaternionBase& other)
+// { return operator=<Derived>(other); }
+
+ Derived& operator=(const AngleAxisType& aa);
+ template<class OtherDerived> Derived& operator=(const MatrixBase<OtherDerived>& m);
+
+ /** \returns a quaternion representing an identity rotation
+ * \sa MatrixBase::Identity()
+ */
+ inline static Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
+
+ /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
+ */
+ inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
+
+ /** \returns the squared norm of the quaternion's coefficients
+ * \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
+ */
+ inline Scalar squaredNorm() const { return coeffs().squaredNorm(); }
+
+ /** \returns the norm of the quaternion's coefficients
+ * \sa QuaternionBase::squaredNorm(), MatrixBase::norm()
+ */
+ inline Scalar norm() const { return coeffs().norm(); }
+
+ /** Normalizes the quaternion \c *this
+ * \sa normalized(), MatrixBase::normalize() */
+ inline void normalize() { coeffs().normalize(); }
+ /** \returns a normalized copy of \c *this
+ * \sa normalize(), MatrixBase::normalized() */
+ inline Quaternion<Scalar> normalized() const { return Quaternion<Scalar>(coeffs().normalized()); }
+
+ /** \returns the dot product of \c *this and \a other
+ * Geometrically speaking, the dot product of two unit quaternions
+ * corresponds to the cosine of half the angle between the two rotations.
+ * \sa angularDistance()
+ */
+ template<class OtherDerived> inline Scalar dot(const QuaternionBase<OtherDerived>& other) const { return coeffs().dot(other.coeffs()); }
+
+ template<class OtherDerived> Scalar angularDistance(const QuaternionBase<OtherDerived>& other) const;
+
+ /** \returns an equivalent 3x3 rotation matrix */
+ Matrix3 toRotationMatrix() const;
+
+ /** \returns the quaternion which transforms \a a into \a b through a rotation */
+ template<typename Derived1, typename Derived2>
+ Derived& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
+
+ template<class OtherDerived> EIGEN_STRONG_INLINE Quaternion<Scalar> operator* (const QuaternionBase<OtherDerived>& q) const;
+ template<class OtherDerived> EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase<OtherDerived>& q);
+
+ /** \returns the quaternion describing the inverse rotation */
+ Quaternion<Scalar> inverse() const;
+
+ /** \returns the conjugated quaternion */
+ Quaternion<Scalar> conjugate() const;
+
+ /** \returns the spherical linear interpolation for a constant-speed motion
+ * between \a other and \c *this, with the parameter \a t in [0;1];
+ * see http://en.wikipedia.org/wiki/Slerp
+ */
+ template<class OtherDerived> Quaternion<Scalar> slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const;
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ template<class OtherDerived>
+ bool isApprox(const QuaternionBase<OtherDerived>& other, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return coeffs().isApprox(other.coeffs(), prec); }
+
+ /** \returns the vector \a v rotated by the quaternion \c *this */
+ EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
+
+ /** \returns \c *this with scalar type casted to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const
+ {
+ return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(
+ coeffs().template cast<NewScalarType>());
+ }
+
+#ifdef EIGEN_QUATERNIONBASE_PLUGIN
+# include EIGEN_QUATERNIONBASE_PLUGIN
+#endif
+};
+
+/***************************************************************************
+* Definition/implementation of Quaternion<Scalar>
+***************************************************************************/
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class Quaternion
+ *
+ * \brief The quaternion class used to represent 3D orientations and rotations
+ *
+ * \param _Scalar the scalar type, i.e., the type of the coefficients
+ *
+ * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
+ * orientations and rotations of objects in three dimensions. Compared to other representations
+ * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
+ * \li \b compact storage (4 scalars)
+ * \li \b efficient to compose (28 flops),
+ * \li \b stable spherical interpolation
+ *
+ * The following two typedefs are provided for convenience:
+ * \li \c Quaternionf for \c float
+ * \li \c Quaterniond for \c double
+ *
+ * \sa class AngleAxis, class Transform
+ */
+
+namespace internal {
+template<typename _Scalar,int _Options>
+struct traits<Quaternion<_Scalar,_Options> >
+{
+ typedef Quaternion<_Scalar,_Options> PlainObject;
+ typedef _Scalar Scalar;
+ typedef Matrix<_Scalar,4,1,_Options> Coefficients;
+ enum{
+ IsAligned = bool(EIGEN_ALIGN) && ((int(_Options)&Aligned)==Aligned),
+ Flags = IsAligned ? (AlignedBit | LvalueBit) : LvalueBit
+ };
+};
+}
+
+template<typename _Scalar, int _Options>
+class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >{
+ typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base;
+public:
+ typedef _Scalar Scalar;
+
+ EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
+ using Base::operator*=;
+
+ typedef typename internal::traits<Quaternion<Scalar,_Options> >::Coefficients Coefficients;
+ typedef typename Base::AngleAxisType AngleAxisType;
+
+ /** Default constructor leaving the quaternion uninitialized. */
+ inline Quaternion() {}
+
+ /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
+ * its four coefficients \a w, \a x, \a y and \a z.
+ *
+ * \warning Note the order of the arguments: the real \a w coefficient first,
+ * while internally the coefficients are stored in the following order:
+ * [\c x, \c y, \c z, \c w]
+ */
+ inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z) : m_coeffs(x, y, z, w){}
+
+ /** Constructs and initializes a quaternion from the array \a data */
+ inline Quaternion(const Scalar* data) : m_coeffs(data) {}
+
+ /** Copy constructor */
+ template<class Derived> EIGEN_STRONG_INLINE Quaternion(const QuaternionBase<Derived>& other) { this->Base::operator=(other); }
+
+ /** Constructs and initializes a quaternion from the angle-axis \a aa */
+ explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
+
+ /** Constructs and initializes a quaternion from either:
+ * - a rotation matrix expression,
+ * - a 4D vector expression representing quaternion coefficients.
+ */
+ template<typename Derived>
+ explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
+
+ inline Coefficients& coeffs() { return m_coeffs;}
+ inline const Coefficients& coeffs() const { return m_coeffs;}
+
+protected:
+ Coefficients m_coeffs;
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ EIGEN_STRONG_INLINE static void _check_template_params()
+ {
+ EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options,
+ INVALID_MATRIX_TEMPLATE_PARAMETERS)
+ }
+#endif
+};
+
+/** \ingroup Geometry_Module
+ * single precision quaternion type */
+typedef Quaternion<float> Quaternionf;
+/** \ingroup Geometry_Module
+ * double precision quaternion type */
+typedef Quaternion<double> Quaterniond;
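+
+// A minimal sketch of the argument/storage order caveat: the constructor takes
+// w first, while the coefficients are stored as (x, y, z, w).
+//   Eigen::Quaternionf q(1.0f, 0.0f, 0.0f, 0.0f);   // identity rotation
+//   q.coeffs();                                     // == (0, 0, 0, 1)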
+
+/***************************************************************************
+* Specialization of Map<Quaternion<Scalar>>
+***************************************************************************/
+
+namespace internal {
+ template<typename _Scalar, int _Options>
+ struct traits<Map<Quaternion<_Scalar>, _Options> >:
+ traits<Quaternion<_Scalar, _Options> >
+ {
+ typedef _Scalar Scalar;
+ typedef Map<Matrix<_Scalar,4,1>, _Options> Coefficients;
+
+ typedef traits<Quaternion<_Scalar, _Options> > TraitsBase;
+ enum {
+ IsAligned = TraitsBase::IsAligned,
+
+ Flags = TraitsBase::Flags
+ };
+ };
+}
+
+namespace internal {
+ template<typename _Scalar, int _Options>
+ struct traits<Map<const Quaternion<_Scalar>, _Options> >:
+ traits<Quaternion<_Scalar> >
+ {
+ typedef _Scalar Scalar;
+ typedef Map<const Matrix<_Scalar,4,1>, _Options> Coefficients;
+
+ typedef traits<Quaternion<_Scalar, _Options> > TraitsBase;
+ enum {
+ IsAligned = TraitsBase::IsAligned,
+ Flags = TraitsBase::Flags & ~LvalueBit
+ };
+ };
+}
+
+/** \brief Quaternion expression mapping a constant memory buffer
+ *
+ * \param _Scalar the type of the Quaternion coefficients
+ * \param _Options see class Map
+ *
+ * This is a specialization of class Map for Quaternion. This class allows one to view
+ * a four-scalar memory buffer as an Eigen Quaternion object.
+ *
+ * \sa class Map, class Quaternion, class QuaternionBase
+ */
+template<typename _Scalar, int _Options>
+class Map<const Quaternion<_Scalar>, _Options >
+ : public QuaternionBase<Map<const Quaternion<_Scalar>, _Options> >
+{
+ typedef QuaternionBase<Map<const Quaternion<_Scalar>, _Options> > Base;
+
+ public:
+ typedef _Scalar Scalar;
+ typedef typename internal::traits<Map>::Coefficients Coefficients;
+ EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+ using Base::operator*=;
+
+ /** Constructs a Mapped Quaternion object from the pointer \a coeffs
+ *
+ * The pointer \a coeffs must reference the four coefficients of the quaternion in the following order:
+ * \code *coeffs == {x, y, z, w} \endcode
+ *
+ * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
+ EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {}
+
+ inline const Coefficients& coeffs() const { return m_coeffs;}
+
+ protected:
+ const Coefficients m_coeffs;
+};
+
+/** \brief Expression of a quaternion from a memory buffer
+ *
+ * \param _Scalar the type of the Quaternion coefficients
+ * \param _Options see class Map
+ *
+ * This is a specialization of class Map for Quaternion. This class allows one to view
+ * a four-scalar memory buffer as an Eigen Quaternion object.
+ *
+ * \sa class Map, class Quaternion, class QuaternionBase
+ */
+template<typename _Scalar, int _Options>
+class Map<Quaternion<_Scalar>, _Options >
+ : public QuaternionBase<Map<Quaternion<_Scalar>, _Options> >
+{
+ typedef QuaternionBase<Map<Quaternion<_Scalar>, _Options> > Base;
+
+ public:
+ typedef _Scalar Scalar;
+ typedef typename internal::traits<Map>::Coefficients Coefficients;
+ EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+ using Base::operator*=;
+
+ /** Constructs a Mapped Quaternion object from the pointer \a coeffs
+ *
+ * The pointer \a coeffs must reference the four coefficients of the quaternion in the following order:
+ * \code *coeffs == {x, y, z, w} \endcode
+ *
+ * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
+ EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {}
+
+ inline Coefficients& coeffs() { return m_coeffs; }
+ inline const Coefficients& coeffs() const { return m_coeffs; }
+
+ protected:
+ Coefficients m_coeffs;
+};
+
+/** \ingroup Geometry_Module
+ * Map an unaligned array of single precision scalars as a quaternion */
+typedef Map<Quaternion<float>, 0> QuaternionMapf;
+/** \ingroup Geometry_Module
+ * Map an unaligned array of double precision scalars as a quaternion */
+typedef Map<Quaternion<double>, 0> QuaternionMapd;
+/** \ingroup Geometry_Module
+ * Map a 16-byte aligned array of single precision scalars as a quaternion */
+typedef Map<Quaternion<float>, Aligned> QuaternionMapAlignedf;
+/** \ingroup Geometry_Module
+ * Map a 16-byte aligned array of double precision scalars as a quaternion */
+typedef Map<Quaternion<double>, Aligned> QuaternionMapAlignedd;
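+
+// A minimal sketch of the Map specializations: view four scalars in memory as a
+// quaternion, without copying; writes through the map go to the buffer.
+//   float buf[4] = {0.f, 0.f, 0.f, 1.f};            // x, y, z, w
+//   Eigen::QuaternionMapf qm(buf);
+//   qm.setIdentity();                               // writes (0,0,0,1) into buf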
+
+/***************************************************************************
+* Implementation of QuaternionBase methods
+***************************************************************************/
+
+// Generic Quaternion * Quaternion product
+// This product can be specialized for a given architecture via the Arch template argument.
+namespace internal {
+template<int Arch, class Derived1, class Derived2, typename Scalar, int _Options> struct quat_product
+{
+ EIGEN_STRONG_INLINE static Quaternion<Scalar> run(const QuaternionBase<Derived1>& a, const QuaternionBase<Derived2>& b){
+ return Quaternion<Scalar>
+ (
+ a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
+ a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
+ a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
+ a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()
+ );
+ }
+};
+}
+
+/** \returns the concatenation of two rotations as a quaternion-quaternion product */
+template <class Derived>
+template <class OtherDerived>
+EIGEN_STRONG_INLINE Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::operator* (const QuaternionBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ return internal::quat_product<Architecture::Target, Derived, OtherDerived,
+ typename internal::traits<Derived>::Scalar,
+ internal::traits<Derived>::IsAligned && internal::traits<OtherDerived>::IsAligned>::run(*this, other);
+}
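+
+// A minimal numeric sketch (assuming <cmath> for M_PI): composing two 90-degree
+// rotations about x yields a 180-degree rotation about x.
+//   Eigen::Quaternionf qx(Eigen::AngleAxisf(float(M_PI)/2, Eigen::Vector3f::UnitX()));
+//   Eigen::Quaternionf q2 = qx * qx;                // ~ AngleAxisf(M_PI, UnitX())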
+
+/** \sa operator*(Quaternion) */
+template <class Derived>
+template <class OtherDerived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const QuaternionBase<OtherDerived>& other)
+{
+ derived() = derived() * other.derived();
+ return derived();
+}
+
+/** Rotation of a vector by a quaternion.
+ * \remarks If the quaternion is used to rotate several points (>1)
+ * then it is much more efficient to first convert it to a 3x3 Matrix.
+ * Comparison of the operation cost for n transformations:
+ * - Quaternion: 30n
+ * - Via a Matrix3: 24 + 15n
+ */
+template <class Derived>
+EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
+QuaternionBase<Derived>::_transformVector(Vector3 v) const
+{
+ // Note that this algorithm comes from the optimization by hand
+ // of the conversion to a Matrix followed by a Matrix/Vector product.
+ // It appears to be much faster than the common algorithm found
+  // in the literature (30 versus 39 flops). It also requires two
+  // Vector3 temporaries.
+ Vector3 uv = this->vec().cross(v);
+ uv += uv;
+ return v + this->w() * uv + this->vec().cross(uv);
+}
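+// Illustrative sketch of the cost remark above: to rotate many points, convert
+// once to a Matrix3 (24 flops) and reuse it (15 flops per point) instead of
+// applying the quaternion directly (30 flops per point).
+//   Eigen::Quaternionf q(Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ()));
+//   Eigen::Matrix3f R = q.toRotationMatrix();
+//   Eigen::Vector3f a = R * Eigen::Vector3f::UnitX();  // batch-friendly path
+//   Eigen::Vector3f b = q * Eigen::Vector3f::UnitX();  // same result, costlier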
+
+template<class Derived>
+EIGEN_STRONG_INLINE QuaternionBase<Derived>& QuaternionBase<Derived>::operator=(const QuaternionBase<Derived>& other)
+{
+ coeffs() = other.coeffs();
+ return derived();
+}
+
+template<class Derived>
+template<class OtherDerived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const QuaternionBase<OtherDerived>& other)
+{
+ coeffs() = other.coeffs();
+ return derived();
+}
+
+/** Sets \c *this from an angle-axis \a aa and returns a reference to \c *this
+ */
+template<class Derived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)
+{
+ Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
+ this->w() = internal::cos(ha);
+ this->vec() = internal::sin(ha) * aa.axis();
+ return derived();
+}
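+// Illustrative check of the half-angle formula above (M_PI from <cmath>):
+//   Eigen::Quaterniond q;
+//   q = Eigen::AngleAxisd(M_PI, Eigen::Vector3d::UnitZ());
+//   // q.w() == cos(pi/2) == 0 and q.vec() == sin(pi/2) * e_z == (0,0,1)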
+
+/** Set \c *this from the expression \a xpr:
+ * - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
+ * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be a rotation matrix
+ * and \a xpr is converted to a quaternion
+ */
+template<class Derived>
+template<class MatrixDerived>
+inline Derived& QuaternionBase<Derived>::operator=(const MatrixBase<MatrixDerived>& xpr)
+{
+ EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename MatrixDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ internal::quaternionbase_assign_impl<MatrixDerived>::run(*this, xpr.derived());
+ return derived();
+}
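+// Illustrative sketch of the two accepted expression shapes:
+//   Eigen::Quaternionf q1, q2;
+//   q1 = Eigen::Matrix3f::Identity();          // 3x3: treated as a rotation matrix
+//   q2 = Eigen::Vector4f(0.f, 0.f, 0.f, 1.f);  // 4x1: coefficients {x, y, z, w}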
+
+/** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to
+ * be normalized, otherwise the result is undefined.
+ */
+template<class Derived>
+inline typename QuaternionBase<Derived>::Matrix3
+QuaternionBase<Derived>::toRotationMatrix(void) const
+{
+  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (but not gcc 4.3!).
+  // If not inlined, the cost of the return by value is huge (~ +35%); however,
+  // not inlining this function is an order of magnitude slower, so it has to be
+  // inlined, and then the return by value is not an issue.
+ Matrix3 res;
+
+ const Scalar tx = 2*this->x();
+ const Scalar ty = 2*this->y();
+ const Scalar tz = 2*this->z();
+ const Scalar twx = tx*this->w();
+ const Scalar twy = ty*this->w();
+ const Scalar twz = tz*this->w();
+ const Scalar txx = tx*this->x();
+ const Scalar txy = ty*this->x();
+ const Scalar txz = tz*this->x();
+ const Scalar tyy = ty*this->y();
+ const Scalar tyz = tz*this->y();
+ const Scalar tzz = tz*this->z();
+
+ res.coeffRef(0,0) = 1-(tyy+tzz);
+ res.coeffRef(0,1) = txy-twz;
+ res.coeffRef(0,2) = txz+twy;
+ res.coeffRef(1,0) = txy+twz;
+ res.coeffRef(1,1) = 1-(txx+tzz);
+ res.coeffRef(1,2) = tyz-twx;
+ res.coeffRef(2,0) = txz-twy;
+ res.coeffRef(2,1) = tyz+twx;
+ res.coeffRef(2,2) = 1-(txx+tyy);
+
+ return res;
+}
+
+/** Sets \c *this to be a quaternion representing a rotation between
+ * the two arbitrary vectors \a a and \a b. In other words, the built
+ * rotation represents a rotation sending the line of direction \a a
+ * to the line of direction \a b, both lines passing through the origin.
+ *
+ * \returns a reference to \c *this.
+ *
+ * Note that the two input vectors do \b not have to be normalized, and
+ * do not need to have the same norm.
+ */
+template<class Derived>
+template<typename Derived1, typename Derived2>
+inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
+{
+ using std::max;
+ Vector3 v0 = a.normalized();
+ Vector3 v1 = b.normalized();
+ Scalar c = v1.dot(v0);
+
+ // if dot == -1, vectors are nearly opposites
+  // => accurately compute the rotation axis by computing the
+ // intersection of the two planes. This is done by solving:
+ // x^T v0 = 0
+ // x^T v1 = 0
+ // under the constraint:
+ // ||x|| = 1
+ // which yields a singular value problem
+ if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
+ {
+ c = max<Scalar>(c,-1);
+ Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
+ JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
+ Vector3 axis = svd.matrixV().col(2);
+
+ Scalar w2 = (Scalar(1)+c)*Scalar(0.5);
+ this->w() = internal::sqrt(w2);
+ this->vec() = axis * internal::sqrt(Scalar(1) - w2);
+ return derived();
+ }
+ Vector3 axis = v0.cross(v1);
+ Scalar s = internal::sqrt((Scalar(1)+c)*Scalar(2));
+ Scalar invs = Scalar(1)/s;
+ this->vec() = axis * invs;
+ this->w() = s * Scalar(0.5);
+
+ return derived();
+}
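+// Usage sketch (illustrative): build the rotation sending one direction onto
+// another; as documented above, the inputs need not be normalized.
+//   Eigen::Quaternionf q;
+//   q.setFromTwoVectors(Eigen::Vector3f::UnitX(), Eigen::Vector3f(0.f, 2.f, 0.f));
+//   // q * Vector3f::UnitX() is now approximately Vector3f::UnitY().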
+
+/** \returns the multiplicative inverse of \c *this.
+ * Note that in most cases, i.e., if you simply want the opposite rotation,
+ * and/or the quaternion is normalized, then it is enough to use the conjugate.
+ *
+ * \sa QuaternionBase::conjugate()
+ */
+template <class Derived>
+inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::inverse() const
+{
+ // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ??
+ Scalar n2 = this->squaredNorm();
+ if (n2 > 0)
+ return Quaternion<Scalar>(conjugate().coeffs() / n2);
+ else
+ {
+ // return an invalid result to flag the error
+ return Quaternion<Scalar>(Coefficients::Zero());
+ }
+}
+
+/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
+ * if the quaternion is normalized.
+ * The conjugate of a quaternion represents the opposite rotation.
+ *
+ * \sa Quaternion::inverse()
+ */
+template <class Derived>
+inline Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::conjugate() const
+{
+ return Quaternion<Scalar>(this->w(),-this->x(),-this->y(),-this->z());
+}
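+// Illustrative note: for a normalized quaternion the cheap conjugate() equals
+// inverse(), so prefer it when the norm is known to be one.
+//   Eigen::Quaternionf q(Eigen::AngleAxisf(1.f, Eigen::Vector3f::UnitY()));
+//   bool same = q.conjugate().isApprox(q.inverse());  // true for unit quaternions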
+
+/** \returns the angle (in radians) between two rotations
+ * \sa dot()
+ */
+template <class Derived>
+template <class OtherDerived>
+inline typename internal::traits<Derived>::Scalar
+QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
+{
+ using std::acos;
+ double d = internal::abs(this->dot(other));
+ if (d>=1.0)
+ return Scalar(0);
+ return static_cast<Scalar>(2 * acos(d));
+}
+
+/** \returns the spherical linear interpolation between the two quaternions
+ * \c *this and \a other at the parameter \a t
+ */
+template <class Derived>
+template <class OtherDerived>
+Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::slerp(Scalar t, const QuaternionBase<OtherDerived>& other) const
+{
+ using std::acos;
+ static const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
+ Scalar d = this->dot(other);
+ Scalar absD = internal::abs(d);
+
+ Scalar scale0;
+ Scalar scale1;
+
+ if (absD>=one)
+ {
+ scale0 = Scalar(1) - t;
+ scale1 = t;
+ }
+ else
+ {
+ // theta is the angle between the 2 quaternions
+ Scalar theta = acos(absD);
+ Scalar sinTheta = internal::sin(theta);
+
+ scale0 = internal::sin( ( Scalar(1) - t ) * theta) / sinTheta;
+ scale1 = internal::sin( ( t * theta) ) / sinTheta;
+ if (d<0)
+ scale1 = -scale1;
+ }
+
+ return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
+}
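+// Usage sketch (illustrative): interpolate halfway between two orientations.
+//   Eigen::Quaternionf qa(Eigen::AngleAxisf(0.f, Eigen::Vector3f::UnitZ()));
+//   Eigen::Quaternionf qb(Eigen::AngleAxisf(1.f, Eigen::Vector3f::UnitZ()));
+//   Eigen::Quaternionf qm = qa.slerp(0.5f, qb);  // rotation by 0.5 radians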
+
+namespace internal {
+
+// set from a rotation matrix
+template<typename Other>
+struct quaternionbase_assign_impl<Other,3,3>
+{
+ typedef typename Other::Scalar Scalar;
+ typedef DenseIndex Index;
+ template<class Derived> inline static void run(QuaternionBase<Derived>& q, const Other& mat)
+ {
+ // This algorithm comes from "Quaternion Calculus and Fast Animation",
+ // Ken Shoemake, 1987 SIGGRAPH course notes
+ Scalar t = mat.trace();
+ if (t > Scalar(0))
+ {
+ t = sqrt(t + Scalar(1.0));
+ q.w() = Scalar(0.5)*t;
+ t = Scalar(0.5)/t;
+ q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
+ q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
+ q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
+ }
+ else
+ {
+ DenseIndex i = 0;
+ if (mat.coeff(1,1) > mat.coeff(0,0))
+ i = 1;
+ if (mat.coeff(2,2) > mat.coeff(i,i))
+ i = 2;
+ DenseIndex j = (i+1)%3;
+ DenseIndex k = (j+1)%3;
+
+ t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
+ q.coeffs().coeffRef(i) = Scalar(0.5) * t;
+ t = Scalar(0.5)/t;
+ q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
+ q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
+ q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
+ }
+ }
+};
+
+// set from a vector of coefficients assumed to be a quaternion
+template<typename Other>
+struct quaternionbase_assign_impl<Other,4,1>
+{
+ typedef typename Other::Scalar Scalar;
+ template<class Derived> inline static void run(QuaternionBase<Derived>& q, const Other& vec)
+ {
+ q.coeffs() = vec;
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_QUATERNION_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h b/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h
new file mode 100644
index 00000000000..e1214bd3ebb
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h
@@ -0,0 +1,165 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ROTATION2D_H
+#define EIGEN_ROTATION2D_H
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class Rotation2D
+ *
+ * \brief Represents a rotation/orientation in a 2 dimensional space.
+ *
+ * \param _Scalar the scalar type, i.e., the type of the coefficients
+ *
+ * This class is equivalent to a single scalar representing a counter-clockwise rotation
+ * as a single angle in radians. It provides some additional features such as the automatic
+ * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
+ * interface to Quaternion in order to facilitate the writing of generic algorithms
+ * dealing with rotations.
+ *
+ * \sa class Quaternion, class Transform
+ */
+
+namespace internal {
+
+template<typename _Scalar> struct traits<Rotation2D<_Scalar> >
+{
+ typedef _Scalar Scalar;
+};
+} // end namespace internal
+
+template<typename _Scalar>
+class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
+{
+ typedef RotationBase<Rotation2D<_Scalar>,2> Base;
+
+public:
+
+ using Base::operator*;
+
+ enum { Dim = 2 };
+ /** the scalar type of the coefficients */
+ typedef _Scalar Scalar;
+ typedef Matrix<Scalar,2,1> Vector2;
+ typedef Matrix<Scalar,2,2> Matrix2;
+
+protected:
+
+ Scalar m_angle;
+
+public:
+
+  /** Construct a 2D counter-clockwise rotation from the angle \a a in radians. */
+ inline Rotation2D(Scalar a) : m_angle(a) {}
+
+ /** \returns the rotation angle */
+ inline Scalar angle() const { return m_angle; }
+
+ /** \returns a read-write reference to the rotation angle */
+ inline Scalar& angle() { return m_angle; }
+
+ /** \returns the inverse rotation */
+ inline Rotation2D inverse() const { return -m_angle; }
+
+ /** Concatenates two rotations */
+ inline Rotation2D operator*(const Rotation2D& other) const
+ { return m_angle + other.m_angle; }
+
+ /** Concatenates two rotations */
+ inline Rotation2D& operator*=(const Rotation2D& other)
+  { m_angle += other.m_angle; return *this; }
+
+ /** Applies the rotation to a 2D vector */
+ Vector2 operator* (const Vector2& vec) const
+ { return toRotationMatrix() * vec; }
+
+ template<typename Derived>
+ Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
+ Matrix2 toRotationMatrix(void) const;
+
+ /** \returns the spherical interpolation between \c *this and \a other using
+ * parameter \a t. It is in fact equivalent to a linear interpolation.
+ */
+ inline Rotation2D slerp(Scalar t, const Rotation2D& other) const
+ { return m_angle * (1-t) + other.angle() * t; }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
+ { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
+
+ /** Copy constructor with scalar type conversion */
+ template<typename OtherScalarType>
+ inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
+ {
+ m_angle = Scalar(other.angle());
+ }
+
+ inline static Rotation2D Identity() { return Rotation2D(0); }
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ { return internal::isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+ * single precision 2D rotation type */
+typedef Rotation2D<float> Rotation2Df;
+/** \ingroup Geometry_Module
+ * double precision 2D rotation type */
+typedef Rotation2D<double> Rotation2Dd;
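+// Usage sketch (illustrative): compose 2D rotations and apply them to vectors;
+// Vector2f is the standard Eigen type assumed available here.
+//   Eigen::Rotation2Df ra(0.1f), rb(0.2f);
+//   Eigen::Rotation2Df rc = ra * rb;                    // angle 0.3
+//   Eigen::Vector2f v = rc * Eigen::Vector2f::UnitX();  // rotated unit vector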
+
+/** Set \c *this from a 2x2 rotation matrix \a mat.
+ * In other words, this function extracts the rotation angle
+ * from the rotation matrix.
+ */
+template<typename Scalar>
+template<typename Derived>
+Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+ EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+ m_angle = internal::atan2(mat.coeff(1,0), mat.coeff(0,0));
+ return *this;
+}
+
+/** Constructs and \returns an equivalent 2x2 rotation matrix.
+ */
+template<typename Scalar>
+typename Rotation2D<Scalar>::Matrix2
+Rotation2D<Scalar>::toRotationMatrix(void) const
+{
+ Scalar sinA = internal::sin(m_angle);
+ Scalar cosA = internal::cos(m_angle);
+ return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
+}
+
+#endif // EIGEN_ROTATION2D_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/RotationBase.h b/extern/Eigen3/Eigen/src/Geometry/RotationBase.h
new file mode 100644
index 00000000000..1abf06bb640
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/RotationBase.h
@@ -0,0 +1,217 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ROTATIONBASE_H
+#define EIGEN_ROTATIONBASE_H
+
+// forward declaration
+namespace internal {
+template<typename RotationDerived, typename MatrixType, bool IsVector=MatrixType::IsVectorAtCompileTime>
+struct rotation_base_generic_product_selector;
+}
+
+/** \class RotationBase
+ *
+ * \brief Common base class for compact rotation representations
+ *
+ * \param Derived is the derived type, i.e., a rotation type
+ * \param _Dim the dimension of the space
+ */
+template<typename Derived, int _Dim>
+class RotationBase
+{
+ public:
+ enum { Dim = _Dim };
+ /** the scalar type of the coefficients */
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+
+ /** corresponding linear transformation matrix type */
+ typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
+ typedef Matrix<Scalar,Dim,1> VectorType;
+
+ public:
+ inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+ inline Derived& derived() { return *static_cast<Derived*>(this); }
+
+ /** \returns an equivalent rotation matrix */
+ inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); }
+
+ /** \returns an equivalent rotation matrix
+      * This function is added to conform to the Transform class's naming scheme.
+ */
+ inline RotationMatrixType matrix() const { return derived().toRotationMatrix(); }
+
+ /** \returns the inverse rotation */
+ inline Derived inverse() const { return derived().inverse(); }
+
+ /** \returns the concatenation of the rotation \c *this with a translation \a t */
+ inline Transform<Scalar,Dim,Isometry> operator*(const Translation<Scalar,Dim>& t) const
+ { return Transform<Scalar,Dim,Isometry>(*this) * t; }
+
+ /** \returns the concatenation of the rotation \c *this with a uniform scaling \a s */
+ inline RotationMatrixType operator*(const UniformScaling<Scalar>& s) const
+ { return toRotationMatrix() * s.factor(); }
+
+ /** \returns the concatenation of the rotation \c *this with a generic expression \a e
+ * \a e can be:
+ * - a DimxDim linear transformation matrix
+ * - a DimxDim diagonal matrix (axis aligned scaling)
+ * - a vector of size Dim
+ */
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector<Derived,OtherDerived,OtherDerived::IsVectorAtCompileTime>::ReturnType
+ operator*(const EigenBase<OtherDerived>& e) const
+ { return internal::rotation_base_generic_product_selector<Derived,OtherDerived>::run(derived(), e.derived()); }
+
+ /** \returns the concatenation of a linear transformation \a l with the rotation \a r */
+ template<typename OtherDerived> friend
+ inline RotationMatrixType operator*(const EigenBase<OtherDerived>& l, const Derived& r)
+ { return l.derived() * r.toRotationMatrix(); }
+
+ /** \returns the concatenation of a scaling \a l with the rotation \a r */
+ friend inline Transform<Scalar,Dim,Affine> operator*(const DiagonalMatrix<Scalar,Dim>& l, const Derived& r)
+ {
+ Transform<Scalar,Dim,Affine> res(r);
+ res.linear().applyOnTheLeft(l);
+ return res;
+ }
+
+ /** \returns the concatenation of the rotation \c *this with a transformation \a t */
+ template<int Mode, int Options>
+ inline Transform<Scalar,Dim,Mode> operator*(const Transform<Scalar,Dim,Mode,Options>& t) const
+ { return toRotationMatrix() * t; }
+
+ template<typename OtherVectorType>
+ inline VectorType _transformVector(const OtherVectorType& v) const
+ { return toRotationMatrix() * v; }
+};
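+// Illustrative sketch: the generic operator* above lets any rotation type
+// multiply a vector or a square matrix uniformly (AngleAxisd is the standard
+// Eigen type assumed available here).
+//   Eigen::AngleAxisd r(0.1, Eigen::Vector3d::UnitZ());
+//   Eigen::Vector3d v = r * Eigen::Vector3d::UnitX();     // rotated vector
+//   Eigen::Matrix3d M = r * Eigen::Matrix3d::Identity();  // rotation matrix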
+
+namespace internal {
+
+// implementation of the generic product rotation * matrix
+template<typename RotationDerived, typename MatrixType>
+struct rotation_base_generic_product_selector<RotationDerived,MatrixType,false>
+{
+ enum { Dim = RotationDerived::Dim };
+ typedef Matrix<typename RotationDerived::Scalar,Dim,Dim> ReturnType;
+ inline static ReturnType run(const RotationDerived& r, const MatrixType& m)
+ { return r.toRotationMatrix() * m; }
+};
+
+template<typename RotationDerived, typename Scalar, int Dim, int MaxDim>
+struct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix<Scalar,Dim,MaxDim>, false >
+{
+ typedef Transform<Scalar,Dim,Affine> ReturnType;
+ inline static ReturnType run(const RotationDerived& r, const DiagonalMatrix<Scalar,Dim,MaxDim>& m)
+ {
+ ReturnType res(r);
+ res.linear() *= m;
+ return res;
+ }
+};
+
+template<typename RotationDerived,typename OtherVectorType>
+struct rotation_base_generic_product_selector<RotationDerived,OtherVectorType,true>
+{
+ enum { Dim = RotationDerived::Dim };
+ typedef Matrix<typename RotationDerived::Scalar,Dim,1> ReturnType;
+ EIGEN_STRONG_INLINE static ReturnType run(const RotationDerived& r, const OtherVectorType& v)
+ {
+ return r._transformVector(v);
+ }
+};
+
+} // end namespace internal
+
+/** \geometry_module
+ *
+ * \brief Constructs a Dim x Dim rotation matrix from the rotation \a r
+ */
+template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
+template<typename OtherDerived>
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
+::Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+{
+ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
+ *this = r.toRotationMatrix();
+}
+
+/** \geometry_module
+ *
+ * \brief Set a Dim x Dim rotation matrix from the rotation \a r
+ */
+template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
+template<typename OtherDerived>
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>&
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
+::operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+{
+ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
+ return *this = r.toRotationMatrix();
+}
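+// Illustrative sketch: thanks to the constructor and operator= above, any
+// RotationBase-derived object converts directly to a fixed-size square matrix.
+//   Eigen::Matrix2f m;
+//   m = Eigen::Rotation2Df(0.25f);  // equivalent to .toRotationMatrix()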
+
+namespace internal {
+
+/** \internal
+ *
+ * Helper function to convert an arbitrary rotation object into a rotation matrix.
+ *
+ * \param Scalar the numeric type of the matrix coefficients
+ * \param Dim the dimension of the current space
+ *
+ * It returns a Dim x Dim fixed size matrix.
+ *
+ * Default specializations are provided for:
+ * - any scalar type (2D),
+ * - any matrix expression,
+ * - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D)
+ *
+ * Currently toRotationMatrix is only used by Transform.
+ *
+ * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis
+ */
+template<typename Scalar, int Dim>
+inline static Matrix<Scalar,2,2> toRotationMatrix(const Scalar& s)
+{
+ EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return Rotation2D<Scalar>(s).toRotationMatrix();
+}
+
+template<typename Scalar, int Dim, typename OtherDerived>
+inline static Matrix<Scalar,Dim,Dim> toRotationMatrix(const RotationBase<OtherDerived,Dim>& r)
+{
+ return r.toRotationMatrix();
+}
+
+template<typename Scalar, int Dim, typename OtherDerived>
+inline static const MatrixBase<OtherDerived>& toRotationMatrix(const MatrixBase<OtherDerived>& mat)
+{
+ EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
+ YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return mat;
+}
+
+} // end namespace internal
+
+#endif // EIGEN_ROTATIONBASE_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/Scaling.h b/extern/Eigen3/Eigen/src/Geometry/Scaling.h
new file mode 100644
index 00000000000..c911d13e1d3
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/Scaling.h
@@ -0,0 +1,182 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SCALING_H
+#define EIGEN_SCALING_H
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class Scaling
+ *
+ * \brief Represents a generic uniform scaling transformation
+ *
+ * \param _Scalar the scalar type, i.e., the type of the coefficients.
+ *
+ * This class represents a uniform scaling transformation. It is the return
+ * type of Scaling(Scalar), and most of the time this is the only way it
+ * is used. In particular, this class is not meant to store a scaling transformation,
+ * but rather to simplify the construction and update of Transform objects.
+ *
+ * To represent an axis aligned scaling, use the DiagonalMatrix class.
+ *
+ * \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform
+ */
+template<typename _Scalar>
+class UniformScaling
+{
+public:
+ /** the scalar type of the coefficients */
+ typedef _Scalar Scalar;
+
+protected:
+
+ Scalar m_factor;
+
+public:
+
+ /** Default constructor without initialization. */
+ UniformScaling() {}
+  /** Constructs and initializes a uniform scaling transformation */
+ explicit inline UniformScaling(const Scalar& s) : m_factor(s) {}
+
+ inline const Scalar& factor() const { return m_factor; }
+ inline Scalar& factor() { return m_factor; }
+
+  /** Concatenates two uniform scalings */
+ inline UniformScaling operator* (const UniformScaling& other) const
+ { return UniformScaling(m_factor * other.factor()); }
+
+ /** Concatenates a uniform scaling and a translation */
+ template<int Dim>
+ inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const;
+
+ /** Concatenates a uniform scaling and an affine transformation */
+ template<int Dim, int Mode, int Options>
+ inline Transform<Scalar,Dim,Mode> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const;
+
+ /** Concatenates a uniform scaling and a linear transformation matrix */
+ // TODO returns an expression
+ template<typename Derived>
+ inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
+ { return other * m_factor; }
+
+ template<typename Derived,int Dim>
+ inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const
+ { return r.toRotationMatrix() * m_factor; }
+
+ /** \returns the inverse scaling */
+ inline UniformScaling inverse() const
+ { return UniformScaling(Scalar(1)/m_factor); }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline UniformScaling<NewScalarType> cast() const
+ { return UniformScaling<NewScalarType>(NewScalarType(m_factor)); }
+
+ /** Copy constructor with scalar type conversion */
+ template<typename OtherScalarType>
+ inline explicit UniformScaling(const UniformScaling<OtherScalarType>& other)
+ { m_factor = Scalar(other.factor()); }
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ bool isApprox(const UniformScaling& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ { return internal::isApprox(m_factor, other.factor(), prec); }
+
+};
+
+/** Concatenates a linear transformation matrix and a uniform scaling */
+// NOTE this operator is defined in MatrixBase and not as a friend function
+// of UniformScaling to fix an internal crash of Intel's ICC
+template<typename Derived> typename MatrixBase<Derived>::ScalarMultipleReturnType
+MatrixBase<Derived>::operator*(const UniformScaling<Scalar>& s) const
+{ return derived() * s.factor(); }
+
+/** Constructs a uniform scaling from scale factor \a s */
+static inline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); }
+/** Constructs a uniform scaling from scale factor \a s */
+static inline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); }
+/** Constructs a uniform scaling from scale factor \a s */
+template<typename RealScalar>
+static inline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s)
+{ return UniformScaling<std::complex<RealScalar> >(s); }
+
+/** Constructs a 2D axis aligned scaling */
+template<typename Scalar>
+static inline DiagonalMatrix<Scalar,2> Scaling(Scalar sx, Scalar sy)
+{ return DiagonalMatrix<Scalar,2>(sx, sy); }
+/** Constructs a 3D axis aligned scaling */
+template<typename Scalar>
+static inline DiagonalMatrix<Scalar,3> Scaling(Scalar sx, Scalar sy, Scalar sz)
+{ return DiagonalMatrix<Scalar,3>(sx, sy, sz); }
+
+/** Constructs an axis aligned scaling expression from the vector expression \a coeffs.
+ * This is an alias for coeffs.asDiagonal().
+ */
+template<typename Derived>
+static inline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs)
+{ return coeffs.asDiagonal(); }
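+// Usage sketch (illustrative): the Scaling() helpers combine naturally with
+// Translation and Transform objects (Translation2f and Affine2f come from the
+// companion Geometry headers and are assumed available here).
+//   Eigen::Affine2f t = Eigen::Translation2f(1.f, 0.f) * Eigen::Scaling(2.f);
+//   Eigen::Vector2f v = t * Eigen::Vector2f(1.f, 1.f);  // scale then shift: (3, 2)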
+
+/** \addtogroup Geometry_Module */
+//@{
+/** \deprecated */
+typedef DiagonalMatrix<float, 2> AlignedScaling2f;
+/** \deprecated */
+typedef DiagonalMatrix<double,2> AlignedScaling2d;
+/** \deprecated */
+typedef DiagonalMatrix<float, 3> AlignedScaling3f;
+/** \deprecated */
+typedef DiagonalMatrix<double,3> AlignedScaling3d;
+//@}
+
+template<typename Scalar>
+template<int Dim>
+inline Transform<Scalar,Dim,Affine>
+UniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const
+{
+ Transform<Scalar,Dim,Affine> res;
+ res.matrix().setZero();
+ res.linear().diagonal().fill(factor());
+ res.translation() = factor() * t.vector();
+ res(Dim,Dim) = Scalar(1);
+ return res;
+}
+
+template<typename Scalar>
+template<int Dim,int Mode,int Options>
+inline Transform<Scalar,Dim,Mode>
+UniformScaling<Scalar>::operator* (const Transform<Scalar,Dim, Mode, Options>& t) const
+{
+ Transform<Scalar,Dim,Mode> res = t;
+ res.prescale(factor());
+ return res;
+}
+
+#endif // EIGEN_SCALING_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/Transform.h b/extern/Eigen3/Eigen/src/Geometry/Transform.h
new file mode 100644
index 00000000000..19d012572d4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/Transform.h
@@ -0,0 +1,1396 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRANSFORM_H
+#define EIGEN_TRANSFORM_H
+
+namespace internal {
+
+template<typename Transform>
+struct transform_traits
+{
+ enum
+ {
+ Dim = Transform::Dim,
+ HDim = Transform::HDim,
+ Mode = Transform::Mode,
+ IsProjective = (Mode==Projective)
+ };
+};
+
+template< typename TransformType,
+ typename MatrixType,
+ int Case = transform_traits<TransformType>::IsProjective ? 0
+ : int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 1
+ : 2>
+struct transform_right_product_impl;
+
+template< typename Other,
+ int Mode,
+ int Options,
+ int Dim,
+ int HDim,
+ int OtherRows=Other::RowsAtCompileTime,
+ int OtherCols=Other::ColsAtCompileTime>
+struct transform_left_product_impl;
+
+template< typename Lhs,
+ typename Rhs,
+ bool AnyProjective =
+ transform_traits<Lhs>::IsProjective ||
+                         transform_traits<Rhs>::IsProjective>
+struct transform_transform_product_impl;
+
+template< typename Other,
+ int Mode,
+ int Options,
+ int Dim,
+ int HDim,
+ int OtherRows=Other::RowsAtCompileTime,
+ int OtherCols=Other::ColsAtCompileTime>
+struct transform_construct_from_matrix;
+
+template<typename TransformType> struct transform_take_affine_part;
+
+} // end namespace internal
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class Transform
+ *
+ * \brief Represents a homogeneous transformation in an N dimensional space
+ *
+ * \tparam _Scalar the scalar type, i.e., the type of the coefficients
+ * \tparam _Dim the dimension of the space
+ * \tparam _Mode the type of the transformation. Can be:
+ * - #Affine: the transformation is stored as a (Dim+1)^2 matrix,
+ * where the last row is assumed to be [0 ... 0 1].
+ * - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
+ * - #Projective: the transformation is stored as a (Dim+1)^2 matrix
+ * without any assumption.
+ * \tparam _Options has the same meaning as in class Matrix. It allows one to specify DontAlign and/or RowMajor.
+ * These Options are passed directly to the underlying matrix type.
+ *
+ * The homography is internally represented and stored by a matrix which
+ * is available through the matrix() method. To understand the behavior of
+ * this class you have to think of a Transform object as its internal
+ * matrix representation. The chosen convention is right multiply:
+ *
+ * \code v' = T * v \endcode
+ *
+ * Therefore, an affine transformation matrix M is shaped like this:
+ *
+ * \f$ \left( \begin{array}{cc}
+ * linear & translation\\
+ * 0 ... 0 & 1
+ * \end{array} \right) \f$
+ *
+ * Note that for a projective transformation the last row can be anything,
+ * and then the interpretation of different parts might be slightly different.
+ *
+ * However, unlike a plain matrix, the Transform class provides many features
+ * simplifying both its assembly and usage. In particular, it can be composed
+ * with any other transformations (Transform,Translation,RotationBase,Matrix)
+ * and can be directly used to transform implicit homogeneous vectors. All these
+ * operations are handled via the operator*. For the composition of transformations,
+ * the principle is to first convert the right/left hand sides of the product
+ * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
+ * Of course, internally, operator* tries to perform the minimal number of operations
+ * according to the nature of each term. Likewise, when applying the transform
+ * to non homogeneous vectors, the latter are automatically promoted to homogeneous
+ * ones before doing the matrix product. The conversions to homogeneous representations
+ * are performed as follows:
+ *
+ * \b Translation t (Dim)x(1):
+ * \f$ \left( \begin{array}{cc}
+ * I & t \\
+ * 0\,...\,0 & 1
+ * \end{array} \right) \f$
+ *
+ * \b Rotation R (Dim)x(Dim):
+ * \f$ \left( \begin{array}{cc}
+ * R & 0\\
+ * 0\,...\,0 & 1
+ * \end{array} \right) \f$
+ *
+ * \b Linear \b Matrix L (Dim)x(Dim):
+ * \f$ \left( \begin{array}{cc}
+ * L & 0\\
+ * 0\,...\,0 & 1
+ * \end{array} \right) \f$
+ *
+ * \b Affine \b Matrix A (Dim)x(Dim+1):
+ * \f$ \left( \begin{array}{c}
+ * A\\
+ * 0\,...\,0\,1
+ * \end{array} \right) \f$
+ *
+ * \b Column \b vector v (Dim)x(1):
+ * \f$ \left( \begin{array}{c}
+ * v\\
+ * 1
+ * \end{array} \right) \f$
+ *
+ * \b Set \b of \b column \b vectors V1...Vn (Dim)x(n):
+ * \f$ \left( \begin{array}{ccc}
+ * v_1 & ... & v_n\\
+ * 1 & ... & 1
+ * \end{array} \right) \f$
+ *
+ * The concatenation of a Transform object with any kind of other transformation
+ * always returns a Transform object.
+ *
+ * A little exception to the "as pure matrix product" rule is the case of the
+ * transformation of non homogeneous vectors by an affine transformation. In
+ * that case the last matrix row can be ignored, and the product returns non
+ * homogeneous vectors.
+ *
+ * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation,
+ * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix.
+ * The solution is either to use a Dim x Dynamic matrix or explicitly request a
+ * vector transformation by making the vector homogeneous:
+ * \code
+ * m' = T * m.colwise().homogeneous();
+ * \endcode
+ * Note that there is zero overhead.
+ *
+ * Conversion methods from/to Qt's QMatrix and QTransform are available if the
+ * preprocessor token EIGEN_QT_SUPPORT is defined.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN.
+ *
+ * \sa class Matrix, class Quaternion
+ */
+template<typename _Scalar, int _Dim, int _Mode, int _Options>
+class Transform
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
+ enum {
+ Mode = _Mode,
+ Options = _Options,
+ Dim = _Dim, ///< space dimension in which the transformation holds
+ HDim = _Dim+1, ///< size of a respective homogeneous vector
+ Rows = int(Mode)==(AffineCompact) ? Dim : HDim
+ };
+ /** the scalar type of the coefficients */
+ typedef _Scalar Scalar;
+ typedef DenseIndex Index;
+ /** type of the matrix used to represent the transformation */
+ typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType;
+ /** constified MatrixType */
+ typedef const MatrixType ConstMatrixType;
+ /** type of the matrix used to represent the linear part of the transformation */
+ typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
+ /** type of read/write reference to the linear part of the transformation */
+ typedef Block<MatrixType,Dim,Dim> LinearPart;
+ /** type of read reference to the linear part of the transformation */
+ typedef const Block<ConstMatrixType,Dim,Dim> ConstLinearPart;
+ /** type of read/write reference to the affine part of the transformation */
+ typedef typename internal::conditional<int(Mode)==int(AffineCompact),
+ MatrixType&,
+ Block<MatrixType,Dim,HDim> >::type AffinePart;
+ /** type of read reference to the affine part of the transformation */
+ typedef typename internal::conditional<int(Mode)==int(AffineCompact),
+ const MatrixType&,
+ const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart;
+ /** type of a vector */
+ typedef Matrix<Scalar,Dim,1> VectorType;
+ /** type of a read/write reference to the translation part of the rotation */
+ typedef Block<MatrixType,Dim,1> TranslationPart;
+ /** type of a read reference to the translation part of the rotation */
+ typedef const Block<ConstMatrixType,Dim,1> ConstTranslationPart;
+ /** corresponding translation type */
+ typedef Translation<Scalar,Dim> TranslationType;
+
+ // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0
+ enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) };
+ /** The return type of the product between a diagonal matrix and a transform */
+ typedef Transform<Scalar,Dim,TransformTimeDiagonalMode> TransformTimeDiagonalReturnType;
+
+protected:
+
+ MatrixType m_matrix;
+
+public:
+
+ /** Default constructor without initialization of the meaningful coefficients.
+ * If Mode==Affine, then the last row is set to [0 ... 0 1] */
+ inline Transform()
+ {
+ check_template_params();
+ if (int(Mode)==Affine)
+ makeAffine();
+ }
+
+ inline Transform(const Transform& other)
+ {
+ check_template_params();
+ m_matrix = other.m_matrix;
+ }
+
+ inline explicit Transform(const TranslationType& t)
+ {
+ check_template_params();
+ *this = t;
+ }
+ inline explicit Transform(const UniformScaling<Scalar>& s)
+ {
+ check_template_params();
+ *this = s;
+ }
+ template<typename Derived>
+ inline explicit Transform(const RotationBase<Derived, Dim>& r)
+ {
+ check_template_params();
+ *this = r;
+ }
+
+ inline Transform& operator=(const Transform& other)
+ { m_matrix = other.m_matrix; return *this; }
+
+ typedef internal::transform_take_affine_part<Transform> take_affine_part;
+
+ /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
+ template<typename OtherDerived>
+ inline explicit Transform(const EigenBase<OtherDerived>& other)
+ {
+ check_template_params();
+ internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
+ }
+
+ /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. */
+ template<typename OtherDerived>
+ inline Transform& operator=(const EigenBase<OtherDerived>& other)
+ {
+ internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
+ return *this;
+ }
+
+ template<int OtherOptions>
+ inline Transform(const Transform<Scalar,Dim,Mode,OtherOptions>& other)
+ {
+ check_template_params();
+ // only the options change, we can directly copy the matrices
+ m_matrix = other.matrix();
+ }
+
+ template<int OtherMode,int OtherOptions>
+ inline Transform(const Transform<Scalar,Dim,OtherMode,OtherOptions>& other)
+ {
+ check_template_params();
+ // prevent conversions as:
+ // Affine | AffineCompact | Isometry = Projective
+ EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)),
+ YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
+
+ // prevent conversions as:
+ // Isometry = Affine | AffineCompact
+ EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)),
+ YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
+
+ enum { ModeIsAffineCompact = Mode == int(AffineCompact),
+ OtherModeIsAffineCompact = OtherMode == int(AffineCompact)
+ };
+
+ if(ModeIsAffineCompact == OtherModeIsAffineCompact)
+ {
+ // We need the block expression because the code is compiled for all
+ // combinations of transformations and will trigger a compile time error
+ // if one tries to assign the matrices directly
+ m_matrix.template block<Dim,Dim+1>(0,0) = other.matrix().template block<Dim,Dim+1>(0,0);
+ makeAffine();
+ }
+ else if(OtherModeIsAffineCompact)
+ {
+ typedef typename Transform<Scalar,Dim,OtherMode,OtherOptions>::MatrixType OtherMatrixType;
+ internal::transform_construct_from_matrix<OtherMatrixType,Mode,Options,Dim,HDim>::run(this, other.matrix());
+ }
+ else
+ {
+ // here we know that Mode == AffineCompact and OtherMode != AffineCompact.
+ // if OtherMode were Projective, the static assert above would already have caught it.
+ // So the only possibility is that OtherMode == Affine
+ linear() = other.linear();
+ translation() = other.translation();
+ }
+ }
+
+ template<typename OtherDerived>
+ Transform(const ReturnByValue<OtherDerived>& other)
+ {
+ check_template_params();
+ other.evalTo(*this);
+ }
+
+ template<typename OtherDerived>
+ Transform& operator=(const ReturnByValue<OtherDerived>& other)
+ {
+ other.evalTo(*this);
+ return *this;
+ }
+
+ #ifdef EIGEN_QT_SUPPORT
+ inline Transform(const QMatrix& other);
+ inline Transform& operator=(const QMatrix& other);
+ inline QMatrix toQMatrix(void) const;
+ inline Transform(const QTransform& other);
+ inline Transform& operator=(const QTransform& other);
+ inline QTransform toQTransform(void) const;
+ #endif
+
+ /** shortcut for m_matrix(row,col);
+ * \sa MatrixBase::operator(Index,Index) const */
+ inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); }
+ /** shortcut for m_matrix(row,col);
+ * \sa MatrixBase::operator(Index,Index) */
+ inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); }
+
+ /** \returns a read-only expression of the transformation matrix */
+ inline const MatrixType& matrix() const { return m_matrix; }
+ /** \returns a writable expression of the transformation matrix */
+ inline MatrixType& matrix() { return m_matrix; }
+
+ /** \returns a read-only expression of the linear part of the transformation */
+ inline ConstLinearPart linear() const { return m_matrix.template block<Dim,Dim>(0,0); }
+ /** \returns a writable expression of the linear part of the transformation */
+ inline LinearPart linear() { return m_matrix.template block<Dim,Dim>(0,0); }
+
+ /** \returns a read-only expression of the Dim x HDim affine part of the transformation */
+ inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); }
+ /** \returns a writable expression of the Dim x HDim affine part of the transformation */
+ inline AffinePart affine() { return take_affine_part::run(m_matrix); }
+
+ /** \returns a read-only expression of the translation vector of the transformation */
+ inline ConstTranslationPart translation() const { return m_matrix.template block<Dim,1>(0,Dim); }
+ /** \returns a writable expression of the translation vector of the transformation */
+ inline TranslationPart translation() { return m_matrix.template block<Dim,1>(0,Dim); }
+
+ /** \returns an expression of the product between the transform \c *this and a matrix expression \a other
+ *
+ * The right hand side \a other might be either:
+ * \li a vector of size Dim,
+ * \li an homogeneous vector of size Dim+1,
+ * \li a set of vectors of size Dim x Dynamic,
+ * \li a set of homogeneous vectors of size Dim+1 x Dynamic,
+ * \li a linear transformation matrix of size Dim x Dim,
+ * \li an affine transformation matrix of size Dim x Dim+1,
+ * \li a transformation matrix of size Dim+1 x Dim+1.
+ */
+ // note: this function is defined here because some compilers cannot find the respective declaration
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType
+ operator * (const EigenBase<OtherDerived> &other) const
+ { return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); }
+
+ /** \returns the product expression of a transformation matrix \a a times a transform \a b
+ *
+   * The left hand side \a a might be either:
+ * \li a linear transformation matrix of size Dim x Dim,
+ * \li an affine transformation matrix of size Dim x Dim+1,
+ * \li a general transformation matrix of size Dim+1 x Dim+1.
+ */
+ template<typename OtherDerived> friend
+ inline const typename internal::transform_left_product_impl<OtherDerived,Mode,Options,_Dim,_Dim+1>::ResultType
+ operator * (const EigenBase<OtherDerived> &a, const Transform &b)
+ { return internal::transform_left_product_impl<OtherDerived,Mode,Options,Dim,HDim>::run(a.derived(),b); }
+
+ /** \returns The product expression of a transform \a a times a diagonal matrix \a b
+ *
+ * The rhs diagonal matrix is interpreted as an affine scaling transformation. The
+   * product results in a Transform of the same type (mode) as the lhs, unless the lhs
+   * mode is an isometry, in which case the returned transform is an affinity.
+ */
+ template<typename DiagonalDerived>
+ inline const TransformTimeDiagonalReturnType
+ operator * (const DiagonalBase<DiagonalDerived> &b) const
+ {
+ TransformTimeDiagonalReturnType res(*this);
+ res.linear() *= b;
+ return res;
+ }
+
+ /** \returns The product expression of a diagonal matrix \a a times a transform \a b
+ *
+ * The lhs diagonal matrix is interpreted as an affine scaling transformation. The
+   * product results in a Transform of the same type (mode) as the rhs, unless the rhs
+   * mode is an isometry, in which case the returned transform is an affinity.
+ */
+ template<typename DiagonalDerived>
+ friend inline TransformTimeDiagonalReturnType
+ operator * (const DiagonalBase<DiagonalDerived> &a, const Transform &b)
+ {
+ TransformTimeDiagonalReturnType res;
+ res.linear().noalias() = a*b.linear();
+ res.translation().noalias() = a*b.translation();
+ if (Mode!=int(AffineCompact))
+ res.matrix().row(Dim) = b.matrix().row(Dim);
+ return res;
+ }
+
+ template<typename OtherDerived>
+ inline Transform& operator*=(const EigenBase<OtherDerived>& other) { return *this = *this * other; }
+
+ /** Concatenates two transformations */
+ inline const Transform operator * (const Transform& other) const
+ {
+ return internal::transform_transform_product_impl<Transform,Transform>::run(*this,other);
+ }
+
+ /** Concatenates two different transformations */
+ template<int OtherMode,int OtherOptions>
+ inline const typename internal::transform_transform_product_impl<
+ Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::ResultType
+ operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
+ {
+ return internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::run(*this,other);
+ }
+
+ /** \sa MatrixBase::setIdentity() */
+ void setIdentity() { m_matrix.setIdentity(); }
+
+ /**
+ * \brief Returns an identity transformation.
+ * \todo In the future this function should be returning a Transform expression.
+ */
+ static const Transform Identity()
+ {
+ return Transform(MatrixType::Identity());
+ }
+
+ template<typename OtherDerived>
+ inline Transform& scale(const MatrixBase<OtherDerived> &other);
+
+ template<typename OtherDerived>
+ inline Transform& prescale(const MatrixBase<OtherDerived> &other);
+
+ inline Transform& scale(Scalar s);
+ inline Transform& prescale(Scalar s);
+
+ template<typename OtherDerived>
+ inline Transform& translate(const MatrixBase<OtherDerived> &other);
+
+ template<typename OtherDerived>
+ inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);
+
+ template<typename RotationType>
+ inline Transform& rotate(const RotationType& rotation);
+
+ template<typename RotationType>
+ inline Transform& prerotate(const RotationType& rotation);
+
+ Transform& shear(Scalar sx, Scalar sy);
+ Transform& preshear(Scalar sx, Scalar sy);
+
+ inline Transform& operator=(const TranslationType& t);
+ inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
+ inline Transform operator*(const TranslationType& t) const;
+
+ inline Transform& operator=(const UniformScaling<Scalar>& t);
+ inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
+ inline Transform operator*(const UniformScaling<Scalar>& s) const;
+
+ inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linear() *= s; return *this; }
+
+ template<typename Derived>
+ inline Transform& operator=(const RotationBase<Derived,Dim>& r);
+ template<typename Derived>
+ inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }
+ template<typename Derived>
+ inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
+
+ const LinearMatrixType rotation() const;
+ template<typename RotationMatrixType, typename ScalingMatrixType>
+ void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
+ template<typename ScalingMatrixType, typename RotationMatrixType>
+ void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;
+
+ template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+ Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+ const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
+
+ inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const;
+
+ /** \returns a const pointer to the column major internal matrix */
+ const Scalar* data() const { return m_matrix.data(); }
+ /** \returns a non-const pointer to the column major internal matrix */
+ Scalar* data() { return m_matrix.data(); }
+
+ /** \returns \c *this with scalar type casted to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type cast() const
+ { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type(*this); }
+
+ /** Copy constructor with scalar type conversion */
+ template<typename OtherScalarType>
+ inline explicit Transform(const Transform<OtherScalarType,Dim,Mode,Options>& other)
+ {
+ check_template_params();
+ m_matrix = other.matrix().template cast<Scalar>();
+ }
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ { return m_matrix.isApprox(other.m_matrix, prec); }
+
+ /** Sets the last row to [0 ... 0 1]
+ */
+ void makeAffine()
+ {
+ if(int(Mode)!=int(AffineCompact))
+ {
+ matrix().template block<1,Dim>(Dim,0).setZero();
+ matrix().coeffRef(Dim,Dim) = 1;
+ }
+ }
+
+ /** \internal
+ * \returns the Dim x Dim linear part if the transformation is affine,
+ * and the HDim x Dim part for projective transformations.
+ */
+ inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt()
+ { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
+ /** \internal
+ * \returns the Dim x Dim linear part if the transformation is affine,
+ * and the HDim x Dim part for projective transformations.
+ */
+ inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt() const
+ { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
+
+ /** \internal
+ * \returns the translation part if the transformation is affine,
+ * and the last column for projective transformations.
+ */
+ inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt()
+ { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
+ /** \internal
+ * \returns the translation part if the transformation is affine,
+ * and the last column for projective transformations.
+ */
+ inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt() const
+ { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
+
+
+ #ifdef EIGEN_TRANSFORM_PLUGIN
+ #include EIGEN_TRANSFORM_PLUGIN
+ #endif
+
+protected:
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ EIGEN_STRONG_INLINE static void check_template_params()
+ {
+ EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
+ }
+ #endif
+
+};
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Isometry> Isometry2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Isometry> Isometry3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Isometry> Isometry2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Isometry> Isometry3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Affine> Affine2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Affine> Affine3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Affine> Affine2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Affine> Affine3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,AffineCompact> AffineCompact2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,AffineCompact> AffineCompact3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,AffineCompact> AffineCompact2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,AffineCompact> AffineCompact3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Projective> Projective2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Projective> Projective3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Projective> Projective2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Projective> Projective3d;
+
+/**************************
+*** Optional QT support ***
+**************************/
+
+#ifdef EIGEN_QT_SUPPORT
+/** Initializes \c *this from a QMatrix assuming the dimension is 2.
+ *
+ * This function is available only if the token EIGEN_QT_SUPPORT is defined.
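+  *
+  * A minimal usage sketch (assuming the Qt headers are available):
+  * \code
+  * QMatrix qm(1, 0, 0, 1, 10, 20); // m11, m12, m21, m22, dx, dy
+  * Affine2d T(qm);
+  * \endcode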
+ */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>::Transform(const QMatrix& other)
+{
+ check_template_params();
+ *this = other;
+}
+
+/** Sets \c *this from a QMatrix assuming the dimension is 2.
+ *
+ * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+ */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other)
+{
+ EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ m_matrix << other.m11(), other.m21(), other.dx(),
+ other.m12(), other.m22(), other.dy(),
+ 0, 0, 1;
+ return *this;
+}
+
+/** \returns a QMatrix from \c *this assuming the dimension is 2.
+ *
+ * \warning this conversion might lose data if \c *this is not affine
+ *
+ * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+QMatrix Transform<Scalar,Dim,Mode,Options>::toQMatrix(void) const
+{
+ check_template_params();
+ EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
+ m_matrix.coeff(0,1), m_matrix.coeff(1,1),
+ m_matrix.coeff(0,2), m_matrix.coeff(1,2));
+}
+
+/** Initializes \c *this from a QTransform assuming the dimension is 2.
+ *
+ * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+ */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>::Transform(const QTransform& other)
+{
+ check_template_params();
+ *this = other;
+}
+
+/** Sets \c *this from a QTransform assuming the dimension is 2.
+ *
+ * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QTransform& other)
+{
+ check_template_params();
+ EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ if (Mode == int(AffineCompact))
+ m_matrix << other.m11(), other.m21(), other.dx(),
+ other.m12(), other.m22(), other.dy();
+ else
+ m_matrix << other.m11(), other.m21(), other.dx(),
+ other.m12(), other.m22(), other.dy(),
+ other.m13(), other.m23(), other.m33();
+ return *this;
+}
+
+/** \returns a QTransform from \c *this assuming the dimension is 2.
+ *
+ * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+QTransform Transform<Scalar,Dim,Mode,Options>::toQTransform(void) const
+{
+ EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ if (Mode == int(AffineCompact))
+ return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
+ m_matrix.coeff(0,1), m_matrix.coeff(1,1),
+ m_matrix.coeff(0,2), m_matrix.coeff(1,2));
+ else
+ return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),
+ m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),
+ m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));
+}
+#endif
+
+/*********************
+*** Procedural API ***
+*********************/
+
+/** Applies on the right the non-uniform scale transformation represented
+ * by the vector \a other to \c *this and returns a reference to \c *this.
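+  *
+  * A minimal usage sketch (3D):
+  * \code
+  * Affine3d T = Affine3d::Identity();
+  * T.scale(Vector3d(1.0, 2.0, 0.5)); // post-multiply by diag(1, 2, 0.5)
+  * \endcode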
+ * \sa prescale()
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+ EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+ linearExt().noalias() = (linearExt() * other.asDiagonal());
+ return *this;
+}
+
+/** Applies on the right a uniform scale of factor \a s to \c *this
+ * and returns a reference to \c *this.
+ * \sa prescale(Scalar)
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(Scalar s)
+{
+ EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+ linearExt() *= s;
+ return *this;
+}
+
+/** Applies on the left the non-uniform scale transformation represented
+ * by the vector \a other to \c *this and returns a reference to \c *this.
+ * \sa scale()
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &other)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+ EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+ m_matrix.template block<Dim,HDim>(0,0).noalias() = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0));
+ return *this;
+}
+
+/** Applies on the left a uniform scale of factor \a s to \c *this
+ * and returns a reference to \c *this.
+ * \sa scale(Scalar)
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(Scalar s)
+{
+ EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+ m_matrix.template topRows<Dim>() *= s;
+ return *this;
+}
+
+/** Applies on the right the translation matrix represented by the vector \a other
+ * to \c *this and returns a reference to \c *this.
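+  *
+  * A minimal usage sketch (3D):
+  * \code
+  * Affine3d T = Affine3d::Identity();
+  * T.translate(Vector3d(1.0, 0.0, 0.0)); // post-multiply by a unit x-translation
+  * \endcode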
+ * \sa pretranslate()
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::translate(const MatrixBase<OtherDerived> &other)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+ translationExt() += linearExt() * other;
+ return *this;
+}
+
+/** Applies on the left the translation matrix represented by the vector \a other
+ * to \c *this and returns a reference to \c *this.
+ * \sa translate()
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::pretranslate(const MatrixBase<OtherDerived> &other)
+{
+ EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+ if(int(Mode)==int(Projective))
+ affine() += other * m_matrix.row(Dim);
+ else
+ translation() += other;
+ return *this;
+}
+
+/** Applies on the right the rotation represented by \a rotation
+ * to \c *this and returns a reference to \c *this.
+ *
+ * The template parameter \a RotationType is the type of the rotation which
+ * must be known by internal::toRotationMatrix<>.
+ *
+  * Natively supported types include:
+  *   - any scalar (2D),
+  *   - a Dim x Dim matrix expression,
+  *   - a Quaternion (3D),
+  *   - an AngleAxis (3D)
+  *
+  * This mechanism is easily extendable to support user types such as Euler angles,
+  * or a pair of Quaternions for 4D rotations.
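+  *
+  * A minimal usage sketch (3D, double precision):
+  * \code
+  * Affine3d T = Affine3d::Identity();
+  * T.rotate(AngleAxisd(0.25*M_PI, Vector3d::UnitZ())); // T = T * R
+  * \endcode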
+ *
+ * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationType>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::rotate(const RotationType& rotation)
+{
+ linearExt() *= internal::toRotationMatrix<Scalar,Dim>(rotation);
+ return *this;
+}
+
+/** Applies on the left the rotation represented by \a rotation
+ * to \c *this and returns a reference to \c *this.
+ *
+ * See rotate() for further details.
+ *
+ * \sa rotate()
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationType>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation)
+{
+ m_matrix.template block<Dim,HDim>(0,0) = internal::toRotationMatrix<Scalar,Dim>(rotation)
+ * m_matrix.template block<Dim,HDim>(0,0);
+ return *this;
+}
+
+/** Applies on the right the shear transformation represented
+  * by the coefficients \a sx and \a sy to \c *this and returns a reference to \c *this.
+ * \warning 2D only.
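+  *
+  * A minimal usage sketch (2D):
+  * \code
+  * Affine2d T = Affine2d::Identity();
+  * T.shear(0.2, 0.0); // post-multiply by the shear with coefficients sx=0.2, sy=0
+  * \endcode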
+ * \sa preshear()
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::shear(Scalar sx, Scalar sy)
+{
+ EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+ VectorType tmp = linear().col(0)*sy + linear().col(1);
+ linear() << linear().col(0) + linear().col(1)*sx, tmp;
+ return *this;
+}
+
+/** Applies on the left the shear transformation represented
+  * by the coefficients \a sx and \a sy to \c *this and returns a reference to \c *this.
+ * \warning 2D only.
+ * \sa shear()
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::preshear(Scalar sx, Scalar sy)
+{
+ EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+ EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+ m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
+ return *this;
+}
+
+/******************************************************
+*** Scaling, Translation and Rotation compatibility ***
+******************************************************/
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const TranslationType& t)
+{
+ linear().setIdentity();
+ translation() = t.vector();
+ makeAffine();
+ return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const TranslationType& t) const
+{
+ Transform res = *this;
+ res.translate(t.vector());
+ return res;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const UniformScaling<Scalar>& s)
+{
+ m_matrix.setZero();
+ linear().diagonal().fill(s.factor());
+ makeAffine();
+ return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const UniformScaling<Scalar>& s) const
+{
+ Transform res = *this;
+ res.scale(s.factor());
+ return res;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename Derived>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const RotationBase<Derived,Dim>& r)
+{
+ linear() = internal::toRotationMatrix<Scalar,Dim>(r);
+ translation().setZero();
+ makeAffine();
+ return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename Derived>
+inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const RotationBase<Derived,Dim>& r) const
+{
+ Transform res = *this;
+ res.rotate(r.derived());
+ return res;
+}
+
+/************************
+*** Special functions ***
+************************/
+
+/** \returns the rotation part of the transformation
+ *
+ * \svd_module
+ *
+ * \sa computeRotationScaling(), computeScalingRotation(), class SVD
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType
+Transform<Scalar,Dim,Mode,Options>::rotation() const
+{
+ LinearMatrixType result;
+ computeRotationScaling(&result, (LinearMatrixType*)0);
+ return result;
+}
+
+
+/** Decomposes the linear part of the transformation as a product rotation x scaling;
+  * the scaling is not necessarily positive.
+ *
+ * If either pointer is zero, the corresponding computation is skipped.
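+  *
+  * A minimal usage sketch (3D, double precision; \c T is some given Affine3d):
+  * \code
+  * Matrix3d R, S;
+  * T.computeRotationScaling(&R, &S); // now T.linear() is (approximately) R * S
+  * \endcode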
+  *
+ * \svd_module
+ *
+ * \sa computeScalingRotation(), rotation(), class SVD
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationMatrixType, typename ScalingMatrixType>
+void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
+{
+ JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
+
+ Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+ VectorType sv(svd.singularValues());
+ sv.coeffRef(0) *= x;
+ if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint());
+ if(rotation)
+ {
+ LinearMatrixType m(svd.matrixU());
+ m.col(0) /= x;
+ rotation->lazyAssign(m * svd.matrixV().adjoint());
+ }
+}
+
+/** Decomposes the linear part of the transformation as a product scaling x rotation;
+  * the scaling is not necessarily positive.
+ *
+ * If either pointer is zero, the corresponding computation is skipped.
+  *
+ * \svd_module
+ *
+ * \sa computeRotationScaling(), rotation(), class SVD
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename ScalingMatrixType, typename RotationMatrixType>
+void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
+{
+ JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
+
+ Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+ VectorType sv(svd.singularValues());
+ sv.coeffRef(0) *= x;
+ if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint());
+ if(rotation)
+ {
+ LinearMatrixType m(svd.matrixU());
+ m.col(0) /= x;
+ rotation->lazyAssign(m * svd.matrixV().adjoint());
+ }
+}
+
+/** Convenient method to set \c *this from a position, orientation and scale
+ * of a 3D object.
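+  *
+  * A minimal usage sketch (hypothetical values):
+  * \code
+  * Affine3d T;
+  * T.fromPositionOrientationScale(Vector3d(1,2,3), Quaterniond::Identity(), Vector3d(2,2,2));
+  * \endcode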
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+ const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
+{
+ linear() = internal::toRotationMatrix<Scalar,Dim>(orientation);
+ linear() *= scale.asDiagonal();
+ translation() = position;
+ makeAffine();
+ return *this;
+}
+
+namespace internal {
+
+// selector needed to avoid taking the inverse of a 3x4 matrix
+template<typename TransformType, int Mode=TransformType::Mode>
+struct projective_transform_inverse
+{
+ static inline void run(const TransformType&, TransformType&)
+ {}
+};
+
+template<typename TransformType>
+struct projective_transform_inverse<TransformType, Projective>
+{
+ static inline void run(const TransformType& m, TransformType& res)
+ {
+ res.matrix() = m.matrix().inverse();
+ }
+};
+
+} // end namespace internal
+
+
+/**
+ * \returns the inverse transformation, taking advantage of the given knowledge
+ * of \c *this.
+ *
+ * \param hint allows one to optimize the inversion process when the transformation
+ * is known not to be a general transformation (optional). The possible values are:
+ * - #Projective if the transformation is not necessarily affine, i.e., if the
+ * last row is not guaranteed to be [0 ... 0 1]
+ * - #Affine if the last row can be assumed to be [0 ... 0 1]
+ *  - #Isometry if the transformation is only a concatenation of translations
+ * and rotations.
+ * The default is the template class parameter \c Mode.
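+ *
+ * A minimal usage sketch (assuming a rigid 3D transform): passing the #Isometry
+ * hint replaces the generic matrix inversion by a cheap transpose of the linear part:
+ * \code
+ * Affine3d T = Translation3d(1,2,3) * AngleAxisd(0.1, Vector3d::UnitY());
+ * Affine3d Tinv = T.inverse(Isometry);
+ * \endcode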
+ *
+ * \warning unless \a hint is #Isometry, this function requires the generic inverse
+ * method of MatrixBase defined in the LU module. If you forget to include this
+ * module, then you will get hard-to-debug linking errors.
+ *
+ * \sa MatrixBase::inverse()
+ */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>
+Transform<Scalar,Dim,Mode,Options>::inverse(TransformTraits hint) const
+{
+ Transform res;
+ if (hint == Projective)
+ {
+ internal::projective_transform_inverse<Transform>::run(*this, res);
+ }
+ else
+ {
+ if (hint == Isometry)
+ {
+ res.matrix().template topLeftCorner<Dim,Dim>() = linear().transpose();
+ }
+ else if(hint&Affine)
+ {
+ res.matrix().template topLeftCorner<Dim,Dim>() = linear().inverse();
+ }
+ else
+ {
+ eigen_assert(false && "Invalid transform traits in Transform::Inverse");
+ }
+ // translation and remaining parts
+ res.matrix().template topRightCorner<Dim,1>()
+ = - res.matrix().template topLeftCorner<Dim,Dim>() * translation();
+ res.makeAffine(); // we do need this, because in the beginning res is uninitialized
+ }
+ return res;
+}
+
+namespace internal {
+
+/*****************************************************
+*** Specializations of take affine part ***
+*****************************************************/
+
+template<typename TransformType> struct transform_take_affine_part {
+ typedef typename TransformType::MatrixType MatrixType;
+ typedef typename TransformType::AffinePart AffinePart;
+ typedef typename TransformType::ConstAffinePart ConstAffinePart;
+ static inline AffinePart run(MatrixType& m)
+ { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
+ static inline ConstAffinePart run(const MatrixType& m)
+ { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
+};
+
+template<typename Scalar, int Dim, int Options>
+struct transform_take_affine_part<Transform<Scalar,Dim,AffineCompact, Options> > {
+ typedef typename Transform<Scalar,Dim,AffineCompact,Options>::MatrixType MatrixType;
+ static inline MatrixType& run(MatrixType& m) { return m; }
+ static inline const MatrixType& run(const MatrixType& m) { return m; }
+};
+
+/*****************************************************
+*** Specializations of construct from matrix ***
+*****************************************************/
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,Dim>
+{
+ static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+ {
+ transform->linear() = other;
+ transform->translation().setZero();
+ transform->makeAffine();
+ }
+};
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,HDim>
+{
+ static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+ {
+ transform->affine() = other;
+ transform->makeAffine();
+ }
+};
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, HDim,HDim>
+{
+ static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+ { transform->matrix() = other; }
+};
+
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, AffineCompact,Options,Dim,HDim, HDim,HDim>
+{
+ static inline void run(Transform<typename Other::Scalar,Dim,AffineCompact,Options> *transform, const Other& other)
+ { transform->matrix() = other.template block<Dim,HDim>(0,0); }
+};
+
+/**********************************************************
+*** Specializations of operator* with rhs EigenBase ***
+**********************************************************/
+
+template<int LhsMode,int RhsMode>
+struct transform_product_result
+{
+ enum
+ {
+ Mode =
+ (LhsMode == (int)Projective || RhsMode == (int)Projective ) ? Projective :
+ (LhsMode == (int)Affine || RhsMode == (int)Affine ) ? Affine :
+ (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact :
+ (LhsMode == (int)Isometry || RhsMode == (int)Isometry ) ? Isometry : Projective
+ };
+};
+
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 0 >
+{
+ typedef typename MatrixType::PlainObject ResultType;
+
+ EIGEN_STRONG_INLINE static ResultType run(const TransformType& T, const MatrixType& other)
+ {
+ return T.matrix() * other;
+ }
+};
+
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 1 >
+{
+ enum {
+ Dim = TransformType::Dim,
+ HDim = TransformType::HDim,
+ OtherRows = MatrixType::RowsAtCompileTime,
+ OtherCols = MatrixType::ColsAtCompileTime
+ };
+
+ typedef typename MatrixType::PlainObject ResultType;
+
+ EIGEN_STRONG_INLINE static ResultType run(const TransformType& T, const MatrixType& other)
+ {
+ EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
+
+ typedef Block<ResultType, Dim, OtherCols> TopLeftLhs;
+
+ ResultType res(other.rows(),other.cols());
+ TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other;
+ res.row(OtherRows-1) = other.row(OtherRows-1);
+
+ return res;
+ }
+};
+
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 2 >
+{
+ enum {
+ Dim = TransformType::Dim,
+ HDim = TransformType::HDim,
+ OtherRows = MatrixType::RowsAtCompileTime,
+ OtherCols = MatrixType::ColsAtCompileTime
+ };
+
+ typedef typename MatrixType::PlainObject ResultType;
+
+ EIGEN_STRONG_INLINE static ResultType run(const TransformType& T, const MatrixType& other)
+ {
+ EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
+
+ typedef Block<ResultType, Dim, OtherCols> TopLeftLhs;
+
+ ResultType res(other.rows(),other.cols());
+ TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.linear() * other;
+ TopLeftLhs(res, 0, 0, Dim, other.cols()).colwise() += T.translation();
+
+ return res;
+ }
+};
+
+/**********************************************************
+*** Specializations of operator* with lhs EigenBase ***
+**********************************************************/
+
+// generic HDim x HDim matrix * T => Projective
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, HDim,HDim>
+{
+ typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+ typedef typename TransformType::MatrixType MatrixType;
+ typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
+ static ResultType run(const Other& other,const TransformType& tr)
+ { return ResultType(other * tr.matrix()); }
+};
+
+// generic HDim x HDim matrix * AffineCompact => Projective
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, HDim,HDim>
+{
+ typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
+ typedef typename TransformType::MatrixType MatrixType;
+ typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
+ static ResultType run(const Other& other,const TransformType& tr)
+ {
+ ResultType res;
+ res.matrix().noalias() = other.template block<HDim,Dim>(0,0) * tr.matrix();
+ res.matrix().col(Dim) += other.col(Dim);
+ return res;
+ }
+};
+
+// affine matrix * T
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,HDim>
+{
+ typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+ typedef typename TransformType::MatrixType MatrixType;
+ typedef TransformType ResultType;
+ static ResultType run(const Other& other,const TransformType& tr)
+ {
+ ResultType res;
+ res.affine().noalias() = other * tr.matrix();
+ res.matrix().row(Dim) = tr.matrix().row(Dim);
+ return res;
+ }
+};
+
+// affine matrix * AffineCompact
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, Dim,HDim>
+{
+ typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
+ typedef typename TransformType::MatrixType MatrixType;
+ typedef TransformType ResultType;
+ static ResultType run(const Other& other,const TransformType& tr)
+ {
+ ResultType res;
+ res.matrix().noalias() = other.template block<Dim,Dim>(0,0) * tr.matrix();
+ res.translation() += other.col(Dim);
+ return res;
+ }
+};
+
+// linear matrix * T
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,Dim>
+{
+ typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+ typedef typename TransformType::MatrixType MatrixType;
+ typedef TransformType ResultType;
+ static ResultType run(const Other& other, const TransformType& tr)
+ {
+ TransformType res;
+ if(Mode!=int(AffineCompact))
+ res.matrix().row(Dim) = tr.matrix().row(Dim);
+ res.matrix().template topRows<Dim>().noalias()
+ = other * tr.matrix().template topRows<Dim>();
+ return res;
+ }
+};
+
+/**********************************************************
+*** Specializations of operator* with another Transform ***
+**********************************************************/
+
+template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,false >
+{
+ enum { ResultMode = transform_product_result<LhsMode,RhsMode>::Mode };
+ typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
+ typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
+ typedef Transform<Scalar,Dim,ResultMode,LhsOptions> ResultType;
+ static ResultType run(const Lhs& lhs, const Rhs& rhs)
+ {
+ ResultType res;
+ res.linear() = lhs.linear() * rhs.linear();
+ res.translation() = lhs.linear() * rhs.translation() + lhs.translation();
+ res.makeAffine();
+ return res;
+ }
+};
+
+template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,true >
+{
+ typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
+ typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
+ typedef Transform<Scalar,Dim,Projective> ResultType;
+ static ResultType run(const Lhs& lhs, const Rhs& rhs)
+ {
+ return ResultType( lhs.matrix() * rhs.matrix() );
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TRANSFORM_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/Translation.h b/extern/Eigen3/Eigen/src/Geometry/Translation.h
new file mode 100644
index 00000000000..d8fe50f987e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/Translation.h
@@ -0,0 +1,215 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_TRANSLATION_H
+#define EIGEN_TRANSLATION_H
+
+/** \geometry_module \ingroup Geometry_Module
+ *
+ * \class Translation
+ *
+ * \brief Represents a translation transformation
+ *
+ * \param _Scalar the scalar type, i.e., the type of the coefficients.
+ * \param _Dim the dimension of the space, can be a compile time value or Dynamic
+ *
+  * \note This class is not aimed at storing a translation transformation,
+  * but rather at making it easier to construct and update Transform objects.
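+  *
+  * A minimal usage sketch (3D, double precision):
+  * \code
+  * Translation3d t(1.0, 2.0, 3.0);
+  * Affine3d T = t * AngleAxisd(0.5*M_PI, Vector3d::UnitX()); // rotate, then translate
+  * \endcode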
+ *
+ * \sa class Scaling, class Transform
+ */
+template<typename _Scalar, int _Dim>
+class Translation
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
+ /** dimension of the space */
+ enum { Dim = _Dim };
+ /** the scalar type of the coefficients */
+ typedef _Scalar Scalar;
+ /** corresponding vector type */
+ typedef Matrix<Scalar,Dim,1> VectorType;
+ /** corresponding linear transformation matrix type */
+ typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+ /** corresponding affine transformation type */
+ typedef Transform<Scalar,Dim,Affine> AffineTransformType;
+
+protected:
+
+ VectorType m_coeffs;
+
+public:
+
+ /** Default constructor without initialization. */
+ Translation() {}
+  /** Constructs and initializes a 2D translation from its coefficients \a sx and \a sy. */
+ inline Translation(const Scalar& sx, const Scalar& sy)
+ {
+ eigen_assert(Dim==2);
+ m_coeffs.x() = sx;
+ m_coeffs.y() = sy;
+ }
+  /** Constructs and initializes a 3D translation from its coefficients \a sx, \a sy and \a sz. */
+ inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+ {
+ eigen_assert(Dim==3);
+ m_coeffs.x() = sx;
+ m_coeffs.y() = sy;
+ m_coeffs.z() = sz;
+ }
+  /** Constructs and initializes the translation transformation from a vector of translation coefficients */
+ explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
+
+  /** \brief Returns the x-translation by value. **/
+  inline Scalar x() const { return m_coeffs.x(); }
+  /** \brief Returns the y-translation by value. **/
+  inline Scalar y() const { return m_coeffs.y(); }
+  /** \brief Returns the z-translation by value. **/
+  inline Scalar z() const { return m_coeffs.z(); }
+
+  /** \brief Returns the x-translation as a reference. **/
+  inline Scalar& x() { return m_coeffs.x(); }
+  /** \brief Returns the y-translation as a reference. **/
+  inline Scalar& y() { return m_coeffs.y(); }
+  /** \brief Returns the z-translation as a reference. **/
+  inline Scalar& z() { return m_coeffs.z(); }
+
+ const VectorType& vector() const { return m_coeffs; }
+ VectorType& vector() { return m_coeffs; }
+
+ const VectorType& translation() const { return m_coeffs; }
+ VectorType& translation() { return m_coeffs; }
+
+  /** Concatenates two translations */
+ inline Translation operator* (const Translation& other) const
+ { return Translation(m_coeffs + other.m_coeffs); }
+
+ /** Concatenates a translation and a uniform scaling */
+ inline AffineTransformType operator* (const UniformScaling<Scalar>& other) const;
+
+ /** Concatenates a translation and a linear transformation */
+ template<typename OtherDerived>
+ inline AffineTransformType operator* (const EigenBase<OtherDerived>& linear) const;
+
+ /** Concatenates a translation and a rotation */
+ template<typename Derived>
+ inline AffineTransformType operator*(const RotationBase<Derived,Dim>& r) const
+ { return *this * r.toRotationMatrix(); }
+
+ /** \returns the concatenation of a linear transformation \a l with the translation \a t */
+  // it's a nightmare to define a templated friend function outside its declaration
+ template<typename OtherDerived> friend
+ inline AffineTransformType operator*(const EigenBase<OtherDerived>& linear, const Translation& t)
+ {
+ AffineTransformType res;
+ res.matrix().setZero();
+ res.linear() = linear.derived();
+ res.translation() = linear.derived() * t.m_coeffs;
+ res.matrix().row(Dim).setZero();
+ res(Dim,Dim) = Scalar(1);
+ return res;
+ }
+
+ /** Concatenates a translation and a transformation */
+ template<int Mode, int Options>
+ inline Transform<Scalar,Dim,Mode> operator* (const Transform<Scalar,Dim,Mode,Options>& t) const
+ {
+ Transform<Scalar,Dim,Mode> res = t;
+ res.pretranslate(m_coeffs);
+ return res;
+ }
+
+ /** Applies translation to vector */
+ inline VectorType operator* (const VectorType& other) const
+ { return m_coeffs + other; }
+
+ /** \returns the inverse translation (opposite) */
+ Translation inverse() const { return Translation(-m_coeffs); }
+
+ Translation& operator=(const Translation& other)
+ {
+ m_coeffs = other.m_coeffs;
+ return *this;
+ }
+
+ static const Translation Identity() { return Translation(VectorType::Zero()); }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+ *
+ * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+ * then this function smartly returns a const reference to \c *this.
+ */
+ template<typename NewScalarType>
+ inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
+ { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
+
+ /** Copy constructor with scalar type conversion */
+ template<typename OtherScalarType>
+ inline explicit Translation(const Translation<OtherScalarType,Dim>& other)
+ { m_coeffs = other.vector().template cast<Scalar>(); }
+
+ /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+ * determined by \a prec.
+ *
+ * \sa MatrixBase::isApprox() */
+ bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+ { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+};
+
+/** \addtogroup Geometry_Module */
+//@{
+typedef Translation<float, 2> Translation2f;
+typedef Translation<double,2> Translation2d;
+typedef Translation<float, 3> Translation3f;
+typedef Translation<double,3> Translation3d;
+//@}
+
+template<typename Scalar, int Dim>
+inline typename Translation<Scalar,Dim>::AffineTransformType
+Translation<Scalar,Dim>::operator* (const UniformScaling<Scalar>& other) const
+{
+ AffineTransformType res;
+ res.matrix().setZero();
+ res.linear().diagonal().fill(other.factor());
+ res.translation() = m_coeffs;
+ res(Dim,Dim) = Scalar(1);
+ return res;
+}
+
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+inline typename Translation<Scalar,Dim>::AffineTransformType
+Translation<Scalar,Dim>::operator* (const EigenBase<OtherDerived>& linear) const
+{
+ AffineTransformType res;
+ res.matrix().setZero();
+ res.linear() = linear.derived();
+ res.translation() = m_coeffs;
+ res.matrix().row(Dim).setZero();
+ res(Dim,Dim) = Scalar(1);
+ return res;
+}
+
+#endif // EIGEN_TRANSLATION_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/Umeyama.h b/extern/Eigen3/Eigen/src/Geometry/Umeyama.h
new file mode 100644
index 00000000000..b50f461730e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/Umeyama.h
@@ -0,0 +1,183 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_UMEYAMA_H
+#define EIGEN_UMEYAMA_H
+
+// This file requires the user to include
+// * Eigen/Core
+// * Eigen/LU
+// * Eigen/SVD
+// * Eigen/Array
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+// These helpers are required because they allow the use of mixed types as
+// parameters for umeyama(). The problem with mixed parameters is that the return
+// type cannot trivially be deduced when float and double types are mixed.
+namespace internal {
+
+// Compile-time return type deduction for different MatrixBase types.
+// "Different" here means different alignment and parameters but the same underlying
+// real scalar type.
+template<typename MatrixType, typename OtherMatrixType>
+struct umeyama_transform_matrix_type
+{
+ enum {
+ MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
+
+ // When possible we want to choose some small fixed size value since the result
+ // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want.
+ HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1
+ };
+
+ typedef Matrix<typename traits<MatrixType>::Scalar,
+ HomogeneousDimension,
+ HomogeneousDimension,
+ AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? RowMajor : ColMajor),
+ HomogeneousDimension,
+ HomogeneousDimension
+ > type;
+};
+
+}
+
+#endif
+
+/**
+* \geometry_module \ingroup Geometry_Module
+*
+* \brief Returns the transformation between two point sets.
+*
+* The algorithm is based on:
+* "Least-squares estimation of transformation parameters between two point patterns",
+* Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
+*
+* It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that
+* \f{align*}
+* \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2
+* \f}
+* is minimized.
+*
+* The algorithm is based on the analysis of the covariance matrix
+* \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$
+* of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$, where
+* \f$d\f$ corresponds to the dimension (which is typically small).
+* The analysis involves an SVD with a complexity of \f$O(d^3)\f$,
+* though the actual computational effort lies in the covariance
+* matrix computation, which has an asymptotic lower bound of \f$O(dm)\f$ when
+* the input point sets have dimension \f$d \times m\f$.
+*
+* Currently the method works only for floating-point matrices.
+*
+* \todo Should the return type of umeyama() become a Transform?
+*
+* \param src Source points \f$ \mathbf{x} = \left( x_1, \ldots, x_n \right) \f$.
+* \param dst Destination points \f$ \mathbf{y} = \left( y_1, \ldots, y_n \right) \f$.
+* \param with_scaling Sets \f$ c=1 \f$ when <code>false</code> is passed.
+* \return The homogeneous transformation
+* \f{align*}
+* T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix}
+* \f}
+* minimizing the residual above. This transformation is always returned as an
+* Eigen::Matrix.
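+*
+* A minimal usage sketch (hypothetical point sets, one point per column):
+* \code
+* const int n = 100;
+* Matrix3Xd src = Matrix3Xd::Random(3, n);
+* Matrix3Xd dst = src; // in practice, the measured counterpart of src
+* Matrix4d T = umeyama(src, dst, true); // 4x4 homogeneous transformation
+* \endcode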
+*/
+template <typename Derived, typename OtherDerived>
+typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type
+umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true)
+{
+ typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
+ typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename Derived::Index Index;
+
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
+
+ typedef Matrix<Scalar, Dimension, 1> VectorType;
+ typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
+ typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;
+
+ const Index m = src.rows(); // dimension
+ const Index n = src.cols(); // number of measurements
+
+ // required for demeaning ...
+ const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
+
+ // computation of mean
+ const VectorType src_mean = src.rowwise().sum() * one_over_n;
+ const VectorType dst_mean = dst.rowwise().sum() * one_over_n;
+
+ // demeaning of src and dst points
+ const RowMajorMatrixType src_demean = src.colwise() - src_mean;
+ const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean;
+
+ // Eq. (36)-(37)
+ const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n;
+
+ // Eq. (38)
+ const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();
+
+ JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);
+
+ // Initialize the resulting transformation with an identity matrix...
+ TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);
+
+ // Eq. (39)
+ VectorType S = VectorType::Ones(m);
+ if (sigma.determinant()<0) S(m-1) = -1;
+
+ // Eq. (40) and (43)
+ const VectorType& d = svd.singularValues();
+ Index rank = 0; for (Index i=0; i<m; ++i) if (!internal::isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
+ if (rank == m-1) {
+ if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
+ Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
+ } else {
+ const Scalar s = S(m-1); S(m-1) = -1;
+ Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
+ S(m-1) = s;
+ }
+ } else {
+ Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
+ }
+
+ // Eq. (42)
+ const Scalar c = 1/src_var * svd.singularValues().dot(S);
+
+ // Eq. (41)
+  // Note that we first assign dst_mean to the destination so that there is no need
+  // for a temporary.
+ Rt.col(m).head(m) = dst_mean;
+ Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
+
+ if (with_scaling) Rt.block(0,0,m,m) *= c;
+
+ return Rt;
+}
+
+#endif // EIGEN_UMEYAMA_H
diff --git a/extern/Eigen3/Eigen/src/Geometry/arch/Geometry_SSE.h b/extern/Eigen3/Eigen/src/Geometry/arch/Geometry_SSE.h
new file mode 100644
index 00000000000..cbe695c7259
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Geometry/arch/Geometry_SSE.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Rohit Garg <rpg.314@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_GEOMETRY_SSE_H
+#define EIGEN_GEOMETRY_SSE_H
+
+namespace internal {
+
+template<class Derived, class OtherDerived>
+struct quat_product<Architecture::SSE, Derived, OtherDerived, float, Aligned>
+{
+ inline static Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
+ {
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0,0,0,0x80000000));
+ Quaternion<float> res;
+ __m128 a = _a.coeffs().template packet<Aligned>(0);
+ __m128 b = _b.coeffs().template packet<Aligned>(0);
+ __m128 flip1 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),
+ vec4f_swizzle1(b,2,0,1,2)),mask);
+ __m128 flip2 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),
+ vec4f_swizzle1(b,0,1,2,1)),mask);
+ pstore(&res.x(),
+ _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)),
+ _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0),
+ vec4f_swizzle1(b,1,2,0,0))),
+ _mm_add_ps(flip1,flip2)));
+ return res;
+ }
+};
+
+template<typename VectorLhs,typename VectorRhs>
+struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>
+{
+ inline static typename plain_matrix_type<VectorLhs>::type
+ run(const VectorLhs& lhs, const VectorRhs& rhs)
+ {
+ __m128 a = lhs.template packet<VectorLhs::Flags&AlignedBit ? Aligned : Unaligned>(0);
+ __m128 b = rhs.template packet<VectorRhs::Flags&AlignedBit ? Aligned : Unaligned>(0);
+ __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));
+ __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
+ typename plain_matrix_type<VectorLhs>::type res;
+ pstore(&res.x(),_mm_sub_ps(mul1,mul2));
+ return res;
+ }
+};
+
+
+
+
+template<class Derived, class OtherDerived>
+struct quat_product<Architecture::SSE, Derived, OtherDerived, double, Aligned>
+{
+ inline static Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
+ {
+ const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
+
+ Quaternion<double> res;
+
+ const double* a = _a.coeffs().data();
+ Packet2d b_xy = _b.coeffs().template packet<Aligned>(0);
+ Packet2d b_zw = _b.coeffs().template packet<Aligned>(2);
+ Packet2d a_xx = pset1<Packet2d>(a[0]);
+ Packet2d a_yy = pset1<Packet2d>(a[1]);
+ Packet2d a_zz = pset1<Packet2d>(a[2]);
+ Packet2d a_ww = pset1<Packet2d>(a[3]);
+
+ // two temporaries:
+ Packet2d t1, t2;
+
+ /*
+ * t1 = ww*xy + yy*zw
+ * t2 = zz*xy - xx*zw
+ * res.xy = t1 +/- swap(t2)
+ */
+ t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw));
+ t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw));
+#ifdef __SSE3__
+ EIGEN_UNUSED_VARIABLE(mask)
+ pstore(&res.x(), _mm_addsub_pd(t1, preverse(t2)));
+#else
+ pstore(&res.x(), padd(t1, pxor(mask,preverse(t2))));
+#endif
+
+ /*
+ * t1 = ww*zw - yy*xy
+ * t2 = zz*zw + xx*xy
+ * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2)
+ */
+ t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy));
+ t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));
+#ifdef __SSE3__
+ EIGEN_UNUSED_VARIABLE(mask)
+ pstore(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2)));
+#else
+ pstore(&res.z(), psub(t1, pxor(mask,preverse(t2))));
+#endif
+
+ return res;
+}
+};
+
+} // end namespace internal
+
+#endif // EIGEN_GEOMETRY_SSE_H
diff --git a/extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h b/extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h
new file mode 100644
index 00000000000..23ce1bfbd46
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h
@@ -0,0 +1,79 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Vincent Lejeune
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BLOCK_HOUSEHOLDER_H
+#define EIGEN_BLOCK_HOUSEHOLDER_H
+
+// This file contains some helper functions to deal with block Householder reflectors
+
+namespace internal {
+
+/** \internal */
+template<typename TriangularFactorType,typename VectorsType,typename CoeffsType>
+void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs)
+{
+ typedef typename TriangularFactorType::Index Index;
+ typedef typename VectorsType::Scalar Scalar;
+ const Index nbVecs = vectors.cols();
+ eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);
+
+ for(Index i = 0; i < nbVecs; i++)
+ {
+ Index rs = vectors.rows() - i;
+ Scalar Vii = vectors(i,i);
+ vectors.const_cast_derived().coeffRef(i,i) = Scalar(1);
+ triFactor.col(i).head(i).noalias() = -hCoeffs(i) * vectors.block(i, 0, rs, i).adjoint()
+ * vectors.col(i).tail(rs);
+ vectors.const_cast_derived().coeffRef(i, i) = Vii;
+ // FIXME add .noalias() once the triangular product can work inplace
+ triFactor.col(i).head(i) = triFactor.block(0,0,i,i).template triangularView<Upper>()
+ * triFactor.col(i).head(i);
+ triFactor(i,i) = hCoeffs(i);
+ }
+}
+
+/** \internal */
+template<typename MatrixType,typename VectorsType,typename CoeffsType>
+void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs)
+{
+ typedef typename MatrixType::Index Index;
+ enum { TFactorSize = MatrixType::ColsAtCompileTime };
+ Index nbVecs = vectors.cols();
+ Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize> T(nbVecs,nbVecs);
+ make_block_householder_triangular_factor(T, vectors, hCoeffs);
+
+ const TriangularView<VectorsType, UnitLower>& V(vectors);
+
+ // A -= V T V^* A
+ Matrix<typename MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime,0,
+ VectorsType::MaxColsAtCompileTime,MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat;
+ // FIXME add .noalias() once the triangular product can work inplace
+ tmp = T.template triangularView<Upper>().adjoint() * tmp;
+ mat.noalias() -= V * tmp;
+}
+
+} // end namespace internal
+
+#endif // EIGEN_BLOCK_HOUSEHOLDER_H
diff --git a/extern/Eigen3/Eigen/src/Householder/Householder.h b/extern/Eigen3/Eigen/src/Householder/Householder.h
new file mode 100644
index 00000000000..74139c0dcce
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Householder/Householder.h
@@ -0,0 +1,133 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_HOUSEHOLDER_H
+#define EIGEN_HOUSEHOLDER_H
+
+namespace internal {
+template<int n> struct decrement_size
+{
+ enum {
+ ret = n==Dynamic ? n : n-1
+ };
+};
+}
+
+template<typename Derived>
+void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)
+{
+ VectorBlock<Derived, internal::decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1);
+ makeHouseholder(essentialPart, tau, beta);
+}
+
+/** Computes the elementary reflector H such that:
+  * \f$ H \cdot *this = [ \beta\ 0\ \ldots\ 0 ]^T \f$
+  * where the transformation H is:
+  * \f$ H = I - \tau v v^*\f$
+  * and the vector v is:
+  * \f$ v^T = [1\ essential^T] \f$
+ *
+ * On output:
+ * \param essential the essential part of the vector \c v
+  * \param tau the scaling factor of the Householder transformation
+ * \param beta the result of H * \c *this
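+  *
+  * A minimal usage sketch (hypothetical sizes):
+  * \code
+  * VectorXd v = VectorXd::Random(5);
+  * VectorXd essential(4); // the v.size()-1 essential coefficients
+  * double tau, beta;
+  * v.makeHouseholder(essential, tau, beta);
+  * \endcode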
+ *
+ * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(),
+ * MatrixBase::applyHouseholderOnTheRight()
+ */
+template<typename Derived>
+template<typename EssentialPart>
+void MatrixBase<Derived>::makeHouseholder(
+ EssentialPart& essential,
+ Scalar& tau,
+ RealScalar& beta) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart)
+ VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1);
+
+ RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm();
+ Scalar c0 = coeff(0);
+
+ if(tailSqNorm == RealScalar(0) && internal::imag(c0)==RealScalar(0))
+ {
+ tau = RealScalar(0);
+ beta = internal::real(c0);
+ essential.setZero();
+ }
+ else
+ {
+ beta = internal::sqrt(internal::abs2(c0) + tailSqNorm);
+ if (internal::real(c0)>=RealScalar(0))
+ beta = -beta;
+ essential = tail / (c0 - beta);
+ tau = internal::conj((beta - c0) / beta);
+ }
+}
+
+template<typename Derived>
+template<typename EssentialPart>
+void MatrixBase<Derived>::applyHouseholderOnTheLeft(
+ const EssentialPart& essential,
+ const Scalar& tau,
+ Scalar* workspace)
+{
+ if(rows() == 1)
+ {
+ *this *= Scalar(1)-tau;
+ }
+ else
+ {
+ Map<typename internal::plain_row_type<PlainObject>::type> tmp(workspace,cols());
+ Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols());
+ tmp.noalias() = essential.adjoint() * bottom;
+ tmp += this->row(0);
+ this->row(0) -= tau * tmp;
+ bottom.noalias() -= tau * essential * tmp;
+ }
+}
+
+template<typename Derived>
+template<typename EssentialPart>
+void MatrixBase<Derived>::applyHouseholderOnTheRight(
+ const EssentialPart& essential,
+ const Scalar& tau,
+ Scalar* workspace)
+{
+ if(cols() == 1)
+ {
+ *this *= Scalar(1)-tau;
+ }
+ else
+ {
+ Map<typename internal::plain_col_type<PlainObject>::type> tmp(workspace,rows());
+ Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);
+ tmp.noalias() = right * essential.conjugate();
+ tmp += this->col(0);
+ this->col(0) -= tau * tmp;
+ right.noalias() -= tau * tmp * essential.transpose();
+ }
+}
+
+#endif // EIGEN_HOUSEHOLDER_H
diff --git a/extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h b/extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h
new file mode 100644
index 00000000000..717f29c99e9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Householder/HouseholderSequence.h
@@ -0,0 +1,429 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H
+#define EIGEN_HOUSEHOLDER_SEQUENCE_H
+
+/** \ingroup Householder_Module
+ * \householder_module
+ * \class HouseholderSequence
+ * \brief Sequence of Householder reflections acting on subspaces with decreasing size
+ * \tparam VectorsType type of matrix containing the Householder vectors
+ * \tparam CoeffsType type of vector containing the Householder coefficients
+ * \tparam Side either OnTheLeft (the default) or OnTheRight
+ *
+ * This class represents a product sequence of Householder reflections where the first Householder reflection
+ * acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by
+ * the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace
+ * spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but
+ * one dimension invariant and acts only on the last dimension. Such sequences of Householder reflections
+ * are used in several algorithms to zero out certain parts of a matrix. Indeed, the methods
+ * HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(),
+ * and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence.
+ *
+ * More precisely, the class %HouseholderSequence represents an \f$ n \times n \f$ matrix \f$ H \f$ of the
+ * form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i
+ * v_i^* \f$. The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$
+ * v_i \f$ is a vector of the form
+ * \f[
+ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ].
+ * \f]
+ * The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector.
+ *
+ * Typical usages are listed below, where H is a HouseholderSequence:
+ * \code
+ * A.applyOnTheRight(H); // A = A * H
+ * A.applyOnTheLeft(H); // A = H * A
+ * A.applyOnTheRight(H.adjoint()); // A = A * H^*
+ * A.applyOnTheLeft(H.adjoint()); // A = H^* * A
+ * MatrixXd Q = H; // conversion to a dense matrix
+ * \endcode
+ * In addition to the adjoint, you can also apply the inverse (which equals the adjoint), the transpose, and the conjugate operators.
+ *
+ * See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example.
+ *
+ * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+ */
+
+namespace internal {
+
+template<typename VectorsType, typename CoeffsType, int Side>
+struct traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
+{
+ typedef typename VectorsType::Scalar Scalar;
+ typedef typename VectorsType::Index Index;
+ typedef typename VectorsType::StorageKind StorageKind;
+ enum {
+ RowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::RowsAtCompileTime
+ : traits<VectorsType>::ColsAtCompileTime,
+ ColsAtCompileTime = RowsAtCompileTime,
+ MaxRowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::MaxRowsAtCompileTime
+ : traits<VectorsType>::MaxColsAtCompileTime,
+ MaxColsAtCompileTime = MaxRowsAtCompileTime,
+ Flags = 0
+ };
+};
+
+template<typename VectorsType, typename CoeffsType, int Side>
+struct hseq_side_dependent_impl
+{
+ typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType;
+ typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
+ typedef typename VectorsType::Index Index;
+ static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
+ {
+ Index start = k+1+h.m_shift;
+ return Block<const VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1);
+ }
+};
+
+template<typename VectorsType, typename CoeffsType>
+struct hseq_side_dependent_impl<VectorsType, CoeffsType, OnTheRight>
+{
+ typedef Transpose<Block<const VectorsType, 1, Dynamic> > EssentialVectorType;
+ typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType;
+ typedef typename VectorsType::Index Index;
+ static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
+ {
+ Index start = k+1+h.m_shift;
+ return Block<const VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose();
+ }
+};
+
+template<typename OtherScalarType, typename MatrixType> struct matrix_type_times_scalar_type
+{
+ typedef typename scalar_product_traits<OtherScalarType, typename MatrixType::Scalar>::ReturnType
+ ResultScalar;
+ typedef Matrix<ResultScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime,
+ 0, MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime> Type;
+};
+
+} // end namespace internal
+
+template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence
+ : public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
+{
+ enum {
+ RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime,
+ ColsAtCompileTime = internal::traits<HouseholderSequence>::ColsAtCompileTime,
+ MaxRowsAtCompileTime = internal::traits<HouseholderSequence>::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = internal::traits<HouseholderSequence>::MaxColsAtCompileTime
+ };
+ typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;
+ typedef typename VectorsType::Index Index;
+
+ typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType
+ EssentialVectorType;
+
+ public:
+
+ typedef HouseholderSequence<
+ VectorsType,
+ typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,
+ CoeffsType>::type,
+ Side
+ > ConjugateReturnType;
+
+ /** \brief Constructor.
+ * \param[in] v %Matrix containing the essential parts of the Householder vectors
+ * \param[in] h Vector containing the Householder coefficients
+ *
+ * Constructs the Householder sequence with coefficients given by \p h and vectors given by \p v. The
+ * i-th Householder coefficient \f$ h_i \f$ is given by \p h(i) and the essential part of the i-th
+ * Householder vector \f$ v_i \f$ is given by \p v(k,i) with \p k > \p i (the subdiagonal part of the
+ * i-th column). If \p v has fewer columns than rows, then the Householder sequence contains as many
+ * Householder reflections as there are columns.
+ *
+ * \note The %HouseholderSequence object stores \p v and \p h by reference.
+ *
+ * Example: \include HouseholderSequence_HouseholderSequence.cpp
+ * Output: \verbinclude HouseholderSequence_HouseholderSequence.out
+ *
+ * \sa setLength(), setShift()
+ */
+ HouseholderSequence(const VectorsType& v, const CoeffsType& h)
+ : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()),
+ m_shift(0)
+ {
+ }
+
+ /** \brief Copy constructor. */
+ HouseholderSequence(const HouseholderSequence& other)
+ : m_vectors(other.m_vectors),
+ m_coeffs(other.m_coeffs),
+ m_trans(other.m_trans),
+ m_length(other.m_length),
+ m_shift(other.m_shift)
+ {
+ }
+
+ /** \brief Number of rows of transformation viewed as a matrix.
+ * \returns Number of rows
+ * \details This equals the dimension of the space that the transformation acts on.
+ */
+ Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
+
+ /** \brief Number of columns of transformation viewed as a matrix.
+ * \returns Number of columns
+ * \details This equals the dimension of the space that the transformation acts on.
+ */
+ Index cols() const { return rows(); }
+
+ /** \brief Essential part of a Householder vector.
+ * \param[in] k Index of Householder reflection
+ * \returns Vector containing non-trivial entries of k-th Householder vector
+ *
+ * This function returns the essential part of the Householder vector \f$ v_i \f$. This is a vector of
+ * length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector
+ * \f[
+ * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ].
+ * \f]
+ * The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v
+ * passed to the constructor.
+ *
+ * \sa setShift(), shift()
+ */
+ const EssentialVectorType essentialVector(Index k) const
+ {
+ eigen_assert(k >= 0 && k < m_length);
+ return internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::essentialVector(*this, k);
+ }
+
+ /** \brief %Transpose of the Householder sequence. */
+ HouseholderSequence transpose() const
+ {
+ return HouseholderSequence(*this).setTrans(!m_trans);
+ }
+
+ /** \brief Complex conjugate of the Householder sequence. */
+ ConjugateReturnType conjugate() const
+ {
+ return ConjugateReturnType(m_vectors, m_coeffs.conjugate())
+ .setTrans(m_trans)
+ .setLength(m_length)
+ .setShift(m_shift);
+ }
+
+ /** \brief Adjoint (conjugate transpose) of the Householder sequence. */
+ ConjugateReturnType adjoint() const
+ {
+ return conjugate().setTrans(!m_trans);
+ }
+
+ /** \brief Inverse of the Householder sequence (equals the adjoint). */
+ ConjugateReturnType inverse() const { return adjoint(); }
+
+ /** \internal */
+ template<typename DestType> void evalTo(DestType& dst) const
+ {
+ Index vecs = m_length;
+ // FIXME find a way to pass this temporary if the user wants to
+ Matrix<Scalar, DestType::RowsAtCompileTime, 1,
+ AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> temp(rows());
+ if( internal::is_same<typename internal::remove_all<VectorsType>::type,DestType>::value
+ && internal::extract_data(dst) == internal::extract_data(m_vectors))
+ {
+ // in-place
+ dst.diagonal().setOnes();
+ dst.template triangularView<StrictlyUpper>().setZero();
+ for(Index k = vecs-1; k >= 0; --k)
+ {
+ Index cornerSize = rows() - k - m_shift;
+ if(m_trans)
+ dst.bottomRightCorner(cornerSize, cornerSize)
+ .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
+ else
+ dst.bottomRightCorner(cornerSize, cornerSize)
+ .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
+
+ // clear the off diagonal vector
+ dst.col(k).tail(rows()-k-1).setZero();
+ }
+ // clear the remaining columns if needed
+ for(Index k = 0; k<cols()-vecs ; ++k)
+ dst.col(k).tail(rows()-k-1).setZero();
+ }
+ else
+ {
+ dst.setIdentity(rows(), rows());
+ for(Index k = vecs-1; k >= 0; --k)
+ {
+ Index cornerSize = rows() - k - m_shift;
+ if(m_trans)
+ dst.bottomRightCorner(cornerSize, cornerSize)
+ .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
+ else
+ dst.bottomRightCorner(cornerSize, cornerSize)
+ .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &temp.coeffRef(0));
+ }
+ }
+ }
+
+ /** \internal */
+ template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const
+ {
+ Matrix<Scalar,1,Dest::RowsAtCompileTime> temp(dst.rows());
+ for(Index k = 0; k < m_length; ++k)
+ {
+ Index actual_k = m_trans ? m_length-k-1 : k;
+ dst.rightCols(rows()-m_shift-actual_k)
+ .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
+ }
+ }
+
+ /** \internal */
+ template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
+ {
+ Matrix<Scalar,1,Dest::ColsAtCompileTime> temp(dst.cols());
+ for(Index k = 0; k < m_length; ++k)
+ {
+ Index actual_k = m_trans ? k : m_length-k-1;
+ dst.bottomRows(rows()-m_shift-actual_k)
+ .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), &temp.coeffRef(0));
+ }
+ }
+
+ /** \brief Computes the product of a Householder sequence with a matrix.
+ * \param[in] other %Matrix being multiplied.
+ * \returns Expression object representing the product.
+ *
+ * This function computes \f$ HM \f$ where \f$ H \f$ is the Householder sequence represented by \p *this
+ * and \f$ M \f$ is the matrix \p other.
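+ *
+ * A short sketch (assuming \c H is a %HouseholderSequence and \c M a dense matrix):
+ * \code
+ * MatrixXd HM = H * M; // applies the reflections to a copy of M without forming H densely
+ * \endcode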
+ */
+ template<typename OtherDerived>
+ typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other) const
+ {
+ typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type
+ res(other.template cast<typename internal::matrix_type_times_scalar_type<Scalar,OtherDerived>::ResultScalar>());
+ applyThisOnTheLeft(res);
+ return res;
+ }
+
+ template<typename _VectorsType, typename _CoeffsType, int _Side> friend struct internal::hseq_side_dependent_impl;
+
+ /** \brief Sets the length of the Householder sequence.
+ * \param [in] length New value for the length.
+ *
+ * By default, the length \f$ n \f$ of the Householder sequence \f$ H = H_0 H_1 \ldots H_{n-1} \f$ is set
+ * to the number of columns of the matrix \p v passed to the constructor, or the number of rows if that
+ * is smaller. After this function is called, the length equals \p length.
+ *
+ * \sa length()
+ */
+ HouseholderSequence& setLength(Index length)
+ {
+ m_length = length;
+ return *this;
+ }
+
+ /** \brief Sets the shift of the Householder sequence.
+ * \param [in] shift New value for the shift.
+ *
+ * By default, a %HouseholderSequence object represents \f$ H = H_0 H_1 \ldots H_{n-1} \f$ and the i-th
+ * column of the matrix \p v passed to the constructor corresponds to the i-th Householder
+ * reflection. After this function is called, the object represents \f$ H = H_{\mathrm{shift}}
+ * H_{\mathrm{shift}+1} \ldots H_{n-1} \f$ and the i-th column of \p v corresponds to the (shift+i)-th
+ * Householder reflection.
+ *
+ * \sa shift()
+ */
+ HouseholderSequence& setShift(Index shift)
+ {
+ m_shift = shift;
+ return *this;
+ }
+
+ Index length() const { return m_length; } /**< \brief Returns the length of the Householder sequence. */
+ Index shift() const { return m_shift; } /**< \brief Returns the shift of the Householder sequence. */
+
+ /* Necessary for .adjoint() and .conjugate() */
+ template <typename VectorsType2, typename CoeffsType2, int Side2> friend class HouseholderSequence;
+
+ protected:
+
+ /** \brief Sets the transpose flag.
+ * \param [in] trans New value of the transpose flag.
+ *
+ * By default, the transpose flag is not set. If the transpose flag is set, then this object represents
+ * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$.
+ *
+ * \sa trans()
+ */
+ HouseholderSequence& setTrans(bool trans)
+ {
+ m_trans = trans;
+ return *this;
+ }
+
+ bool trans() const { return m_trans; } /**< \brief Returns the transpose flag. */
+
+ typename VectorsType::Nested m_vectors;
+ typename CoeffsType::Nested m_coeffs;
+ bool m_trans;
+ Index m_length;
+ Index m_shift;
+};
+
+/** \brief Computes the product of a matrix with a Householder sequence.
+ * \param[in] other %Matrix being multiplied.
+ * \param[in] h %HouseholderSequence being multiplied.
+ * \returns Expression object representing the product.
+ *
+ * This function computes \f$ MH \f$ where \f$ M \f$ is the matrix \p other and \f$ H \f$ is the
+ * Householder sequence represented by \p h.
+ */
+template<typename OtherDerived, typename VectorsType, typename CoeffsType, int Side>
+typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other, const HouseholderSequence<VectorsType,CoeffsType,Side>& h)
+{
+ typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type
+ res(other.template cast<typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::ResultScalar>());
+ h.applyThisOnTheRight(res);
+ return res;
+}
+
+/** \ingroup Householder_Module \householder_module
+ * \brief Convenience function for constructing a Householder sequence.
+ * \returns A HouseholderSequence constructed from the specified arguments.
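+ *
+ * A short sketch (assuming \c qr is a computed HouseholderQR<MatrixXd>):
+ * \code
+ * MatrixXd Q = householderSequence(qr.matrixQR(), qr.hCoeffs()); // the Q factor, for a real matrix
+ * \endcode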
+ */
+template<typename VectorsType, typename CoeffsType>
+HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsType& v, const CoeffsType& h)
+{
+ return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h);
+}
+
+/** \ingroup Householder_Module \householder_module
+ * \brief Convenience function for constructing a Householder sequence.
+ * \returns A HouseholderSequence constructed from the specified arguments.
+ * \details This function differs from householderSequence() in that the template argument \p OnTheSide of
+ * the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft.
+ */
+template<typename VectorsType, typename CoeffsType>
+HouseholderSequence<VectorsType,CoeffsType,OnTheRight> rightHouseholderSequence(const VectorsType& v, const CoeffsType& h)
+{
+ return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h);
+}
+
+#endif // EIGEN_HOUSEHOLDER_SEQUENCE_H
diff --git a/extern/Eigen3/Eigen/src/Jacobi/Jacobi.h b/extern/Eigen3/Eigen/src/Jacobi/Jacobi.h
new file mode 100644
index 00000000000..98dea6800bc
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Jacobi/Jacobi.h
@@ -0,0 +1,430 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_JACOBI_H
+#define EIGEN_JACOBI_H
+
+/** \ingroup Jacobi_Module
+ * \jacobi_module
+ * \class JacobiRotation
+ * \brief Rotation given by a cosine-sine pair.
+ *
+ * This class represents a Jacobi or Givens rotation.
+ * This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by
+ * its cosine \c c and sine \c s as follows:
+ * \f$ J = \left ( \begin{array}{cc} c & \overline s \\ -s & \overline c \end{array} \right ) \f$
+ *
+ * You can apply the respective counter-clockwise rotation to a column vector \c v by
+ * applying its adjoint on the left: \f$ v = J^* v \f$ that translates to the following Eigen code:
+ * \code
+ * v.applyOnTheLeft(J.adjoint());
+ * \endcode
+ *
+ * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+ */
+template<typename Scalar> class JacobiRotation
+{
+ public:
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** Default constructor without any initialization. */
+ JacobiRotation() {}
+
+ /** Construct a planar rotation from a cosine-sine pair (\a c, \a s). */
+ JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {}
+
+ Scalar& c() { return m_c; }
+ Scalar c() const { return m_c; }
+ Scalar& s() { return m_s; }
+ Scalar s() const { return m_s; }
+
+ /** Concatenates two planar rotations. */
+ JacobiRotation operator*(const JacobiRotation& other)
+ {
+ return JacobiRotation(m_c * other.m_c - internal::conj(m_s) * other.m_s,
+ internal::conj(m_c * internal::conj(other.m_s) + internal::conj(m_s) * internal::conj(other.m_c)));
+ }
+
+ /** Returns the transposed transformation */
+ JacobiRotation transpose() const { return JacobiRotation(m_c, -internal::conj(m_s)); }
+
+ /** Returns the adjoint transformation */
+ JacobiRotation adjoint() const { return JacobiRotation(internal::conj(m_c), -m_s); }
+
+ template<typename Derived>
+ bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
+ bool makeJacobi(RealScalar x, Scalar y, RealScalar z);
+
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
+
+ protected:
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type);
+ void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type);
+
+ Scalar m_c, m_s;
+};
+
+/** Makes \c *this a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix
+ * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$
+ *
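+ * A small sketch (illustrative values for the entries x, y, z of B):
+ * \code
+ * double x = 2, y = 1, z = 3;
+ * JacobiRotation<double> J;
+ * J.makeJacobi(x, y, z); // now J^* B J is diagonal
+ * \endcode
+ *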
+ * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+ */
+template<typename Scalar>
+bool JacobiRotation<Scalar>::makeJacobi(RealScalar x, Scalar y, RealScalar z)
+{
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ if(y == Scalar(0))
+ {
+ m_c = Scalar(1);
+ m_s = Scalar(0);
+ return false;
+ }
+ else
+ {
+ RealScalar tau = (x-z)/(RealScalar(2)*internal::abs(y));
+ RealScalar w = internal::sqrt(internal::abs2(tau) + RealScalar(1));
+ RealScalar t;
+ if(tau>RealScalar(0))
+ {
+ t = RealScalar(1) / (tau + w);
+ }
+ else
+ {
+ t = RealScalar(1) / (tau - w);
+ }
+ RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+ RealScalar n = RealScalar(1) / internal::sqrt(internal::abs2(t)+RealScalar(1));
+ m_s = - sign_t * (internal::conj(y) / internal::abs(y)) * internal::abs(t) * n;
+ m_c = n;
+ return true;
+ }
+}
+
+/** Makes \c *this a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix
+ * \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields
+ * a diagonal matrix \f$ A = J^* B J \f$
+ *
+ * Example: \include Jacobi_makeJacobi.cpp
+ * Output: \verbinclude Jacobi_makeJacobi.out
+ *
+ * \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+ */
+template<typename Scalar>
+template<typename Derived>
+inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
+{
+ return makeJacobi(internal::real(m.coeff(p,p)), m.coeff(p,q), internal::real(m.coeff(q,q)));
+}
+
+/** Makes \c *this a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector
+ * \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields:
+ * \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$.
+ *
+ * The value of \a z is returned if \a z is not null (the default is null).
+ * Also note that G is built such that the cosine is always real.
+ *
+ * Example: \include Jacobi_makeGivens.cpp
+ * Output: \verbinclude Jacobi_makeGivens.out
+ *
+ * This function implements the continuous Givens rotation generation algorithm
+ * found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem.
+ * LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000.
+ *
+ * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+ */
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* z)
+{
+ makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
+}
+
+
+// specialization for complexes
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
+{
+ if(q==Scalar(0))
+ {
+ m_c = internal::real(p)<0 ? Scalar(-1) : Scalar(1);
+ m_s = Scalar(0);
+ if(r) *r = m_c * p;
+ }
+ else if(p==Scalar(0))
+ {
+ m_c = Scalar(0);
+ m_s = -q/internal::abs(q);
+ if(r) *r = internal::abs(q);
+ }
+ else
+ {
+ RealScalar p1 = internal::norm1(p);
+ RealScalar q1 = internal::norm1(q);
+ if(p1>=q1)
+ {
+ Scalar ps = p / p1;
+ RealScalar p2 = internal::abs2(ps);
+ Scalar qs = q / p1;
+ RealScalar q2 = internal::abs2(qs);
+
+ RealScalar u = internal::sqrt(RealScalar(1) + q2/p2);
+ if(internal::real(p)<RealScalar(0))
+ u = -u;
+
+ m_c = Scalar(1)/u;
+ m_s = -qs*internal::conj(ps)*(m_c/p2);
+ if(r) *r = p * u;
+ }
+ else
+ {
+ Scalar ps = p / q1;
+ RealScalar p2 = internal::abs2(ps);
+ Scalar qs = q / q1;
+ RealScalar q2 = internal::abs2(qs);
+
+ RealScalar u = q1 * internal::sqrt(p2 + q2);
+ if(internal::real(p)<RealScalar(0))
+ u = -u;
+
+ p1 = internal::abs(p);
+ ps = p/p1;
+ m_c = p1/u;
+ m_s = -internal::conj(ps) * (q/u);
+ if(r) *r = ps * u;
+ }
+ }
+}
+
+// specialization for reals
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
+{
+
+ if(q==Scalar(0))
+ {
+ m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1);
+ m_s = Scalar(0);
+ if(r) *r = internal::abs(p);
+ }
+ else if(p==Scalar(0))
+ {
+ m_c = Scalar(0);
+ m_s = q<Scalar(0) ? Scalar(1) : Scalar(-1);
+ if(r) *r = internal::abs(q);
+ }
+ else if(internal::abs(p) > internal::abs(q))
+ {
+ Scalar t = q/p;
+ Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
+ if(p<Scalar(0))
+ u = -u;
+ m_c = Scalar(1)/u;
+ m_s = -t * m_c;
+ if(r) *r = p * u;
+ }
+ else
+ {
+ Scalar t = p/q;
+ Scalar u = internal::sqrt(Scalar(1) + internal::abs2(t));
+ if(q<Scalar(0))
+ u = -u;
+ m_s = -Scalar(1)/u;
+ m_c = -t * m_s;
+ if(r) *r = q * u;
+ }
+
+}
+
+/****************************************************************************************
+* Implementation of MatrixBase methods
+****************************************************************************************/
+
+/** \jacobi_module
+ * Applies the clockwise 2D rotation \a j to the set of 2D vectors of coordinates \a x and \a y:
+ * \f$ \left ( \begin{array}{c} x \\ y \end{array} \right ) = J \left ( \begin{array}{c} x \\ y \end{array} \right ) \f$
+ *
+ * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+ */
+namespace internal {
+template<typename VectorX, typename VectorY, typename OtherScalar>
+void apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j);
+}
+
+/** \jacobi_module
+ * Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B,
+ * with \f$ B = \left ( \begin{array}{cc} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$.
+ *
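+ * A minimal sketch (assuming \c A is a dense real matrix):
+ * \code
+ * JacobiRotation<double> G;
+ * G.makeGivens(A(0,0), A(1,0));
+ * A.applyOnTheLeft(0, 1, G.adjoint()); // zeroes out A(1,0)
+ * \endcode
+ *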
+ * \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane()
+ */
+template<typename Derived>
+template<typename OtherScalar>
+inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j)
+{
+ RowXpr x(this->row(p));
+ RowXpr y(this->row(q));
+ internal::apply_rotation_in_the_plane(x, y, j);
+}
+
+/** \ingroup Jacobi_Module
+ * Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J
+ * with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$.
+ *
+ * \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane()
+ */
+template<typename Derived>
+template<typename OtherScalar>
+inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j)
+{
+ ColXpr x(this->col(p));
+ ColXpr y(this->col(q));
+ internal::apply_rotation_in_the_plane(x, y, j.transpose());
+}
+
+namespace internal {
+template<typename VectorX, typename VectorY, typename OtherScalar>
+void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j)
+{
+ typedef typename VectorX::Index Index;
+ typedef typename VectorX::Scalar Scalar;
+ enum { PacketSize = packet_traits<Scalar>::size };
+ typedef typename packet_traits<Scalar>::type Packet;
+ eigen_assert(_x.size() == _y.size());
+ Index size = _x.size();
+ Index incrx = _x.innerStride();
+ Index incry = _y.innerStride();
+
+ Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0);
+ Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0);
+
+ /*** dynamic-size vectorized paths ***/
+
+ if(VectorX::SizeAtCompileTime == Dynamic &&
+ (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
+ ((incrx==1 && incry==1) || PacketSize == 1))
+ {
+ // both vectors are sequentially stored in memory => vectorization
+ enum { Peeling = 2 };
+
+ Index alignedStart = first_aligned(y, size);
+ Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
+
+ const Packet pc = pset1<Packet>(j.c());
+ const Packet ps = pset1<Packet>(j.s());
+ conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
+
+ for(Index i=0; i<alignedStart; ++i)
+ {
+ Scalar xi = x[i];
+ Scalar yi = y[i];
+ x[i] = j.c() * xi + conj(j.s()) * yi;
+ y[i] = -j.s() * xi + conj(j.c()) * yi;
+ }
+
+ Scalar* EIGEN_RESTRICT px = x + alignedStart;
+ Scalar* EIGEN_RESTRICT py = y + alignedStart;
+
+ if(first_aligned(x, size)==alignedStart)
+ {
+ for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
+ {
+ Packet xi = pload<Packet>(px);
+ Packet yi = pload<Packet>(py);
+ pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+ pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+ px += PacketSize;
+ py += PacketSize;
+ }
+ }
+ else
+ {
+ Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
+ for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
+ {
+ Packet xi = ploadu<Packet>(px);
+ Packet xi1 = ploadu<Packet>(px+PacketSize);
+ Packet yi = pload <Packet>(py);
+ Packet yi1 = pload <Packet>(py+PacketSize);
+ pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+ pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1)));
+ pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+ pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1)));
+ px += Peeling*PacketSize;
+ py += Peeling*PacketSize;
+ }
+ if(alignedEnd!=peelingEnd)
+ {
+ Packet xi = ploadu<Packet>(x+peelingEnd);
+ Packet yi = pload <Packet>(y+peelingEnd);
+ pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+ pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+ }
+ }
+
+ for(Index i=alignedEnd; i<size; ++i)
+ {
+ Scalar xi = x[i];
+ Scalar yi = y[i];
+ x[i] = j.c() * xi + conj(j.s()) * yi;
+ y[i] = -j.s() * xi + conj(j.c()) * yi;
+ }
+ }
+
+ /*** fixed-size vectorized path ***/
+ else if(VectorX::SizeAtCompileTime != Dynamic &&
+ (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
+ (VectorX::Flags & VectorY::Flags & AlignedBit))
+ {
+ const Packet pc = pset1<Packet>(j.c());
+ const Packet ps = pset1<Packet>(j.s());
+ conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
+ Scalar* EIGEN_RESTRICT px = x;
+ Scalar* EIGEN_RESTRICT py = y;
+ for(Index i=0; i<size; i+=PacketSize)
+ {
+ Packet xi = pload<Packet>(px);
+ Packet yi = pload<Packet>(py);
+ pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+ pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+ px += PacketSize;
+ py += PacketSize;
+ }
+ }
+
+ /*** non-vectorized path ***/
+ else
+ {
+ for(Index i=0; i<size; ++i)
+ {
+ Scalar xi = *x;
+ Scalar yi = *y;
+ *x = j.c() * xi + conj(j.s()) * yi;
+ *y = -j.s() * xi + conj(j.c()) * yi;
+ x += incrx;
+ y += incry;
+ }
+ }
+}
+}
+
+#endif // EIGEN_JACOBI_H
diff --git a/extern/Eigen2/Eigen/src/LU/Determinant.h b/extern/Eigen3/Eigen/src/LU/Determinant.h
index 4f435054ac6..b4fe36eb061 100644
--- a/extern/Eigen2/Eigen/src/LU/Determinant.h
+++ b/extern/Eigen3/Eigen/src/LU/Determinant.h
@@ -1,5 +1,5 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
@@ -25,8 +25,10 @@
#ifndef EIGEN_DETERMINANT_H
#define EIGEN_DETERMINANT_H
+namespace internal {
+
template<typename Derived>
-inline const typename Derived::Scalar ei_bruteforce_det3_helper
+inline const typename Derived::Scalar bruteforce_det3_helper
(const MatrixBase<Derived>& matrix, int a, int b, int c)
{
return matrix.coeff(0,a)
@@ -34,89 +36,77 @@ inline const typename Derived::Scalar ei_bruteforce_det3_helper
}
template<typename Derived>
-const typename Derived::Scalar ei_bruteforce_det4_helper
+const typename Derived::Scalar bruteforce_det4_helper
(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
{
return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
* (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
}
-const int TriangularDeterminant = 0;
-
template<typename Derived,
- int DeterminantType =
- (Derived::Flags & (UpperTriangularBit | LowerTriangularBit))
- ? TriangularDeterminant : Derived::RowsAtCompileTime
-> struct ei_determinant_impl
-{
- static inline typename ei_traits<Derived>::Scalar run(const Derived& m)
- {
- return m.lu().determinant();
- }
-};
-
-template<typename Derived> struct ei_determinant_impl<Derived, TriangularDeterminant>
+ int DeterminantType = Derived::RowsAtCompileTime
+> struct determinant_impl
{
- static inline typename ei_traits<Derived>::Scalar run(const Derived& m)
+ static inline typename traits<Derived>::Scalar run(const Derived& m)
{
- if (Derived::Flags & UnitDiagBit)
- return 1;
- else if (Derived::Flags & ZeroDiagBit)
- return 0;
- else
- return m.diagonal().redux(ei_scalar_product_op<typename ei_traits<Derived>::Scalar>());
+ if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0)
+ return typename traits<Derived>::Scalar(1);
+ return m.partialPivLu().determinant();
}
};
-template<typename Derived> struct ei_determinant_impl<Derived, 1>
+template<typename Derived> struct determinant_impl<Derived, 1>
{
- static inline typename ei_traits<Derived>::Scalar run(const Derived& m)
+ static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0);
}
};
-template<typename Derived> struct ei_determinant_impl<Derived, 2>
+template<typename Derived> struct determinant_impl<Derived, 2>
{
- static inline typename ei_traits<Derived>::Scalar run(const Derived& m)
+ static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
}
};
-template<typename Derived> struct ei_determinant_impl<Derived, 3>
+template<typename Derived> struct determinant_impl<Derived, 3>
{
- static typename ei_traits<Derived>::Scalar run(const Derived& m)
+ static inline typename traits<Derived>::Scalar run(const Derived& m)
{
- return ei_bruteforce_det3_helper(m,0,1,2)
- - ei_bruteforce_det3_helper(m,1,0,2)
- + ei_bruteforce_det3_helper(m,2,0,1);
+ return bruteforce_det3_helper(m,0,1,2)
+ - bruteforce_det3_helper(m,1,0,2)
+ + bruteforce_det3_helper(m,2,0,1);
}
};
-template<typename Derived> struct ei_determinant_impl<Derived, 4>
+template<typename Derived> struct determinant_impl<Derived, 4>
{
- static typename ei_traits<Derived>::Scalar run(const Derived& m)
+ static typename traits<Derived>::Scalar run(const Derived& m)
{
// trick by Martin Costabel to compute 4x4 det with only 30 muls
- return ei_bruteforce_det4_helper(m,0,1,2,3)
- - ei_bruteforce_det4_helper(m,0,2,1,3)
- + ei_bruteforce_det4_helper(m,0,3,1,2)
- + ei_bruteforce_det4_helper(m,1,2,0,3)
- - ei_bruteforce_det4_helper(m,1,3,0,2)
- + ei_bruteforce_det4_helper(m,2,3,0,1);
+ return bruteforce_det4_helper(m,0,1,2,3)
+ - bruteforce_det4_helper(m,0,2,1,3)
+ + bruteforce_det4_helper(m,0,3,1,2)
+ + bruteforce_det4_helper(m,1,2,0,3)
+ - bruteforce_det4_helper(m,1,3,0,2)
+ + bruteforce_det4_helper(m,2,3,0,1);
}
};
+} // end namespace internal
+
/** \lu_module
*
* \returns the determinant of this matrix
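+ *
+ * A short sketch (any square matrix works):
+ * \code
+ * double d = MatrixXd::Identity(3,3).determinant(); // 1.0
+ * \endcode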
*/
template<typename Derived>
-inline typename ei_traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
+inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
{
assert(rows() == cols());
- return ei_determinant_impl<Derived>::run(derived());
+ typedef typename internal::nested<Derived,Base::RowsAtCompileTime>::type Nested;
+ return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
}
#endif // EIGEN_DETERMINANT_H
diff --git a/extern/Eigen3/Eigen/src/LU/FullPivLU.h b/extern/Eigen3/Eigen/src/LU/FullPivLU.h
new file mode 100644
index 00000000000..633fb23fdbe
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/LU/FullPivLU.h
@@ -0,0 +1,754 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_LU_H
+#define EIGEN_LU_H
+
+/** \ingroup LU_Module
+ *
+ * \class FullPivLU
+ *
+ * \brief LU decomposition of a matrix with complete pivoting, and related features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LU decomposition
+ *
+ * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A
+ * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q
+ * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal
+ * coefficients) of U are sorted in such a way that any zeros are at the end.
+ *
+ * This decomposition provides the generic approach to solving systems of linear equations, computing
+ * the rank, invertibility, inverse, kernel, and determinant.
+ *
+ * This LU decomposition is very stable and well tested with large matrices. However, there are use cases where the SVD
+ * decomposition is inherently more stable and/or flexible. For example, when computing the kernel of a matrix,
+ * working with the SVD makes it possible to select the smallest singular values of the matrix, something that
+ * the LU decomposition doesn't see.
+ *
+ * The data of the LU decomposition can be directly accessed through the methods matrixLU(),
+ * permutationP(), permutationQ().
+ *
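+ * A minimal solving sketch (matrix and vector names are illustrative):
+ * \code
+ * MatrixXd A = MatrixXd::Random(4,4);
+ * VectorXd b = VectorXd::Random(4);
+ * FullPivLU<MatrixXd> lu(A);
+ * VectorXd x = lu.solve(b); // a solution of A x = b, if one exists
+ * bool solvable = (A*x).isApprox(b); // worth checking, since A may be singular
+ * \endcode
+ *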
+ * As an example, here is how the original matrix can be retrieved:
+ * \include class_FullPivLU.cpp
+ * Output: \verbinclude class_FullPivLU.out
+ *
+ * \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()
+ */
+template<typename _MatrixType> class FullPivLU
+{
+ public:
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
+ typedef typename MatrixType::Index Index;
+ typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
+ typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
+ typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
+ typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;
+
+ /**
+ * \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via FullPivLU::compute(const MatrixType&).
+ */
+ FullPivLU();
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa FullPivLU()
+ */
+ FullPivLU(Index rows, Index cols);
+
+ /** Constructor.
+ *
+ * \param matrix the matrix of which to compute the LU decomposition.
+ * It is required to be nonzero.
+ */
+ FullPivLU(const MatrixType& matrix);
+
+ /** Computes the LU decomposition of the given matrix.
+ *
+ * \param matrix the matrix of which to compute the LU decomposition.
+ * It is required to be nonzero.
+ *
+ * \returns a reference to *this
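+ *
+ * A reuse sketch (the matrices A1 and A2 are illustrative):
+ * \code
+ * FullPivLU<MatrixXd> lu(A1.rows(), A1.cols()); // preallocate
+ * lu.compute(A1);
+ * lu.compute(A2); // reuses the allocated workspace
+ * \endcode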
+ */
+ FullPivLU& compute(const MatrixType& matrix);
+
+ /** \returns the LU decomposition matrix: the upper-triangular part is U, the
+ * unit-lower-triangular part is L (at least for square matrices; in the non-square
+ * case, special care is needed, see the documentation of class FullPivLU).
+ *
+ * \sa matrixL(), matrixU()
+ */
+ inline const MatrixType& matrixLU() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return m_lu;
+ }
+
+ /** \returns the number of nonzero pivots in the LU decomposition.
+ * Here nonzero is meant in the exact sense, not in a fuzzy sense.
+ * So that notion isn't really intrinsically interesting, but it is
+ * still useful when implementing algorithms.
+ *
+ * \sa rank()
+ */
+ inline Index nonzeroPivots() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return m_nonzero_pivots;
+ }
+
+ /** \returns the absolute value of the biggest pivot, i.e. the biggest
+ * diagonal coefficient of U.
+ */
+ RealScalar maxPivot() const { return m_maxpivot; }
+
+ /** \returns the permutation matrix P
+ *
+ * \sa permutationQ()
+ */
+ inline const PermutationPType& permutationP() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return m_p;
+ }
+
+ /** \returns the permutation matrix Q
+ *
+ * \sa permutationP()
+ */
+ inline const PermutationQType& permutationQ() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return m_q;
+ }
+
+ /** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix
+ * will form a basis of the kernel.
+ *
+ * \note If the kernel has dimension zero, then the returned matrix is a column-vector filled with zeros.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ *
+ * Example: \include FullPivLU_kernel.cpp
+ * Output: \verbinclude FullPivLU_kernel.out
+ *
+ * \sa image()
+ */
+ inline const internal::kernel_retval<FullPivLU> kernel() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return internal::kernel_retval<FullPivLU>(*this);
+ }
+
+ /** \returns the image of the matrix, also called its column-space. The columns of the returned matrix
+ * will form a basis of the image.
+ *
+ * \param originalMatrix the original matrix, of which *this is the LU decomposition.
+ * The reason it needs to be passed here is that this allows
+ * a large optimization, as otherwise this method would need to reconstruct it
+ * from the LU decomposition.
+ *
+ * \note If the image has dimension zero, then the returned matrix is a column-vector filled with zeros.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ *
+ * Example: \include FullPivLU_image.cpp
+ * Output: \verbinclude FullPivLU_image.out
+ *
+ * \sa kernel()
+ */
+ inline const internal::image_retval<FullPivLU>
+ image(const MatrixType& originalMatrix) const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return internal::image_retval<FullPivLU>(*this, originalMatrix);
+ }
+
+ /** \return a solution x to the equation Ax=b, where A is the matrix of which
+ * *this is the LU decomposition.
+ *
+ * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
+ * the only requirement in order for the equation to make sense is that
+ * b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
+ *
+ * \returns a solution.
+ *
+ * \note_about_checking_solutions
+ *
+ * \note_about_arbitrary_choice_of_solution
+ * \note_about_using_kernel_to_study_multiple_solutions
+ *
+ * Example: \include FullPivLU_solve.cpp
+ * Output: \verbinclude FullPivLU_solve.out
+ *
+ * \sa TriangularView::solve(), kernel(), inverse()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<FullPivLU, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return internal::solve_retval<FullPivLU, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the determinant of the matrix of which
+ * *this is the LU decomposition. It has only linear complexity
+ * (that is, O(n) where n is the dimension of the square matrix)
+ * as the LU decomposition has already been computed.
+ *
+ * \note This is only for square matrices.
+ *
+ * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
+ * optimized paths.
+ *
+ * \warning a determinant can be very big or small, so for matrices
+ * of large enough dimension, there is a risk of overflow/underflow.
+ *
+ * \sa MatrixBase::determinant()
+ */
+ typename internal::traits<MatrixType>::Scalar determinant() const;
+
+ /** Allows prescribing a threshold to be used by certain methods, such as rank(),
+ * which need to determine when pivots are to be considered nonzero. This is not used for the
+ * LU decomposition itself.
+ *
+ * When it needs to get the threshold value, Eigen calls threshold(). By default, this
+ * uses a formula to automatically determine a reasonable threshold.
+ * Once you have called the present method setThreshold(const RealScalar&),
+ * your value is used instead.
+ *
+ * \param threshold The new value to use as the threshold.
+ *
+ * A pivot will be considered nonzero if its absolute value is strictly greater than
+ * \f$ threshold \times \vert maxpivot \vert \f$,
+ * where maxpivot is the biggest pivot.
+ *
+ * If you want to come back to the default behavior, call setThreshold(Default_t)
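+ *
+ * For instance (illustrative value):
+ * \code
+ * lu.setThreshold(1e-5); // pivots not above 1e-5 * maxpivot are treated as zero
+ * \endcode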
+ */
+ FullPivLU& setThreshold(const RealScalar& threshold)
+ {
+ m_usePrescribedThreshold = true;
+ m_prescribedThreshold = threshold;
+ return *this;
+ }
+
+ /** Allows returning to the default behavior, letting Eigen use its default formula for
+ * determining the threshold.
+ *
+ * You should pass the special object Eigen::Default as parameter here.
+ * \code lu.setThreshold(Eigen::Default); \endcode
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ FullPivLU& setThreshold(Default_t)
+ {
+ m_usePrescribedThreshold = false;
+ return *this;
+ }
+
+ /** Returns the threshold that will be used by certain methods such as rank().
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ RealScalar threshold() const
+ {
+ eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ return m_usePrescribedThreshold ? m_prescribedThreshold
+ // this formula comes from experimenting (see "LU precision tuning" thread on the list)
+ // and turns out to be identical to Higham's formula used already in LDLt.
+ : NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();
+ }
+
+ /** \returns the rank of the matrix of which *this is the LU decomposition.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index rank() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold();
+ Index result = 0;
+ for(Index i = 0; i < m_nonzero_pivots; ++i)
+ result += (internal::abs(m_lu.coeff(i,i)) > premultiplied_threshold);
+ return result;
+ }
+
+ /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index dimensionOfKernel() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return cols() - rank();
+ }
+
+ /** \returns true if the matrix of which *this is the LU decomposition represents an injective
+ * linear map, i.e. has trivial kernel; false otherwise.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isInjective() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return rank() == cols();
+ }
+
+ /** \returns true if the matrix of which *this is the LU decomposition represents a surjective
+ * linear map; false otherwise.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isSurjective() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return rank() == rows();
+ }
+
+ /** \returns true if the matrix of which *this is the LU decomposition is invertible.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isInvertible() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return isInjective() && (m_lu.rows() == m_lu.cols());
+ }
+
+ /** \returns the inverse of the matrix of which *this is the LU decomposition.
+ *
+ * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
+ * Use isInvertible() to first determine whether this matrix is invertible.
+ *
+ * \sa MatrixBase::inverse()
+ */
+ inline const internal::solve_retval<FullPivLU,typename MatrixType::IdentityReturnType> inverse() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!");
+ return internal::solve_retval<FullPivLU,typename MatrixType::IdentityReturnType>
+ (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols()));
+ }
+
+ MatrixType reconstructedMatrix() const;
+
+ inline Index rows() const { return m_lu.rows(); }
+ inline Index cols() const { return m_lu.cols(); }
+
+ protected:
+ MatrixType m_lu;
+ PermutationPType m_p;
+ PermutationQType m_q;
+ IntColVectorType m_rowsTranspositions;
+ IntRowVectorType m_colsTranspositions;
+ Index m_det_pq, m_nonzero_pivots;
+ RealScalar m_maxpivot, m_prescribedThreshold;
+ bool m_isInitialized, m_usePrescribedThreshold;
+};
+
+template<typename MatrixType>
+FullPivLU<MatrixType>::FullPivLU()
+ : m_isInitialized(false), m_usePrescribedThreshold(false)
+{
+}
+
+template<typename MatrixType>
+FullPivLU<MatrixType>::FullPivLU(Index rows, Index cols)
+ : m_lu(rows, cols),
+ m_p(rows),
+ m_q(cols),
+ m_rowsTranspositions(rows),
+ m_colsTranspositions(cols),
+ m_isInitialized(false),
+ m_usePrescribedThreshold(false)
+{
+}
+
+template<typename MatrixType>
+FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
+ : m_lu(matrix.rows(), matrix.cols()),
+ m_p(matrix.rows()),
+ m_q(matrix.cols()),
+ m_rowsTranspositions(matrix.rows()),
+ m_colsTranspositions(matrix.cols()),
+ m_isInitialized(false),
+ m_usePrescribedThreshold(false)
+{
+ compute(matrix);
+}
+
+template<typename MatrixType>
+FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
+{
+ m_isInitialized = true;
+ m_lu = matrix;
+
+ const Index size = matrix.diagonalSize();
+ const Index rows = matrix.rows();
+ const Index cols = matrix.cols();
+
+ // will store the transpositions, before we accumulate them at the end.
+ // can't accumulate on-the-fly because that will be done in reverse order for the rows.
+ m_rowsTranspositions.resize(matrix.rows());
+ m_colsTranspositions.resize(matrix.cols());
+ Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i
+
+ m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
+ m_maxpivot = RealScalar(0);
+ RealScalar cutoff(0);
+
+ for(Index k = 0; k < size; ++k)
+ {
+ // First, we need to find the pivot.
+
+ // biggest coefficient in the remaining bottom-right corner (starting at row k, col k)
+ Index row_of_biggest_in_corner, col_of_biggest_in_corner;
+ RealScalar biggest_in_corner;
+ biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k)
+ .cwiseAbs()
+ .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
+ row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner,
+ col_of_biggest_in_corner += k; // need to add k to them.
+
+ // when k==0, biggest_in_corner is the biggest coeff absolute value in the original matrix
+ if(k == 0) cutoff = biggest_in_corner * NumTraits<Scalar>::epsilon();
+
+ // if the pivot (hence the corner) is "zero", terminate to avoid generating nan/inf values.
+ // Notice that using an exact comparison (biggest_in_corner==0) here, as Golub-van Loan do in
+ // their pseudo-code, results in numerical instability! The cutoff here has been validated
+ // by running the unit test 'lu' with many repetitions.
+ if(biggest_in_corner < cutoff)
+ {
+ // before exiting, make sure to initialize the still uninitialized transpositions
+ // in a sane state without destroying what we already have.
+ m_nonzero_pivots = k;
+ for(Index i = k; i < size; ++i)
+ {
+ m_rowsTranspositions.coeffRef(i) = i;
+ m_colsTranspositions.coeffRef(i) = i;
+ }
+ break;
+ }
+
+ if(biggest_in_corner > m_maxpivot) m_maxpivot = biggest_in_corner;
+
+ // Now that we've found the pivot, we need to apply the row/col swaps to
+ // bring it to the location (k,k).
+
+ m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner;
+ m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner;
+ if(k != row_of_biggest_in_corner) {
+ m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
+ ++number_of_transpositions;
+ }
+ if(k != col_of_biggest_in_corner) {
+ m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner));
+ ++number_of_transpositions;
+ }
+
+ // Now that the pivot is at the right location, we update the remaining
+ // bottom-right corner by Gaussian elimination.
+
+ if(k<rows-1)
+ m_lu.col(k).tail(rows-k-1) /= m_lu.coeff(k,k);
+ if(k<size-1)
+ m_lu.block(k+1,k+1,rows-k-1,cols-k-1).noalias() -= m_lu.col(k).tail(rows-k-1) * m_lu.row(k).tail(cols-k-1);
+ }
+
+ // the main loop is over, we still have to accumulate the transpositions to find the
+ // permutations P and Q
+
+ m_p.setIdentity(rows);
+ for(Index k = size-1; k >= 0; --k)
+ m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
+
+ m_q.setIdentity(cols);
+ for(Index k = 0; k < size; ++k)
+ m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
+
+ m_det_pq = (number_of_transpositions%2) ? -1 : 1;
+ return *this;
+}
+
+template<typename MatrixType>
+typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant() const
+{
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!");
+ return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod());
+}
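+
+// Illustrative usage (a sketch with hypothetical names, assuming `using namespace Eigen;`):
+//
+//   MatrixXd A = MatrixXd::Random(4,4);
+//   FullPivLU<MatrixXd> lu(A);
+//   double d = lu.determinant(); // sign of the permutations times the product of U's diagonal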
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: P^{-1} L U Q^{-1}.
+ * This function is provided for debug purposes. */
+template<typename MatrixType>
+MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
+{
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());
+ // LU
+ MatrixType res(m_lu.rows(),m_lu.cols());
+ // FIXME the .toDenseMatrix() should not be needed...
+ res = m_lu.leftCols(smalldim)
+ .template triangularView<UnitLower>().toDenseMatrix()
+ * m_lu.topRows(smalldim)
+ .template triangularView<Upper>().toDenseMatrix();
+
+ // P^{-1}(LU)
+ res = m_p.inverse() * res;
+
+ // (P^{-1}LU)Q^{-1}
+ res = res * m_q.inverse();
+
+ return res;
+}
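+
+// Illustrative sanity check (a sketch; `A` is a hypothetical matrix):
+//
+//   FullPivLU<MatrixXd> lu(A);
+//   double err = (lu.reconstructedMatrix() - A).norm(); // expected at machine-precision level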
+
+/********* Implementation of kernel() **************************************************/
+
+namespace internal {
+template<typename _MatrixType>
+struct kernel_retval<FullPivLU<_MatrixType> >
+ : kernel_retval_base<FullPivLU<_MatrixType> >
+{
+ EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>)
+
+ enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
+ MatrixType::MaxColsAtCompileTime,
+ MatrixType::MaxRowsAtCompileTime)
+ };
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
+ if(dimker == 0)
+ {
+ // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's
+ // avoid crashing/asserting as that depends on floating point calculations. Let's
+ // just return a single column vector filled with zeros.
+ dst.setZero();
+ return;
+ }
+
+ /* Let us use the following lemma:
+ *
+ * Lemma: If the matrix A has the LU decomposition PAQ = LU,
+ * then Ker A = Q(Ker U).
+ *
+ * Proof: trivial: just keep in mind that P, Q, L are invertible.
+ */
+
+ /* Thus, all we need to do is to compute Ker U, and then apply Q.
+ *
+ * U is upper triangular, with its pivots sorted so that any zeros appear at the end.
+ * Thus, the diagonal of U ends with exactly
+ * dimKer zeros. Let us use that to construct dimKer linearly
+ * independent vectors in Ker U.
+ */
+
+ Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
+ RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
+ Index p = 0;
+ for(Index i = 0; i < dec().nonzeroPivots(); ++i)
+ if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
+ pivots.coeffRef(p++) = i;
+ eigen_internal_assert(p == rank());
+
+ // we construct a temporary trapezoid matrix m by taking the U matrix and
+ // permuting the rows and cols to bring the nonnegligible pivots to the top of
+ // the main diagonal. We need that to be able to apply our triangular solvers.
+ // FIXME when we get triangularView-for-rectangular-matrices, this can be simplified
+ Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,
+ MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>
+ m(dec().matrixLU().block(0, 0, rank(), cols));
+ for(Index i = 0; i < rank(); ++i)
+ {
+ if(i) m.row(i).head(i).setZero();
+ m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
+ }
+ m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();
+ for(Index i = 0; i < rank(); ++i)
+ m.col(i).swap(m.col(pivots.coeff(i)));
+
+ // ok, we have our trapezoid matrix, we can apply the triangular solver.
+ // notice that the math behind this suggests that we should apply this to the
+ // negative of the RHS, but for performance we just put the negative sign elsewhere, see below.
+ m.topLeftCorner(rank(), rank())
+ .template triangularView<Upper>().solveInPlace(
+ m.topRightCorner(rank(), dimker)
+ );
+
+ // now we must undo the column permutation that we had applied!
+ for(Index i = rank()-1; i >= 0; --i)
+ m.col(i).swap(m.col(pivots.coeff(i)));
+
+ // see the negative sign in the next line, that's what we were talking about above.
+ for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
+ for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
+ for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
+ }
+};
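+
+// Illustrative usage of kernel() (a sketch; the rank-1 test matrix is hypothetical):
+//
+//   MatrixXd A = VectorXd::Random(4) * RowVectorXd::Random(5); // 4x5, rank 1
+//   MatrixXd K = A.fullPivLu().kernel(); // 5 x (cols - rank) = 5 x 4
+//   // (A * K).isZero() should hold up to the pivot threshold.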
+
+/***** Implementation of image() *****************************************************/
+
+template<typename _MatrixType>
+struct image_retval<FullPivLU<_MatrixType> >
+ : image_retval_base<FullPivLU<_MatrixType> >
+{
+ EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>)
+
+ enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
+ MatrixType::MaxColsAtCompileTime,
+ MatrixType::MaxRowsAtCompileTime)
+ };
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ if(rank() == 0)
+ {
+ // The Image is just {0}, so it doesn't have a basis properly speaking, but let's
+ // avoid crashing/asserting as that depends on floating point calculations. Let's
+ // just return a single column vector filled with zeros.
+ dst.setZero();
+ return;
+ }
+
+ Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
+ RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
+ Index p = 0;
+ for(Index i = 0; i < dec().nonzeroPivots(); ++i)
+ if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
+ pivots.coeffRef(p++) = i;
+ eigen_internal_assert(p == rank());
+
+ for(Index i = 0; i < rank(); ++i)
+ dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i)));
+ }
+};
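+
+// Illustrative usage of image() (a sketch; image() takes the original matrix as argument,
+// since only the decomposition itself is stored):
+//
+//   FullPivLU<MatrixXd> lu(A);
+//   MatrixXd C = lu.image(A); // rank() columns of A spanning the column space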
+
+/***** Implementation of solve() *****************************************************/
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<FullPivLU<_MatrixType>, Rhs>
+ : solve_retval_base<FullPivLU<_MatrixType>, Rhs>
+{
+ EIGEN_MAKE_SOLVE_HELPERS(FullPivLU<_MatrixType>,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.
+ * So we proceed as follows:
+ * Step 1: compute c = P * rhs.
+ * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible.
+ * Step 3: replace c by the solution x to Ux = c. May or may not exist.
+ * Step 4: result = Q * c;
+ */
+
+ const Index rows = dec().rows(), cols = dec().cols(),
+ nonzero_pivots = dec().nonzeroPivots();
+ eigen_assert(rhs().rows() == rows);
+ const Index smalldim = (std::min)(rows, cols);
+
+ if(nonzero_pivots == 0)
+ {
+ dst.setZero();
+ return;
+ }
+
+ typename Rhs::PlainObject c(rhs().rows(), rhs().cols());
+
+ // Step 1
+ c = dec().permutationP() * rhs();
+
+ // Step 2
+ dec().matrixLU()
+ .topLeftCorner(smalldim,smalldim)
+ .template triangularView<UnitLower>()
+ .solveInPlace(c.topRows(smalldim));
+ if(rows>cols)
+ {
+ c.bottomRows(rows-cols)
+ -= dec().matrixLU().bottomRows(rows-cols)
+ * c.topRows(cols);
+ }
+
+ // Step 3
+ dec().matrixLU()
+ .topLeftCorner(nonzero_pivots, nonzero_pivots)
+ .template triangularView<Upper>()
+ .solveInPlace(c.topRows(nonzero_pivots));
+
+ // Step 4
+ for(Index i = 0; i < nonzero_pivots; ++i)
+ dst.row(dec().permutationQ().indices().coeff(i)) = c.row(i);
+ for(Index i = nonzero_pivots; i < dec().matrixLU().cols(); ++i)
+ dst.row(dec().permutationQ().indices().coeff(i)).setZero();
+ }
+};
+
+} // end namespace internal
+
+/******* MatrixBase methods *****************************************************************/
+
+/** \lu_module
+ *
+ * \return the full-pivoting LU decomposition of \c *this.
+ *
+ * \sa class FullPivLU
+ */
+template<typename Derived>
+inline const FullPivLU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::fullPivLu() const
+{
+ return FullPivLU<PlainObject>(eval());
+}
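+
+// Illustrative one-liner (a sketch; `A` and `b` are hypothetical):
+//
+//   VectorXd x = A.fullPivLu().solve(b);
+//   // check (A*x).isApprox(b) to know whether an exact solution exists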
+
+#endif // EIGEN_LU_H
diff --git a/extern/Eigen3/Eigen/src/LU/Inverse.h b/extern/Eigen3/Eigen/src/LU/Inverse.h
new file mode 100644
index 00000000000..2d3e6d10529
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/LU/Inverse.h
@@ -0,0 +1,407 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_INVERSE_H
+#define EIGEN_INVERSE_H
+
+namespace internal {
+
+/**********************************
+*** General case implementation ***
+**********************************/
+
+template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
+struct compute_inverse
+{
+ static inline void run(const MatrixType& matrix, ResultType& result)
+ {
+ result = matrix.partialPivLu().inverse();
+ }
+};
+
+template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
+struct compute_inverse_and_det_with_check { /* nothing! general case not supported. */ };
+
+/****************************
+*** Size 1 implementation ***
+****************************/
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse<MatrixType, ResultType, 1>
+{
+ static inline void run(const MatrixType& matrix, ResultType& result)
+ {
+ typedef typename MatrixType::Scalar Scalar;
+ result.coeffRef(0,0) = Scalar(1) / matrix.coeff(0,0);
+ }
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_and_det_with_check<MatrixType, ResultType, 1>
+{
+ static inline void run(
+ const MatrixType& matrix,
+ const typename MatrixType::RealScalar& absDeterminantThreshold,
+ ResultType& result,
+ typename ResultType::Scalar& determinant,
+ bool& invertible
+ )
+ {
+ determinant = matrix.coeff(0,0);
+ invertible = abs(determinant) > absDeterminantThreshold;
+ if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant;
+ }
+};
+
+/****************************
+*** Size 2 implementation ***
+****************************/
+
+template<typename MatrixType, typename ResultType>
+inline void compute_inverse_size2_helper(
+ const MatrixType& matrix, const typename ResultType::Scalar& invdet,
+ ResultType& result)
+{
+ result.coeffRef(0,0) = matrix.coeff(1,1) * invdet;
+ result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
+ result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
+ result.coeffRef(1,1) = matrix.coeff(0,0) * invdet;
+}
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse<MatrixType, ResultType, 2>
+{
+ static inline void run(const MatrixType& matrix, ResultType& result)
+ {
+ typedef typename ResultType::Scalar Scalar;
+ const Scalar invdet = typename MatrixType::Scalar(1) / matrix.determinant();
+ compute_inverse_size2_helper(matrix, invdet, result);
+ }
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_and_det_with_check<MatrixType, ResultType, 2>
+{
+ static inline void run(
+ const MatrixType& matrix,
+ const typename MatrixType::RealScalar& absDeterminantThreshold,
+ ResultType& inverse,
+ typename ResultType::Scalar& determinant,
+ bool& invertible
+ )
+ {
+ typedef typename ResultType::Scalar Scalar;
+ determinant = matrix.determinant();
+ invertible = abs(determinant) > absDeterminantThreshold;
+ if(!invertible) return;
+ const Scalar invdet = Scalar(1) / determinant;
+ compute_inverse_size2_helper(matrix, invdet, inverse);
+ }
+};
+
+/****************************
+*** Size 3 implementation ***
+****************************/
+
+template<typename MatrixType, int i, int j>
+inline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m)
+{
+ enum {
+ i1 = (i+1) % 3,
+ i2 = (i+2) % 3,
+ j1 = (j+1) % 3,
+ j2 = (j+2) % 3
+ };
+ return m.coeff(i1, j1) * m.coeff(i2, j2)
+ - m.coeff(i1, j2) * m.coeff(i2, j1);
+}
+
+template<typename MatrixType, typename ResultType>
+inline void compute_inverse_size3_helper(
+ const MatrixType& matrix,
+ const typename ResultType::Scalar& invdet,
+ const Matrix<typename ResultType::Scalar,3,1>& cofactors_col0,
+ ResultType& result)
+{
+ result.row(0) = cofactors_col0 * invdet;
+ result.coeffRef(1,0) = cofactor_3x3<MatrixType,0,1>(matrix) * invdet;
+ result.coeffRef(1,1) = cofactor_3x3<MatrixType,1,1>(matrix) * invdet;
+ result.coeffRef(1,2) = cofactor_3x3<MatrixType,2,1>(matrix) * invdet;
+ result.coeffRef(2,0) = cofactor_3x3<MatrixType,0,2>(matrix) * invdet;
+ result.coeffRef(2,1) = cofactor_3x3<MatrixType,1,2>(matrix) * invdet;
+ result.coeffRef(2,2) = cofactor_3x3<MatrixType,2,2>(matrix) * invdet;
+}
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse<MatrixType, ResultType, 3>
+{
+ static inline void run(const MatrixType& matrix, ResultType& result)
+ {
+ typedef typename ResultType::Scalar Scalar;
+ Matrix<typename MatrixType::Scalar,3,1> cofactors_col0;
+ cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix);
+ cofactors_col0.coeffRef(1) = cofactor_3x3<MatrixType,1,0>(matrix);
+ cofactors_col0.coeffRef(2) = cofactor_3x3<MatrixType,2,0>(matrix);
+ const Scalar det = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
+ const Scalar invdet = Scalar(1) / det;
+ compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result);
+ }
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>
+{
+ static inline void run(
+ const MatrixType& matrix,
+ const typename MatrixType::RealScalar& absDeterminantThreshold,
+ ResultType& inverse,
+ typename ResultType::Scalar& determinant,
+ bool& invertible
+ )
+ {
+ typedef typename ResultType::Scalar Scalar;
+ Matrix<Scalar,3,1> cofactors_col0;
+ cofactors_col0.coeffRef(0) = cofactor_3x3<MatrixType,0,0>(matrix);
+ cofactors_col0.coeffRef(1) = cofactor_3x3<MatrixType,1,0>(matrix);
+ cofactors_col0.coeffRef(2) = cofactor_3x3<MatrixType,2,0>(matrix);
+ determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
+ invertible = abs(determinant) > absDeterminantThreshold;
+ if(!invertible) return;
+ const Scalar invdet = Scalar(1) / determinant;
+ compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse);
+ }
+};
+
+/****************************
+*** Size 4 implementation ***
+****************************/
+
+template<typename Derived>
+inline const typename Derived::Scalar general_det3_helper
+(const MatrixBase<Derived>& matrix, int i1, int i2, int i3, int j1, int j2, int j3)
+{
+ return matrix.coeff(i1,j1)
+ * (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2));
+}
+
+template<typename MatrixType, int i, int j>
+inline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix)
+{
+ enum {
+ i1 = (i+1) % 4,
+ i2 = (i+2) % 4,
+ i3 = (i+3) % 4,
+ j1 = (j+1) % 4,
+ j2 = (j+2) % 4,
+ j3 = (j+3) % 4
+ };
+ return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3)
+ + general_det3_helper(matrix, i2, i3, i1, j1, j2, j3)
+ + general_det3_helper(matrix, i3, i1, i2, j1, j2, j3);
+}
+
+template<int Arch, typename Scalar, typename MatrixType, typename ResultType>
+struct compute_inverse_size4
+{
+ static void run(const MatrixType& matrix, ResultType& result)
+ {
+ result.coeffRef(0,0) = cofactor_4x4<MatrixType,0,0>(matrix);
+ result.coeffRef(1,0) = -cofactor_4x4<MatrixType,0,1>(matrix);
+ result.coeffRef(2,0) = cofactor_4x4<MatrixType,0,2>(matrix);
+ result.coeffRef(3,0) = -cofactor_4x4<MatrixType,0,3>(matrix);
+ result.coeffRef(0,2) = cofactor_4x4<MatrixType,2,0>(matrix);
+ result.coeffRef(1,2) = -cofactor_4x4<MatrixType,2,1>(matrix);
+ result.coeffRef(2,2) = cofactor_4x4<MatrixType,2,2>(matrix);
+ result.coeffRef(3,2) = -cofactor_4x4<MatrixType,2,3>(matrix);
+ result.coeffRef(0,1) = -cofactor_4x4<MatrixType,1,0>(matrix);
+ result.coeffRef(1,1) = cofactor_4x4<MatrixType,1,1>(matrix);
+ result.coeffRef(2,1) = -cofactor_4x4<MatrixType,1,2>(matrix);
+ result.coeffRef(3,1) = cofactor_4x4<MatrixType,1,3>(matrix);
+ result.coeffRef(0,3) = -cofactor_4x4<MatrixType,3,0>(matrix);
+ result.coeffRef(1,3) = cofactor_4x4<MatrixType,3,1>(matrix);
+ result.coeffRef(2,3) = -cofactor_4x4<MatrixType,3,2>(matrix);
+ result.coeffRef(3,3) = cofactor_4x4<MatrixType,3,3>(matrix);
+ result /= (matrix.col(0).cwiseProduct(result.row(0).transpose())).sum();
+ }
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse<MatrixType, ResultType, 4>
+ : compute_inverse_size4<Architecture::Target, typename MatrixType::Scalar,
+ MatrixType, ResultType>
+{
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_and_det_with_check<MatrixType, ResultType, 4>
+{
+ static inline void run(
+ const MatrixType& matrix,
+ const typename MatrixType::RealScalar& absDeterminantThreshold,
+ ResultType& inverse,
+ typename ResultType::Scalar& determinant,
+ bool& invertible
+ )
+ {
+ determinant = matrix.determinant();
+ invertible = abs(determinant) > absDeterminantThreshold;
+ if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse);
+ }
+};
+
+/*************************
+*** MatrixBase methods ***
+*************************/
+
+template<typename MatrixType>
+struct traits<inverse_impl<MatrixType> >
+{
+ typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename MatrixType>
+struct inverse_impl : public ReturnByValue<inverse_impl<MatrixType> >
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename internal::eval<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+ const MatrixTypeNested m_matrix;
+
+ inverse_impl(const MatrixType& matrix)
+ : m_matrix(matrix)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ const int Size = EIGEN_PLAIN_ENUM_MIN(MatrixType::ColsAtCompileTime,Dest::ColsAtCompileTime);
+ EIGEN_ONLY_USED_FOR_DEBUG(Size);
+ eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=extract_data(dst)))
+ && "Aliasing problem detected in inverse(), you need to do inverse().eval() here.");
+
+ compute_inverse<MatrixTypeNestedCleaned, Dest>::run(m_matrix, dst);
+ }
+};
+
+} // end namespace internal
+
+/** \lu_module
+ *
+ * \returns the matrix inverse of this matrix.
+ *
+ * For small fixed sizes up to 4x4, this method uses cofactors.
+ * In the general case, this method uses class PartialPivLU.
+ *
+ * \note This matrix must be invertible, otherwise the result is undefined. If you need an
+ * invertibility check, do the following:
+ * \li for fixed sizes up to 4x4, use computeInverseAndDetWithCheck().
+ * \li for the general case, use class FullPivLU.
+ *
+ * Example: \include MatrixBase_inverse.cpp
+ * Output: \verbinclude MatrixBase_inverse.out
+ *
+ * \sa computeInverseAndDetWithCheck()
+ */
+template<typename Derived>
+inline const internal::inverse_impl<Derived> MatrixBase<Derived>::inverse() const
+{
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
+ eigen_assert(rows() == cols());
+ return internal::inverse_impl<Derived>(derived());
+}
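+
+// Illustrative usage (a sketch; the matrix must be invertible, per the note above):
+//
+//   Matrix3d M = Matrix3d::Random();
+//   Matrix3d Minv = M.inverse(); // fixed 3x3 size: the cofactor path is used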
+
+/** \lu_module
+ *
+ * Computation of matrix inverse and determinant, with invertibility check.
+ *
+ * This is only for fixed-size square matrices of size up to 4x4.
+ *
+ * \param inverse Reference to the matrix in which to store the inverse.
+ * \param determinant Reference to the variable in which to store the determinant.
+ * \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
+ * \param absDeterminantThreshold Optional parameter controlling the invertibility check.
+ * The matrix will be declared invertible if the absolute value of its
+ * determinant is greater than this threshold.
+ *
+ * Example: \include MatrixBase_computeInverseAndDetWithCheck.cpp
+ * Output: \verbinclude MatrixBase_computeInverseAndDetWithCheck.out
+ *
+ * \sa inverse(), computeInverseWithCheck()
+ */
+template<typename Derived>
+template<typename ResultType>
+inline void MatrixBase<Derived>::computeInverseAndDetWithCheck(
+ ResultType& inverse,
+ typename ResultType::Scalar& determinant,
+ bool& invertible,
+ const RealScalar& absDeterminantThreshold
+ ) const
+{
+ // I'd love to put some static assertions here, but SFINAE means that they would have no effect...
+ eigen_assert(rows() == cols());
+ // for 2x2, it's worth giving a chance to avoid evaluating.
+ // for larger sizes, evaluating has negligible cost and limits code size.
+ typedef typename internal::conditional<
+ RowsAtCompileTime == 2,
+ typename internal::remove_all<typename internal::nested<Derived, 2>::type>::type,
+ PlainObject
+ >::type MatrixType;
+ internal::compute_inverse_and_det_with_check<MatrixType, ResultType>::run
+ (derived(), absDeterminantThreshold, inverse, determinant, invertible);
+}
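+
+// Illustrative usage (a sketch for a fixed-size 3x3; names are hypothetical):
+//
+//   Matrix3f M = Matrix3f::Random();
+//   Matrix3f Minv;
+//   float det;
+//   bool invertible;
+//   M.computeInverseAndDetWithCheck(Minv, det, invertible); // default threshold
+//   // `invertible` is true iff |det| exceeds absDeterminantThreshold.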
+
+/** \lu_module
+ *
+ * Computation of matrix inverse, with invertibility check.
+ *
+ * This is only for fixed-size square matrices of size up to 4x4.
+ *
+ * \param inverse Reference to the matrix in which to store the inverse.
+ * \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
+ * \param absDeterminantThreshold Optional parameter controlling the invertibility check.
+ * The matrix will be declared invertible if the absolute value of its
+ * determinant is greater than this threshold.
+ *
+ * Example: \include MatrixBase_computeInverseWithCheck.cpp
+ * Output: \verbinclude MatrixBase_computeInverseWithCheck.out
+ *
+ * \sa inverse(), computeInverseAndDetWithCheck()
+ */
+template<typename Derived>
+template<typename ResultType>
+inline void MatrixBase<Derived>::computeInverseWithCheck(
+ ResultType& inverse,
+ bool& invertible,
+ const RealScalar& absDeterminantThreshold
+ ) const
+{
+ RealScalar determinant;
+ // I'd love to put some static assertions here, but SFINAE means that they would have no effect...
+ eigen_assert(rows() == cols());
+ computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold);
+}
+
+#endif // EIGEN_INVERSE_H
diff --git a/extern/Eigen3/Eigen/src/LU/PartialPivLU.h b/extern/Eigen3/Eigen/src/LU/PartialPivLU.h
new file mode 100644
index 00000000000..09394b01f5b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/LU/PartialPivLU.h
@@ -0,0 +1,509 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_PARTIALLU_H
+#define EIGEN_PARTIALLU_H
+
+/** \ingroup LU_Module
+ *
+ * \class PartialPivLU
+ *
+ * \brief LU decomposition of a matrix with partial pivoting, and related features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LU decomposition
+ *
+ * This class represents an LU decomposition of a \b square \b invertible matrix, with partial pivoting: the matrix A
+ * is decomposed as A = PLU where L is unit-lower-triangular, U is upper-triangular, and P
+ * is a permutation matrix.
+ *
+ * Typically, partial pivoting LU decomposition is only considered numerically stable for square invertible
+ * matrices. Thus LAPACK's dgesv and dgesvx require the matrix to be square and invertible. The present class
+ * does the same. It will assert that the matrix is square, but it won't (actually it can't) check that the
+ * matrix is invertible: it is your task to check that you only use this decomposition on invertible matrices.
+ *
+ * The guaranteed safe alternative, working for all matrices, is the full pivoting LU decomposition, provided
+ * by class FullPivLU.
+ *
+ * This is \b not a rank-revealing LU decomposition. Many features are intentionally absent from this class,
+ * such as rank computation. If you need these features, use class FullPivLU.
+ *
+ * This LU decomposition is suitable to invert invertible matrices. It is what MatrixBase::inverse() uses
+ * in the general case.
+ * On the other hand, it is \b not suitable to determine whether a given matrix is invertible.
+ *
+ * The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP().
+ *
+ * \sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse(), class FullPivLU
+ */
+template<typename _MatrixType> class PartialPivLU
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
+ typedef typename MatrixType::Index Index;
+ typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
+ typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
+
+
+ /**
+ * \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via PartialPivLU::compute(const MatrixType&).
+ */
+ PartialPivLU();
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa PartialPivLU()
+ */
+ PartialPivLU(Index size);
+
+ /** Constructor.
+ *
+ * \param matrix the matrix of which to compute the LU decomposition.
+ *
+ * \warning The matrix should have full rank (e.g. if it's square, it should be invertible).
+ * If you need to deal with non-full rank, use class FullPivLU instead.
+ */
+ PartialPivLU(const MatrixType& matrix);
+
+ PartialPivLU& compute(const MatrixType& matrix);
+
+ /** \returns the LU decomposition matrix: the upper-triangular part is U, the
+ * unit-lower-triangular part is L (at least for square matrices; in the non-square
+ * case, special care is needed, see the documentation of class FullPivLU).
+ *
+ * \sa matrixL(), matrixU()
+ */
+ inline const MatrixType& matrixLU() const
+ {
+ eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+ return m_lu;
+ }
+
+ /** \returns the permutation matrix P.
+ */
+ inline const PermutationType& permutationP() const
+ {
+ eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+ return m_p;
+ }
+
+ /** This method returns the solution x to the equation Ax=b, where A is the matrix of which
+ * *this is the LU decomposition.
+ *
+ * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
+ * the only requirement in order for the equation to make sense is that
+ * b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
+ *
+ * \returns the solution.
+ *
+ * Example: \include PartialPivLU_solve.cpp
+ * Output: \verbinclude PartialPivLU_solve.out
+ *
+ * Since this PartialPivLU class assumes that the matrix A is invertible, the solution
+ * theoretically exists and is unique regardless of b.
+ *
+ * \sa TriangularView::solve(), inverse(), computeInverse()
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<PartialPivLU, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+ return internal::solve_retval<PartialPivLU, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the inverse of the matrix of which *this is the LU decomposition.
+ *
+ * \warning The matrix being decomposed here is assumed to be invertible. If you need to check for
+ * invertibility, use class FullPivLU instead.
+ *
+ * \sa MatrixBase::inverse(), LU::inverse()
+ */
+ inline const internal::solve_retval<PartialPivLU,typename MatrixType::IdentityReturnType> inverse() const
+ {
+ eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+ return internal::solve_retval<PartialPivLU,typename MatrixType::IdentityReturnType>
+ (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols()));
+ }
+
+ /** \returns the determinant of the matrix of which
+ * *this is the LU decomposition. It has only linear complexity
+ * (that is, O(n) where n is the dimension of the square matrix)
+ * as the LU decomposition has already been computed.
+ *
+ * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
+ * optimized paths.
+ *
+ * \warning a determinant can be very big or small, so for matrices
+ * of large enough dimension, there is a risk of overflow/underflow.
+ *
+ * \sa MatrixBase::determinant()
+ */
+ typename internal::traits<MatrixType>::Scalar determinant() const;
+
+ MatrixType reconstructedMatrix() const;
+
+ inline Index rows() const { return m_lu.rows(); }
+ inline Index cols() const { return m_lu.cols(); }
+
+ protected:
+ MatrixType m_lu;
+ PermutationType m_p;
+ TranspositionType m_rowsTranspositions;
+ Index m_det_p;
+ bool m_isInitialized;
+};
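+
+// Illustrative usage of the class above (a sketch; `A` must be square and invertible,
+// `b` a hypothetical right-hand side):
+//
+//   PartialPivLU<MatrixXd> lu(A);
+//   VectorXd x = lu.solve(b);    // solves A x = b
+//   double d = lu.determinant(); // O(n) once the decomposition is done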
+
+template<typename MatrixType>
+PartialPivLU<MatrixType>::PartialPivLU()
+ : m_lu(),
+ m_p(),
+ m_rowsTranspositions(),
+ m_det_p(0),
+ m_isInitialized(false)
+{
+}
+
+template<typename MatrixType>
+PartialPivLU<MatrixType>::PartialPivLU(Index size)
+ : m_lu(size, size),
+ m_p(size),
+ m_rowsTranspositions(size),
+ m_det_p(0),
+ m_isInitialized(false)
+{
+}
+
+template<typename MatrixType>
+PartialPivLU<MatrixType>::PartialPivLU(const MatrixType& matrix)
+ : m_lu(matrix.rows(), matrix.rows()),
+ m_p(matrix.rows()),
+ m_rowsTranspositions(matrix.rows()),
+ m_det_p(0),
+ m_isInitialized(false)
+{
+ compute(matrix);
+}
+
+namespace internal {
+
+/** \internal Implements the unblocked and blocked in-place partial-pivoting LU algorithms. */
+template<typename Scalar, int StorageOrder, typename PivIndex>
+struct partial_lu_impl
+{
+ // FIXME: add a stride to Map, so that the following mapping becomes easier.
+ // Another option would be to create an expression able to automatically
+ // wrap any Map, Matrix, and Block expression as a unique type, but since that's exactly
+ // a Map + stride, why not add a stride to Map, plus convenient ctors from a Matrix
+ // and a Block.
+ typedef Map<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > MapLU;
+ typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
+ typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \internal performs the LU decomposition in-place of the matrix \a lu
+ * using an unblocked algorithm.
+ *
+ * In addition, this function returns the row transpositions in the
+ * vector \a row_transpositions which must have a size equal to the number
+ * of columns of the matrix \a lu, and an integer \a nb_transpositions
+ * which returns the actual number of transpositions.
+ *
+ * \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
+ */
+ static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)
+ {
+ const Index rows = lu.rows();
+ const Index cols = lu.cols();
+ const Index size = (std::min)(rows,cols);
+ nb_transpositions = 0;
+ int first_zero_pivot = -1;
+ for(Index k = 0; k < size; ++k)
+ {
+ Index rrows = rows-k-1;
+ Index rcols = cols-k-1;
+
+ Index row_of_biggest_in_col;
+ RealScalar biggest_in_corner
+ = lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col);
+ row_of_biggest_in_col += k;
+
+ row_transpositions[k] = row_of_biggest_in_col;
+
+ if(biggest_in_corner != RealScalar(0))
+ {
+ if(k != row_of_biggest_in_col)
+ {
+ lu.row(k).swap(lu.row(row_of_biggest_in_col));
+ ++nb_transpositions;
+ }
+
+ // FIXME: shall we introduce a safe quotient expression, in case 1/lu.coeff(k,k)
+ // overflows but the actual quotient does not?
+ lu.col(k).tail(rrows) /= lu.coeff(k,k);
+ }
+ else if(first_zero_pivot==-1)
+ {
+ // the pivot is exactly zero: we record the index of the first pivot which is exactly 0,
+ // and continue the factorization such that we still have A = PLU
+ first_zero_pivot = k;
+ }
+
+ if(k<rows-1)
+ lu.bottomRightCorner(rrows,rcols).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rcols);
+ }
+ return first_zero_pivot;
+ }
+
+ /** \internal performs the LU decomposition in-place of the matrix represented
+ * by the variables \a rows, \a cols, \a lu_data, and \a lu_stride using a
+ * recursive, blocked algorithm.
+ *
+ * In addition, this function returns the row transpositions in the
+ * vector \a row_transpositions which must have a size equal to the number
+ * of columns of the matrix \a lu, and an integer \a nb_transpositions
+ * which returns the actual number of transpositions.
+ *
+ * \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
+ *
+ * \note This very low-level interface using pointers, etc. exists to:
+ * 1 - reduce the number of instantiations to the strict minimum
+ * 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
+ */
+ static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
+ {
+ MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
+ MatrixType lu(lu1,0,0,rows,cols);
+
+ const Index size = (std::min)(rows,cols);
+
+ // if the matrix is too small, no blocking:
+ if(size<=16)
+ {
+ return unblocked_lu(lu, row_transpositions, nb_transpositions);
+ }
+
+ // automatically adjust the number of subdivisions to the size
+ // of the matrix so that there are enough sub-blocks:
+ Index blockSize;
+ {
+ blockSize = size/8;
+ blockSize = (blockSize/16)*16;
+ blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize);
+ }
+
+ nb_transpositions = 0;
+ int first_zero_pivot = -1;
+ for(Index k = 0; k < size; k+=blockSize)
+ {
+ Index bs = (std::min)(size-k,blockSize); // actual size of the block
+ Index trows = rows - k - bs; // trailing rows
+ Index tsize = size - k - bs; // trailing size
+
+ // partition the matrix:
+ // A00 | A01 | A02
+ // lu = A_0 | A_1 | A_2 = A10 | A11 | A12
+ // A20 | A21 | A22
+ BlockType A_0(lu,0,0,rows,k);
+ BlockType A_2(lu,0,k+bs,rows,tsize);
+ BlockType A11(lu,k,k,bs,bs);
+ BlockType A12(lu,k,k+bs,bs,tsize);
+ BlockType A21(lu,k+bs,k,trows,bs);
+ BlockType A22(lu,k+bs,k+bs,trows,tsize);
+
+ PivIndex nb_transpositions_in_panel;
+ // recursively call the blocked LU algorithm on [A11^T A21^T]^T
+ // with a very small blocking size:
+ Index ret = blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride,
+ row_transpositions+k, nb_transpositions_in_panel, 16);
+ if(ret>=0 && first_zero_pivot==-1)
+ first_zero_pivot = k+ret;
+
+ nb_transpositions += nb_transpositions_in_panel;
+ // update permutations and apply them to A_0
+ for(Index i=k; i<k+bs; ++i)
+ {
+ Index piv = (row_transpositions[i] += k);
+ A_0.row(i).swap(A_0.row(piv));
+ }
+
+ if(trows)
+ {
+ // apply permutations to A_2
+ for(Index i=k;i<k+bs; ++i)
+ A_2.row(i).swap(A_2.row(row_transpositions[i]));
+
+ // A12 = A11^-1 A12
+ A11.template triangularView<UnitLower>().solveInPlace(A12);
+
+ A22.noalias() -= A21 * A12;
+ }
+ }
+ return first_zero_pivot;
+ }
+};
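+
+// Illustrative note on the panel update above (a sketch of the algebra, added for
+// exposition, using the block names from blocked_lu): after factoring the panel
+//   [A11; A21] = [L11; L21] * U11,
+// the trailing columns must satisfy
+//   A12 = L11 * U12             =>  U12 = L11^{-1} * A12  (the triangular solveInPlace)
+//   A22 = L21 * U12 + L22 * U22 =>  A22 -= A21 * A12      (the Schur-complement update)
+// where A21 already holds L21 in place and A12 is overwritten with U12; the loop then
+// continues on the updated A22.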
+
+/** \internal performs the LU decomposition with partial pivoting in-place.
+ */
+template<typename MatrixType, typename TranspositionType>
+void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::Index& nb_transpositions)
+{
+ eigen_assert(lu.cols() == row_transpositions.size());
+ eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
+
+ partial_lu_impl
+ <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::Index>
+ ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
+}
+
+} // end namespace internal
+
+template<typename MatrixType>
+PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
+{
+ m_lu = matrix;
+
+ eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
+ const Index size = matrix.rows();
+
+ m_rowsTranspositions.resize(size);
+
+ typename TranspositionType::Index nb_transpositions;
+ internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
+ m_det_p = (nb_transpositions%2) ? -1 : 1;
+
+ m_p = m_rowsTranspositions;
+
+ m_isInitialized = true;
+ return *this;
+}
+
+template<typename MatrixType>
+typename internal::traits<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
+{
+ eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+ return Scalar(m_det_p) * m_lu.diagonal().prod();
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: P^{-1} L U.
+ * This function is provided for debug purposes. */
+template<typename MatrixType>
+MatrixType PartialPivLU<MatrixType>::reconstructedMatrix() const
+{
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ // LU
+ MatrixType res = m_lu.template triangularView<UnitLower>().toDenseMatrix()
+ * m_lu.template triangularView<Upper>();
+
+ // P^{-1}(LU)
+ res = m_p.inverse() * res;
+
+ return res;
+}
+
+/***** Implementation of solve() *****************************************************/
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<PartialPivLU<_MatrixType>, Rhs>
+ : solve_retval_base<PartialPivLU<_MatrixType>, Rhs>
+{
+ EIGEN_MAKE_SOLVE_HELPERS(PartialPivLU<_MatrixType>,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ /* The decomposition PA = LU can be rewritten as A = P^{-1} L U.
+ * So we proceed as follows:
+ * Step 1: compute c = Pb.
+ * Step 2: replace c by the solution x to Lx = c.
+ * Step 3: replace c by the solution x to Ux = c.
+ */
+
+ eigen_assert(rhs().rows() == dec().matrixLU().rows());
+
+ // Step 1
+ dst = dec().permutationP() * rhs();
+
+ // Step 2
+ dec().matrixLU().template triangularView<UnitLower>().solveInPlace(dst);
+
+ // Step 3
+ dec().matrixLU().template triangularView<Upper>().solveInPlace(dst);
+ }
+};
+
+} // end namespace internal
+
+/******** MatrixBase methods *******/
+
+/** \lu_module
+ *
+ * \return the partial-pivoting LU decomposition of \c *this.
+ *
+ * \sa class PartialPivLU
+ */
+template<typename Derived>
+inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::partialPivLu() const
+{
+ return PartialPivLU<PlainObject>(eval());
+}
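+
+// Illustrative one-liner (a sketch; `A` square and invertible, `b` with matching rows):
+//
+//   VectorXd x = A.partialPivLu().solve(b);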
+
+#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+/** \lu_module
+ *
+ * Synonym of partialPivLu().
+ *
+ * \return the partial-pivoting LU decomposition of \c *this.
+ *
+ * \sa class PartialPivLU
+ */
+template<typename Derived>
+inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::lu() const
+{
+ return PartialPivLU<PlainObject>(eval());
+}
+#endif
+
+#endif // EIGEN_PARTIALLU_H
diff --git a/extern/Eigen3/Eigen/src/LU/arch/Inverse_SSE.h b/extern/Eigen3/Eigen/src/LU/arch/Inverse_SSE.h
new file mode 100644
index 00000000000..176c349ce44
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/LU/arch/Inverse_SSE.h
@@ -0,0 +1,340 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2001 Intel Corporation
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+// The SSE code for the 4x4 float and double matrix inverse in this file
+// comes from the following Intel library:
+// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
+//
+// Here is the respective copyright and license statement:
+//
+// Copyright (c) 2001 Intel Corporation.
+//
+// Permission is granted to use, copy, distribute and prepare derivative works
+// of this library for any purpose and without fee, provided, that the above
+// copyright notice and this statement appear in all copies.
+// Intel makes no representations about the suitability of this software for
+// any purpose, and specifically disclaims all warranties.
+// See LEGAL.TXT for all the legal information.
+
+#ifndef EIGEN_INVERSE_SSE_H
+#define EIGEN_INVERSE_SSE_H
+
+namespace internal {
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
+{
+ enum {
+ MatrixAlignment = bool(MatrixType::Flags&AlignedBit),
+ ResultAlignment = bool(ResultType::Flags&AlignedBit),
+ StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
+ };
+
+ static void run(const MatrixType& matrix, ResultType& result)
+ {
+ EIGEN_ALIGN16 const int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
+
+ // Load the full matrix into registers
+ __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
+ __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
+ __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
+ __m128 _L4 = matrix.template packet<MatrixAlignment>(12);
+
+ // The inverse is calculated using the "Divide and Conquer" technique. The
+ // original matrix is divided into four 2x2 sub-matrices. Since each
+ // register holds four matrix elements, each sub-matrix is
+ // represented as a single register. Hence we get better locality in the
+ // calculations.
+
+ __m128 A, B, C, D; // the four sub-matrices
+ if(!StorageOrdersMatch)
+ {
+ A = _mm_unpacklo_ps(_L1, _L2);
+ B = _mm_unpacklo_ps(_L3, _L4);
+ C = _mm_unpackhi_ps(_L1, _L2);
+ D = _mm_unpackhi_ps(_L3, _L4);
+ }
+ else
+ {
+ A = _mm_movelh_ps(_L1, _L2);
+ B = _mm_movehl_ps(_L2, _L1);
+ C = _mm_movelh_ps(_L3, _L4);
+ D = _mm_movehl_ps(_L4, _L3);
+ }
+
+ __m128 iA, iB, iC, iD, // partial inverse of the sub-matrices
+ DC, AB;
+ __m128 dA, dB, dC, dD; // determinant of the sub-matrices
+ __m128 det, d, d1, d2;
+ __m128 rd; // reciprocal of the determinant
+
+ // AB = A# * B
+ AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
+ AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
+ // DC = D# * C
+ DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
+ DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));
+
+ // dA = |A|
+ dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
+ dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
+ // dB = |B|
+ dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
+ dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));
+
+ // dC = |C|
+ dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
+ dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
+ // dD = |D|
+ dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
+ dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));
+
+ // d = trace(AB*DC) = trace(A#*B*D#*C)
+ d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);
+
+ // iD = C*A#*B
+ iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
+ iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
+ // iA = B*D#*C
+ iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
+ iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));
+
+ // d = trace(AB*DC) = trace(A#*B*D#*C) [continue]
+ d = _mm_add_ps(d, _mm_movehl_ps(d, d));
+ d = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
+ d1 = _mm_mul_ss(dA,dD);
+ d2 = _mm_mul_ss(dB,dC);
+
+ // iD = D*|A| - C*A#*B
+ iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);
+
+ // iA = A*|D| - B*D#*C;
+ iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);
+
+ // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
+ det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
+ rd = _mm_div_ss(_mm_set_ss(1.0f), det);
+
+// #ifdef ZERO_SINGULAR
+// rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
+// #endif
+
+ // iB = D * (A#B)# = D*B#*A
+ iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
+ iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
+ // iC = A * (D#C)# = A*C#*D
+ iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
+ iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));
+
+ rd = _mm_shuffle_ps(rd,rd,0);
+ rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));
+
+ // iB = C*|B| - D*B#*A
+ iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);
+
+ // iC = B*|C| - A*C#*D;
+ iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);
+
+ // iX = iX / det
+ iA = _mm_mul_ps(rd,iA);
+ iB = _mm_mul_ps(rd,iB);
+ iC = _mm_mul_ps(rd,iC);
+ iD = _mm_mul_ps(rd,iD);
+
+ result.template writePacket<ResultAlignment>( 0, _mm_shuffle_ps(iA,iB,0x77));
+ result.template writePacket<ResultAlignment>( 4, _mm_shuffle_ps(iA,iB,0x22));
+ result.template writePacket<ResultAlignment>( 8, _mm_shuffle_ps(iC,iD,0x77));
+ result.template writePacket<ResultAlignment>(12, _mm_shuffle_ps(iC,iD,0x22));
+ }
+
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
+{
+ enum {
+ MatrixAlignment = bool(MatrixType::Flags&AlignedBit),
+ ResultAlignment = bool(ResultType::Flags&AlignedBit),
+ StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
+ };
+ static void run(const MatrixType& matrix, ResultType& result)
+ {
+ const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
+ const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+
+ // The inverse is calculated using the "Divide and Conquer" technique. The
+ // original matrix is divided into four 2x2 sub-matrices. Since each
+ // register holds two matrix elements, each sub-matrix
+ // consists of two registers. Hence we get better locality in the
+ // calculations.
+
+ // the four sub-matrices
+ __m128d A1, A2, B1, B2, C1, C2, D1, D2;
+
+ if(StorageOrdersMatch)
+ {
+ A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
+ A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
+ C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
+ C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
+ }
+ else
+ {
+ __m128d tmp;
+ A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
+ A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
+ tmp = A1;
+ A1 = _mm_unpacklo_pd(A1,A2);
+ A2 = _mm_unpackhi_pd(tmp,A2);
+ tmp = C1;
+ C1 = _mm_unpacklo_pd(C1,C2);
+ C2 = _mm_unpackhi_pd(tmp,C2);
+
+ B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
+ B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
+ tmp = B1;
+ B1 = _mm_unpacklo_pd(B1,B2);
+ B2 = _mm_unpackhi_pd(tmp,B2);
+ tmp = D1;
+ D1 = _mm_unpacklo_pd(D1,D2);
+ D2 = _mm_unpackhi_pd(tmp,D2);
+ }
+
+ __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2, // partial inverse of the sub-matrices
+ DC1, DC2, AB1, AB2;
+ __m128d dA, dB, dC, dD; // determinant of the sub-matrices
+ __m128d det, d1, d2, rd;
+
+ // dA = |A|
+ dA = _mm_shuffle_pd(A2, A2, 1);
+ dA = _mm_mul_pd(A1, dA);
+ dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
+ // dB = |B|
+ dB = _mm_shuffle_pd(B2, B2, 1);
+ dB = _mm_mul_pd(B1, dB);
+ dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));
+
+ // AB = A# * B
+ AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
+ AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
+ AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
+ AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));
+
+ // dC = |C|
+ dC = _mm_shuffle_pd(C2, C2, 1);
+ dC = _mm_mul_pd(C1, dC);
+ dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
+ // dD = |D|
+ dD = _mm_shuffle_pd(D2, D2, 1);
+ dD = _mm_mul_pd(D1, dD);
+ dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));
+
+ // DC = D# * C
+ DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
+ DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
+ DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
+ DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));
+
+ // rd = trace(AB*DC) = trace(A#*B*D#*C)
+ d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
+ d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
+ rd = _mm_add_pd(d1, d2);
+ rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));
+
+ // iD = C*A#*B
+ iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
+ iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
+ iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
+ iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));
+
+ // iA = B*D#*C
+ iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
+ iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
+ iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
+ iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));
+
+ // iD = D*|A| - C*A#*B
+ dA = _mm_shuffle_pd(dA,dA,0);
+ iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
+ iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);
+
+ // iA = A*|D| - B*D#*C;
+ dD = _mm_shuffle_pd(dD,dD,0);
+ iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
+ iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);
+
+ d1 = _mm_mul_sd(dA, dD);
+ d2 = _mm_mul_sd(dB, dC);
+
+ // iB = D * (A#B)# = D*B#*A
+ iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
+ iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
+ iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
+ iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));
+
+ // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
+ det = _mm_add_sd(d1, d2);
+ det = _mm_sub_sd(det, rd);
+
+ // iC = A * (D#C)# = A*C#*D
+ iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
+ iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
+ iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
+ iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));
+
+ rd = _mm_div_sd(_mm_set_sd(1.0), det);
+// #ifdef ZERO_SINGULAR
+// rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
+// #endif
+ rd = _mm_shuffle_pd(rd,rd,0);
+
+ // iB = C*|B| - D*B#*A
+ dB = _mm_shuffle_pd(dB,dB,0);
+ iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
+ iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);
+
+ d1 = _mm_xor_pd(rd, _Sign_PN);
+ d2 = _mm_xor_pd(rd, _Sign_NP);
+
+ // iC = B*|C| - A*C#*D;
+ dC = _mm_shuffle_pd(dC,dC,0);
+ iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
+ iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);
+
+ result.template writePacket<ResultAlignment>( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1)); // iA# / det
+ result.template writePacket<ResultAlignment>( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
+ result.template writePacket<ResultAlignment>( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1)); // iB# / det
+ result.template writePacket<ResultAlignment>( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
+ result.template writePacket<ResultAlignment>( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1)); // iC# / det
+ result.template writePacket<ResultAlignment>(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
+ result.template writePacket<ResultAlignment>(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1)); // iD# / det
+ result.template writePacket<ResultAlignment>(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
+ }
+};
+
+}
+
+#endif // EIGEN_INVERSE_SSE_H
diff --git a/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h
new file mode 100644
index 00000000000..f04c6038d6a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h
@@ -0,0 +1,532 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
+#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
+
+/** \ingroup QR_Module
+ *
+ * \class ColPivHouseholderQR
+ *
+ * \brief Householder rank-revealing QR decomposition of a matrix with column-pivoting
+ *
+ * \param MatrixType the type of the matrix of which we are computing the QR decomposition
+ *
+ * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b Q and \b R
+ * such that
+ * \f[
+ * \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, \mathbf{R}
+ * \f]
+ * by using Householder transformations. Here, \b P is a permutation matrix, \b Q a unitary matrix and \b R an
+ * upper triangular matrix.
+ *
+ * This decomposition performs column pivoting in order to be rank-revealing and improve
+ * numerical stability. It is slower than HouseholderQR, and faster than FullPivHouseholderQR.
+ *
+ * \sa MatrixBase::colPivHouseholderQr()
+ */
+template<typename _MatrixType> class ColPivHouseholderQR
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
+ typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
+ typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
+ typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
+ typedef typename internal::plain_row_type<MatrixType, RealScalar>::type RealRowVectorType;
+ typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
+
+ /**
+ * \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via ColPivHouseholderQR::compute(const MatrixType&).
+ */
+ ColPivHouseholderQR()
+ : m_qr(),
+ m_hCoeffs(),
+ m_colsPermutation(),
+ m_colsTranspositions(),
+ m_temp(),
+ m_colSqNorms(),
+        m_isInitialized(false),
+        m_usePrescribedThreshold(false) {}
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa ColPivHouseholderQR()
+ */
+ ColPivHouseholderQR(Index rows, Index cols)
+ : m_qr(rows, cols),
+ m_hCoeffs((std::min)(rows,cols)),
+ m_colsPermutation(cols),
+ m_colsTranspositions(cols),
+ m_temp(cols),
+ m_colSqNorms(cols),
+ m_isInitialized(false),
+ m_usePrescribedThreshold(false) {}
+
+ ColPivHouseholderQR(const MatrixType& matrix)
+ : m_qr(matrix.rows(), matrix.cols()),
+ m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
+ m_colsPermutation(matrix.cols()),
+ m_colsTranspositions(matrix.cols()),
+ m_temp(matrix.cols()),
+ m_colSqNorms(matrix.cols()),
+ m_isInitialized(false),
+ m_usePrescribedThreshold(false)
+ {
+ compute(matrix);
+ }
+
+  /** This method finds a solution x to the equation Ax=b, if any exists, where A is
+    * the matrix of which *this is the QR decomposition.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \returns a solution.
+ *
+ * \note The case where b is a matrix is not yet implemented. Also, this
+ * code is space inefficient.
+ *
+ * \note_about_checking_solutions
+ *
+ * \note_about_arbitrary_choice_of_solution
+ *
+ * Example: \include ColPivHouseholderQR_solve.cpp
+ * Output: \verbinclude ColPivHouseholderQR_solve.out
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<ColPivHouseholderQR, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return internal::solve_retval<ColPivHouseholderQR, Rhs>(*this, b.derived());
+ }
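+
+    // A minimal usage sketch, assuming a consistent system Ax=b with MatrixXd/VectorXd
+    // (illustration only, not part of Eigen's shipped documentation):
+    //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
+    //   Eigen::VectorXd b = Eigen::VectorXd::Random(4);
+    //   Eigen::VectorXd x = A.colPivHouseholderQr().solve(b);
+    //   // if a solution exists, (A*x - b).norm() is on the order of machine epsilon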
+
+    HouseholderSequenceType householderQ() const;
+
+ /** \returns a reference to the matrix where the Householder QR decomposition is stored
+ */
+ const MatrixType& matrixQR() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return m_qr;
+ }
+
+ ColPivHouseholderQR& compute(const MatrixType& matrix);
+
+ const PermutationType& colsPermutation() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return m_colsPermutation;
+ }
+
+ /** \returns the absolute value of the determinant of the matrix of which
+ * *this is the QR decomposition. It has only linear complexity
+ * (that is, O(n) where n is the dimension of the square matrix)
+ * as the QR decomposition has already been computed.
+ *
+ * \note This is only for square matrices.
+ *
+ * \warning a determinant can be very big or small, so for matrices
+ * of large enough dimension, there is a risk of overflow/underflow.
+ * One way to work around that is to use logAbsDeterminant() instead.
+ *
+ * \sa logAbsDeterminant(), MatrixBase::determinant()
+ */
+ typename MatrixType::RealScalar absDeterminant() const;
+
+ /** \returns the natural log of the absolute value of the determinant of the matrix of which
+ * *this is the QR decomposition. It has only linear complexity
+ * (that is, O(n) where n is the dimension of the square matrix)
+ * as the QR decomposition has already been computed.
+ *
+ * \note This is only for square matrices.
+ *
+ * \note This method is useful to work around the risk of overflow/underflow that's inherent
+ * to determinant computation.
+ *
+ * \sa absDeterminant(), MatrixBase::determinant()
+ */
+ typename MatrixType::RealScalar logAbsDeterminant() const;
+
+ /** \returns the rank of the matrix of which *this is the QR decomposition.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index rank() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold();
+ Index result = 0;
+ for(Index i = 0; i < m_nonzero_pivots; ++i)
+ result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold);
+ return result;
+ }
+
+ /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index dimensionOfKernel() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return cols() - rank();
+ }
+
+ /** \returns true if the matrix of which *this is the QR decomposition represents an injective
+ * linear map, i.e. has trivial kernel; false otherwise.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isInjective() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return rank() == cols();
+ }
+
+ /** \returns true if the matrix of which *this is the QR decomposition represents a surjective
+ * linear map; false otherwise.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isSurjective() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return rank() == rows();
+ }
+
+ /** \returns true if the matrix of which *this is the QR decomposition is invertible.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isInvertible() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return isInjective() && isSurjective();
+ }
+
+ /** \returns the inverse of the matrix of which *this is the QR decomposition.
+ *
+ * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
+ * Use isInvertible() to first determine whether this matrix is invertible.
+ */
+ inline const
+ internal::solve_retval<ColPivHouseholderQR, typename MatrixType::IdentityReturnType>
+ inverse() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return internal::solve_retval<ColPivHouseholderQR,typename MatrixType::IdentityReturnType>
+ (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols()));
+ }
+
+ inline Index rows() const { return m_qr.rows(); }
+ inline Index cols() const { return m_qr.cols(); }
+ const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
+
+  /** Allows one to prescribe a threshold to be used by certain methods, such as rank(),
+    * which need to determine when pivots are to be considered nonzero. This is not used for the
+ * QR decomposition itself.
+ *
+ * When it needs to get the threshold value, Eigen calls threshold(). By default, this
+ * uses a formula to automatically determine a reasonable threshold.
+ * Once you have called the present method setThreshold(const RealScalar&),
+ * your value is used instead.
+ *
+ * \param threshold The new value to use as the threshold.
+ *
+    * A pivot will be considered nonzero if its absolute value satisfies
+    *  \f$ \vert pivot \vert > threshold \times \vert maxpivot \vert \f$
+ * where maxpivot is the biggest pivot.
+ *
+ * If you want to come back to the default behavior, call setThreshold(Default_t)
+ */
+ ColPivHouseholderQR& setThreshold(const RealScalar& threshold)
+ {
+ m_usePrescribedThreshold = true;
+ m_prescribedThreshold = threshold;
+ return *this;
+ }
+
+  /** Allows one to return to the default behavior, letting Eigen use its default formula for
+ * determining the threshold.
+ *
+ * You should pass the special object Eigen::Default as parameter here.
+ * \code qr.setThreshold(Eigen::Default); \endcode
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ ColPivHouseholderQR& setThreshold(Default_t)
+ {
+ m_usePrescribedThreshold = false;
+ return *this;
+ }
+
+ /** Returns the threshold that will be used by certain methods such as rank().
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ RealScalar threshold() const
+ {
+ eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ return m_usePrescribedThreshold ? m_prescribedThreshold
+ // this formula comes from experimenting (see "LU precision tuning" thread on the list)
+ // and turns out to be identical to Higham's formula used already in LDLt.
+ : NumTraits<Scalar>::epsilon() * m_qr.diagonalSize();
+ }
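+
+    // A minimal sketch of the threshold machinery documented above, assuming an
+    // existing decomposition `qr` and an application-chosen tolerance (illustration
+    // only, not from Eigen's docs):
+    //   qr.setThreshold(1e-8);                  // |pivot| <= 1e-8 * maxPivot() counts as zero
+    //   Eigen::MatrixXd::Index r = qr.rank();   // rank under the prescribed threshold
+    //   qr.setThreshold(Eigen::Default);        // restore the automatic epsilon-based formula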
+
+ /** \returns the number of nonzero pivots in the QR decomposition.
+ * Here nonzero is meant in the exact sense, not in a fuzzy sense.
+ * So that notion isn't really intrinsically interesting, but it is
+ * still useful when implementing algorithms.
+ *
+ * \sa rank()
+ */
+ inline Index nonzeroPivots() const
+ {
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return m_nonzero_pivots;
+ }
+
+ /** \returns the absolute value of the biggest pivot, i.e. the biggest
+ * diagonal coefficient of R.
+ */
+ RealScalar maxPivot() const { return m_maxpivot; }
+
+ protected:
+ MatrixType m_qr;
+ HCoeffsType m_hCoeffs;
+ PermutationType m_colsPermutation;
+ IntRowVectorType m_colsTranspositions;
+ RowVectorType m_temp;
+ RealRowVectorType m_colSqNorms;
+ bool m_isInitialized, m_usePrescribedThreshold;
+ RealScalar m_prescribedThreshold, m_maxpivot;
+ Index m_nonzero_pivots;
+ Index m_det_pq;
+};
+
+template<typename MatrixType>
+typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::absDeterminant() const
+{
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+ return internal::abs(m_qr.diagonal().prod());
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDeterminant() const
+{
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+ return m_qr.diagonal().cwiseAbs().array().log().sum();
+}
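+
+// A minimal sketch contrasting the two accessors above, assuming a square
+// decomposition `qr` of type ColPivHouseholderQR<MatrixXd> (illustration only):
+//   double ad  = qr.absDeterminant();    // |det A|; can overflow/underflow for large n
+//   double lad = qr.logAbsDeterminant(); // log|det A|; stays representable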
+
+template<typename MatrixType>
+ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
+{
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index size = matrix.diagonalSize();
+
+ m_qr = matrix;
+ m_hCoeffs.resize(size);
+
+ m_temp.resize(cols);
+
+ m_colsTranspositions.resize(matrix.cols());
+ Index number_of_transpositions = 0;
+
+ m_colSqNorms.resize(cols);
+ for(Index k = 0; k < cols; ++k)
+ m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm();
+
+ RealScalar threshold_helper = m_colSqNorms.maxCoeff() * internal::abs2(NumTraits<Scalar>::epsilon()) / RealScalar(rows);
+
+ m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
+ m_maxpivot = RealScalar(0);
+
+ for(Index k = 0; k < size; ++k)
+ {
+ // first, we look up in our table m_colSqNorms which column has the biggest squared norm
+ Index biggest_col_index;
+ RealScalar biggest_col_sq_norm = m_colSqNorms.tail(cols-k).maxCoeff(&biggest_col_index);
+ biggest_col_index += k;
+
+ // since our table m_colSqNorms accumulates imprecision at every step, we must now recompute
+ // the actual squared norm of the selected column.
+ // Note that not doing so does result in solve() sometimes returning inf/nan values
+ // when running the unit test with 1000 repetitions.
+ biggest_col_sq_norm = m_qr.col(biggest_col_index).tail(rows-k).squaredNorm();
+
+ // we store that back into our table: it can't hurt to correct our table.
+ m_colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm;
+
+ // if the current biggest column is smaller than epsilon times the initial biggest column,
+ // terminate to avoid generating nan/inf values.
+ // Note that here, if we test instead for "biggest == 0", we get a failure every 1000 (or so)
+ // repetitions of the unit test, with the result of solve() filled with large values of the order
+ // of 1/(size*epsilon).
+ if(biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
+ {
+ m_nonzero_pivots = k;
+ m_hCoeffs.tail(size-k).setZero();
+ m_qr.bottomRightCorner(rows-k,cols-k)
+ .template triangularView<StrictlyLower>()
+ .setZero();
+ break;
+ }
+
+ // apply the transposition to the columns
+ m_colsTranspositions.coeffRef(k) = biggest_col_index;
+ if(k != biggest_col_index) {
+ m_qr.col(k).swap(m_qr.col(biggest_col_index));
+ std::swap(m_colSqNorms.coeffRef(k), m_colSqNorms.coeffRef(biggest_col_index));
+ ++number_of_transpositions;
+ }
+
+ // generate the householder vector, store it below the diagonal
+ RealScalar beta;
+ m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
+
+ // apply the householder transformation to the diagonal coefficient
+ m_qr.coeffRef(k,k) = beta;
+
+ // remember the maximum absolute value of diagonal coefficients
+ if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta);
+
+ // apply the householder transformation
+ m_qr.bottomRightCorner(rows-k, cols-k-1)
+ .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));
+
+ // update our table of squared norms of the columns
+ m_colSqNorms.tail(cols-k-1) -= m_qr.row(k).tail(cols-k-1).cwiseAbs2();
+ }
+
+ m_colsPermutation.setIdentity(cols);
+ for(Index k = 0; k < m_nonzero_pivots; ++k)
+ m_colsPermutation.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
+
+ m_det_pq = (number_of_transpositions%2) ? -1 : 1;
+ m_isInitialized = true;
+
+ return *this;
+}
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
+ : solve_retval_base<ColPivHouseholderQR<_MatrixType>, Rhs>
+{
+ EIGEN_MAKE_SOLVE_HELPERS(ColPivHouseholderQR<_MatrixType>,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+
+ const int cols = dec().cols(),
+ nonzero_pivots = dec().nonzeroPivots();
+
+ if(nonzero_pivots == 0)
+ {
+ dst.setZero();
+ return;
+ }
+
+ typename Rhs::PlainObject c(rhs());
+
+ // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
+ c.applyOnTheLeft(householderSequence(dec().matrixQR(), dec().hCoeffs())
+ .setLength(dec().nonzeroPivots())
+ .transpose()
+ );
+
+ dec().matrixQR()
+ .topLeftCorner(nonzero_pivots, nonzero_pivots)
+ .template triangularView<Upper>()
+ .solveInPlace(c.topRows(nonzero_pivots));
+
+ for(Index i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
+ for(Index i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
+ }
+};
+
+} // end namespace internal
+
+/** \returns the matrix Q as a sequence of householder transformations */
+template<typename MatrixType>
+typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>
+ ::householderQ() const
+{
+ eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+ return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()).setLength(m_nonzero_pivots);
+}
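+
+// A minimal sketch checking the factorization A*P = Q*R, assuming a MatrixXd A
+// (illustration only; Q is densified here just for the test):
+//   Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
+//   Eigen::MatrixXd Q = qr.householderQ();
+//   Eigen::MatrixXd R = qr.matrixQR().triangularView<Eigen::Upper>();
+//   double err = (Q*R - A*qr.colsPermutation()).norm(); // ~ machine epsilon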
+
+/** \return the column-pivoting Householder QR decomposition of \c *this.
+ *
+ * \sa class ColPivHouseholderQR
+ */
+template<typename Derived>
+const ColPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::colPivHouseholderQr() const
+{
+ return ColPivHouseholderQR<PlainObject>(eval());
+}
+
+
+#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
diff --git a/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h b/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h
new file mode 100644
index 00000000000..dde3013be9d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h
@@ -0,0 +1,546 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
+#define EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
+
+/** \ingroup QR_Module
+ *
+ * \class FullPivHouseholderQR
+ *
+ * \brief Householder rank-revealing QR decomposition of a matrix with full pivoting
+ *
+ * \param MatrixType the type of the matrix of which we are computing the QR decomposition
+ *
+ * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b Q and \b R
+ * such that
+ * \f[
+ * \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, \mathbf{R}
+ * \f]
+ * by using Householder transformations. Here, \b P is a permutation matrix, \b Q a unitary matrix and \b R an
+ * upper triangular matrix.
+ *
+ * This decomposition performs a very prudent full pivoting in order to be rank-revealing and achieve optimal
+ * numerical stability. The trade-off is that it is slower than HouseholderQR and ColPivHouseholderQR.
+ *
+ * \sa MatrixBase::fullPivHouseholderQr()
+ */
+template<typename _MatrixType> class FullPivHouseholderQR
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
+ typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
+ typedef Matrix<Index, 1, ColsAtCompileTime, RowMajor, 1, MaxColsAtCompileTime> IntRowVectorType;
+ typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
+ typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColVectorType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via FullPivHouseholderQR::compute(const MatrixType&).
+ */
+ FullPivHouseholderQR()
+ : m_qr(),
+ m_hCoeffs(),
+ m_rows_transpositions(),
+ m_cols_transpositions(),
+ m_cols_permutation(),
+ m_temp(),
+ m_isInitialized(false),
+ m_usePrescribedThreshold(false) {}
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa FullPivHouseholderQR()
+ */
+ FullPivHouseholderQR(Index rows, Index cols)
+ : m_qr(rows, cols),
+ m_hCoeffs((std::min)(rows,cols)),
+ m_rows_transpositions(rows),
+ m_cols_transpositions(cols),
+ m_cols_permutation(cols),
+ m_temp((std::min)(rows,cols)),
+ m_isInitialized(false),
+ m_usePrescribedThreshold(false) {}
+
+ FullPivHouseholderQR(const MatrixType& matrix)
+ : m_qr(matrix.rows(), matrix.cols()),
+ m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
+ m_rows_transpositions(matrix.rows()),
+ m_cols_transpositions(matrix.cols()),
+ m_cols_permutation(matrix.cols()),
+ m_temp((std::min)(matrix.rows(), matrix.cols())),
+ m_isInitialized(false),
+ m_usePrescribedThreshold(false)
+ {
+ compute(matrix);
+ }
+
+    /** This method finds a solution x to the equation Ax=b, if any exists, where A is
+      * the matrix of which *this is the QR decomposition.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \returns a solution.
+ *
+ * \note The case where b is a matrix is not yet implemented. Also, this
+ * code is space inefficient.
+ *
+ * \note_about_checking_solutions
+ *
+ * \note_about_arbitrary_choice_of_solution
+ *
+ * Example: \include FullPivHouseholderQR_solve.cpp
+ * Output: \verbinclude FullPivHouseholderQR_solve.out
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<FullPivHouseholderQR, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return internal::solve_retval<FullPivHouseholderQR, Rhs>(*this, b.derived());
+ }
+
+    MatrixQType matrixQ() const;
+
+ /** \returns a reference to the matrix where the Householder QR decomposition is stored
+ */
+ const MatrixType& matrixQR() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return m_qr;
+ }
+
+ FullPivHouseholderQR& compute(const MatrixType& matrix);
+
+ const PermutationType& colsPermutation() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return m_cols_permutation;
+ }
+
+ const IntColVectorType& rowsTranspositions() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return m_rows_transpositions;
+ }
+
+ /** \returns the absolute value of the determinant of the matrix of which
+ * *this is the QR decomposition. It has only linear complexity
+ * (that is, O(n) where n is the dimension of the square matrix)
+ * as the QR decomposition has already been computed.
+ *
+ * \note This is only for square matrices.
+ *
+ * \warning a determinant can be very big or small, so for matrices
+ * of large enough dimension, there is a risk of overflow/underflow.
+ * One way to work around that is to use logAbsDeterminant() instead.
+ *
+ * \sa logAbsDeterminant(), MatrixBase::determinant()
+ */
+ typename MatrixType::RealScalar absDeterminant() const;
+
+ /** \returns the natural log of the absolute value of the determinant of the matrix of which
+ * *this is the QR decomposition. It has only linear complexity
+ * (that is, O(n) where n is the dimension of the square matrix)
+ * as the QR decomposition has already been computed.
+ *
+ * \note This is only for square matrices.
+ *
+ * \note This method is useful to work around the risk of overflow/underflow that's inherent
+ * to determinant computation.
+ *
+ * \sa absDeterminant(), MatrixBase::determinant()
+ */
+ typename MatrixType::RealScalar logAbsDeterminant() const;
+
+ /** \returns the rank of the matrix of which *this is the QR decomposition.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index rank() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ RealScalar premultiplied_threshold = internal::abs(m_maxpivot) * threshold();
+ Index result = 0;
+ for(Index i = 0; i < m_nonzero_pivots; ++i)
+ result += (internal::abs(m_qr.coeff(i,i)) > premultiplied_threshold);
+ return result;
+ }
+
+ /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index dimensionOfKernel() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return cols() - rank();
+ }
+
+ /** \returns true if the matrix of which *this is the QR decomposition represents an injective
+ * linear map, i.e. has trivial kernel; false otherwise.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isInjective() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return rank() == cols();
+ }
+
+ /** \returns true if the matrix of which *this is the QR decomposition represents a surjective
+ * linear map; false otherwise.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isSurjective() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return rank() == rows();
+ }
+
+ /** \returns true if the matrix of which *this is the QR decomposition is invertible.
+ *
+ * \note This method has to determine which pivots should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline bool isInvertible() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return isInjective() && isSurjective();
+ }
+
+ /** \returns the inverse of the matrix of which *this is the QR decomposition.
+ *
+ * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
+ * Use isInvertible() to first determine whether this matrix is invertible.
+      */
+    inline const
+ internal::solve_retval<FullPivHouseholderQR, typename MatrixType::IdentityReturnType>
+ inverse() const
+ {
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ return internal::solve_retval<FullPivHouseholderQR,typename MatrixType::IdentityReturnType>
+ (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols()));
+ }
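+
+    // A minimal sketch of the guard suggested in the note above, assuming a square
+    // MatrixXd A (illustration only):
+    //   Eigen::FullPivHouseholderQR<Eigen::MatrixXd> qr(A);
+    //   if(qr.isInvertible())
+    //     Eigen::MatrixXd Ainv = qr.inverse();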
+
+ inline Index rows() const { return m_qr.rows(); }
+ inline Index cols() const { return m_qr.cols(); }
+ const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
+
+    /** Allows one to prescribe a threshold to be used by certain methods, such as rank(),
+      * which need to determine when pivots are to be considered nonzero. This is not used for the
+ * QR decomposition itself.
+ *
+ * When it needs to get the threshold value, Eigen calls threshold(). By default, this
+ * uses a formula to automatically determine a reasonable threshold.
+ * Once you have called the present method setThreshold(const RealScalar&),
+ * your value is used instead.
+ *
+ * \param threshold The new value to use as the threshold.
+ *
+      * A pivot will be considered nonzero if its absolute value satisfies
+      *  \f$ \vert pivot \vert > threshold \times \vert maxpivot \vert \f$
+ * where maxpivot is the biggest pivot.
+ *
+ * If you want to come back to the default behavior, call setThreshold(Default_t)
+ */
+ FullPivHouseholderQR& setThreshold(const RealScalar& threshold)
+ {
+ m_usePrescribedThreshold = true;
+ m_prescribedThreshold = threshold;
+ return *this;
+ }
+
+    /** Allows one to return to the default behavior, letting Eigen use its default formula for
+ * determining the threshold.
+ *
+ * You should pass the special object Eigen::Default as parameter here.
+ * \code qr.setThreshold(Eigen::Default); \endcode
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ FullPivHouseholderQR& setThreshold(Default_t)
+ {
+ m_usePrescribedThreshold = false;
+ return *this;
+ }
+
+ /** Returns the threshold that will be used by certain methods such as rank().
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ RealScalar threshold() const
+ {
+ eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ return m_usePrescribedThreshold ? m_prescribedThreshold
+ // this formula comes from experimenting (see "LU precision tuning" thread on the list)
+ // and turns out to be identical to Higham's formula used already in LDLt.
+ : NumTraits<Scalar>::epsilon() * m_qr.diagonalSize();
+ }
+
+ /** \returns the number of nonzero pivots in the QR decomposition.
+ * Here nonzero is meant in the exact sense, not in a fuzzy sense.
+ * So that notion isn't really intrinsically interesting, but it is
+ * still useful when implementing algorithms.
+ *
+ * \sa rank()
+ */
+ inline Index nonzeroPivots() const
+ {
+ eigen_assert(m_isInitialized && "LU is not initialized.");
+ return m_nonzero_pivots;
+ }
+
+ /** \returns the absolute value of the biggest pivot, i.e. the biggest
+    * diagonal coefficient of R.
+ */
+ RealScalar maxPivot() const { return m_maxpivot; }
+
+ protected:
+ MatrixType m_qr;
+ HCoeffsType m_hCoeffs;
+ IntColVectorType m_rows_transpositions;
+ IntRowVectorType m_cols_transpositions;
+ PermutationType m_cols_permutation;
+ RowVectorType m_temp;
+ bool m_isInitialized, m_usePrescribedThreshold;
+ RealScalar m_prescribedThreshold, m_maxpivot;
+ Index m_nonzero_pivots;
+ RealScalar m_precision;
+ Index m_det_pq;
+};
+
+template<typename MatrixType>
+typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const
+{
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+ return internal::abs(m_qr.diagonal().prod());
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDeterminant() const
+{
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+ return m_qr.diagonal().cwiseAbs().array().log().sum();
+}
+
+template<typename MatrixType>
+FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
+{
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index size = (std::min)(rows,cols);
+
+ m_qr = matrix;
+ m_hCoeffs.resize(size);
+
+ m_temp.resize(cols);
+
+ m_precision = NumTraits<Scalar>::epsilon() * size;
+
+ m_rows_transpositions.resize(matrix.rows());
+ m_cols_transpositions.resize(matrix.cols());
+ Index number_of_transpositions = 0;
+
+ RealScalar biggest(0);
+
+ m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
+ m_maxpivot = RealScalar(0);
+
+ for (Index k = 0; k < size; ++k)
+ {
+ Index row_of_biggest_in_corner, col_of_biggest_in_corner;
+ RealScalar biggest_in_corner;
+
+ biggest_in_corner = m_qr.bottomRightCorner(rows-k, cols-k)
+ .cwiseAbs()
+ .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
+ row_of_biggest_in_corner += k;
+ col_of_biggest_in_corner += k;
+ if(k==0) biggest = biggest_in_corner;
+
+ // if the corner is negligible, then we have less than full rank, and we can finish early
+ if(internal::isMuchSmallerThan(biggest_in_corner, biggest, m_precision))
+ {
+ m_nonzero_pivots = k;
+ for(Index i = k; i < size; i++)
+ {
+ m_rows_transpositions.coeffRef(i) = i;
+ m_cols_transpositions.coeffRef(i) = i;
+ m_hCoeffs.coeffRef(i) = Scalar(0);
+ }
+ break;
+ }
+
+ m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;
+ m_cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;
+ if(k != row_of_biggest_in_corner) {
+ m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k));
+ ++number_of_transpositions;
+ }
+ if(k != col_of_biggest_in_corner) {
+ m_qr.col(k).swap(m_qr.col(col_of_biggest_in_corner));
+ ++number_of_transpositions;
+ }
+
+ RealScalar beta;
+ m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
+ m_qr.coeffRef(k,k) = beta;
+
+ // remember the maximum absolute value of diagonal coefficients
+ if(internal::abs(beta) > m_maxpivot) m_maxpivot = internal::abs(beta);
+
+ m_qr.bottomRightCorner(rows-k, cols-k-1)
+ .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));
+ }
+
+ m_cols_permutation.setIdentity(cols);
+ for(Index k = 0; k < size; ++k)
+ m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k));
+
+ m_det_pq = (number_of_transpositions%2) ? -1 : 1;
+ m_isInitialized = true;
+
+ return *this;
+}
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
+ : solve_retval_base<FullPivHouseholderQR<_MatrixType>, Rhs>
+{
+ EIGEN_MAKE_SOLVE_HELPERS(FullPivHouseholderQR<_MatrixType>,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ const Index rows = dec().rows(), cols = dec().cols();
+ eigen_assert(rhs().rows() == rows);
+
+ // FIXME introduce nonzeroPivots() and use it here. and more generally,
+ // make the same improvements in this dec as in FullPivLU.
+ if(dec().rank()==0)
+ {
+ dst.setZero();
+ return;
+ }
+
+ typename Rhs::PlainObject c(rhs());
+
+ Matrix<Scalar,1,Rhs::ColsAtCompileTime> temp(rhs().cols());
+ for (Index k = 0; k < dec().rank(); ++k)
+ {
+ Index remainingSize = rows-k;
+ c.row(k).swap(c.row(dec().rowsTranspositions().coeff(k)));
+ c.bottomRightCorner(remainingSize, rhs().cols())
+ .applyHouseholderOnTheLeft(dec().matrixQR().col(k).tail(remainingSize-1),
+ dec().hCoeffs().coeff(k), &temp.coeffRef(0));
+ }
+
+ if(!dec().isSurjective())
+ {
+      // is c in the image of R?
+ RealScalar biggest_in_upper_part_of_c = c.topRows( dec().rank() ).cwiseAbs().maxCoeff();
+ RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff();
+ // FIXME brain dead
+ const RealScalar m_precision = NumTraits<Scalar>::epsilon() * (std::min)(rows,cols);
+ // this internal:: prefix is needed by at least gcc 3.4 and ICC
+ if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision))
+ return;
+ }
+ dec().matrixQR()
+ .topLeftCorner(dec().rank(), dec().rank())
+ .template triangularView<Upper>()
+ .solveInPlace(c.topRows(dec().rank()));
+
+ for(Index i = 0; i < dec().rank(); ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
+ for(Index i = dec().rank(); i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
+ }
+};
+
+} // end namespace internal
+
+/** \returns the matrix Q */
+template<typename MatrixType>
+typename FullPivHouseholderQR<MatrixType>::MatrixQType FullPivHouseholderQR<MatrixType>::matrixQ() const
+{
+ eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+ // compute the product H'_0 H'_1 ... H'_n-1,
+ // where H_k is the k-th Householder transformation I - h_k v_k v_k'
+ // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
+ Index rows = m_qr.rows();
+ Index cols = m_qr.cols();
+ Index size = (std::min)(rows,cols);
+ MatrixQType res = MatrixQType::Identity(rows, rows);
+ Matrix<Scalar,1,MatrixType::RowsAtCompileTime> temp(rows);
+ for (Index k = size-1; k >= 0; k--)
+ {
+ res.block(k, k, rows-k, rows-k)
+ .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), internal::conj(m_hCoeffs.coeff(k)), &temp.coeffRef(k));
+ res.row(k).swap(res.row(m_rows_transpositions.coeff(k)));
+ }
+ return res;
+}
+
+/** \return the full-pivoting Householder QR decomposition of \c *this.
+ *
+ * \sa class FullPivHouseholderQR
+ */
+template<typename Derived>
+const FullPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::fullPivHouseholderQr() const
+{
+ return FullPivHouseholderQR<PlainObject>(eval());
+}
+
+#endif // EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
diff --git a/extern/Eigen3/Eigen/src/QR/HouseholderQR.h b/extern/Eigen3/Eigen/src/QR/HouseholderQR.h
new file mode 100644
index 00000000000..9ee96de2680
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/QR/HouseholderQR.h
@@ -0,0 +1,355 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Vincent Lejeune
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_QR_H
+#define EIGEN_QR_H
+
+/** \ingroup QR_Module
+ *
+ * \class HouseholderQR
+ *
+ * \brief Householder QR decomposition of a matrix
+ *
+ * \param MatrixType the type of the matrix of which we are computing the QR decomposition
+ *
+ * This class performs a QR decomposition of a matrix \b A into matrices \b Q and \b R
+ * such that
+ * \f[
+ * \mathbf{A} = \mathbf{Q} \, \mathbf{R}
+ * \f]
+ * by using Householder transformations. Here, \b Q is a unitary matrix and \b R an upper triangular matrix.
+ * The result is stored in a compact way compatible with LAPACK.
+ *
+ * Note that no pivoting is performed. This is \b not a rank-revealing decomposition.
+ * If you want that feature, use FullPivHouseholderQR or ColPivHouseholderQR instead.
+ *
+ * This Householder QR decomposition is faster, but less numerically stable and less feature-full than
+ * FullPivHouseholderQR or ColPivHouseholderQR.
+ *
+ * \sa MatrixBase::householderQr()
+ */
+template<typename _MatrixType> class HouseholderQR
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ Options = MatrixType::Options,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
+ typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
+ typedef typename HouseholderSequence<MatrixType,HCoeffsType>::ConjugateReturnType HouseholderSequenceType;
+
+ /**
+ * \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via HouseholderQR::compute(const MatrixType&).
+ */
+ HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem \a size.
+ * \sa HouseholderQR()
+ */
+ HouseholderQR(Index rows, Index cols)
+ : m_qr(rows, cols),
+ m_hCoeffs((std::min)(rows,cols)),
+ m_temp(cols),
+ m_isInitialized(false) {}
+
+ HouseholderQR(const MatrixType& matrix)
+ : m_qr(matrix.rows(), matrix.cols()),
+ m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
+ m_temp(matrix.cols()),
+ m_isInitialized(false)
+ {
+ compute(matrix);
+ }
+
+    /** This method finds a solution x to the equation Ax=b, if any exists, where A is
+      * the matrix of which *this is the QR decomposition.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \returns a solution.
+ *
+ * \note The case where b is a matrix is not yet implemented. Also, this
+ * code is space inefficient.
+ *
+ * \note_about_checking_solutions
+ *
+ * \note_about_arbitrary_choice_of_solution
+ *
+ * Example: \include HouseholderQR_solve.cpp
+ * Output: \verbinclude HouseholderQR_solve.out
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<HouseholderQR, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+ return internal::solve_retval<HouseholderQR, Rhs>(*this, b.derived());
+ }
+
+ HouseholderSequenceType householderQ() const
+ {
+ eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+ return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
+ }
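+
+    // A minimal sketch for materializing Q, assuming an m x n MatrixXd A with
+    // m >= n (illustration only): the returned Householder sequence converts to
+    // the dense square Q, and a thin Q can be formed by applying the sequence to
+    // a rectangular identity:
+    //   Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);
+    //   Eigen::MatrixXd Q     = qr.householderQ();  // m x m
+    //   Eigen::MatrixXd thinQ = qr.householderQ()
+    //                         * Eigen::MatrixXd::Identity(A.rows(), A.cols()); // m x n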
+
+ /** \returns a reference to the matrix where the Householder QR decomposition is stored
+ * in a LAPACK-compatible way.
+ */
+ const MatrixType& matrixQR() const
+ {
+ eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+ return m_qr;
+ }
+
+ HouseholderQR& compute(const MatrixType& matrix);
+
+ /** \returns the absolute value of the determinant of the matrix of which
+ * *this is the QR decomposition. It has only linear complexity
+ * (that is, O(n) where n is the dimension of the square matrix)
+ * as the QR decomposition has already been computed.
+ *
+ * \note This is only for square matrices.
+ *
+ * \warning a determinant can be very big or small, so for matrices
+ * of large enough dimension, there is a risk of overflow/underflow.
+ * One way to work around that is to use logAbsDeterminant() instead.
+ *
+ * \sa logAbsDeterminant(), MatrixBase::determinant()
+ */
+ typename MatrixType::RealScalar absDeterminant() const;
+
+ /** \returns the natural log of the absolute value of the determinant of the matrix of which
+ * *this is the QR decomposition. It has only linear complexity
+ * (that is, O(n) where n is the dimension of the square matrix)
+ * as the QR decomposition has already been computed.
+ *
+ * \note This is only for square matrices.
+ *
+ * \note This method is useful to work around the risk of overflow/underflow that's inherent
+ * to determinant computation.
+ *
+ * \sa absDeterminant(), MatrixBase::determinant()
+ */
+ typename MatrixType::RealScalar logAbsDeterminant() const;
+
+ inline Index rows() const { return m_qr.rows(); }
+ inline Index cols() const { return m_qr.cols(); }
+ const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
+
+ protected:
+ MatrixType m_qr;
+ HCoeffsType m_hCoeffs;
+ RowVectorType m_temp;
+ bool m_isInitialized;
+};
+
+template<typename MatrixType>
+typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const
+{
+ eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+ eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+ return internal::abs(m_qr.diagonal().prod());
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() const
+{
+ eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+ eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+ return m_qr.diagonal().cwiseAbs().array().log().sum();
+}
+
+namespace internal {
+
+/** \internal */
+template<typename MatrixQR, typename HCoeffs>
+void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0)
+{
+ typedef typename MatrixQR::Index Index;
+ typedef typename MatrixQR::Scalar Scalar;
+ typedef typename MatrixQR::RealScalar RealScalar;
+ Index rows = mat.rows();
+ Index cols = mat.cols();
+ Index size = (std::min)(rows,cols);
+
+ eigen_assert(hCoeffs.size() == size);
+
+ typedef Matrix<Scalar,MatrixQR::ColsAtCompileTime,1> TempType;
+ TempType tempVector;
+ if(tempData==0)
+ {
+ tempVector.resize(cols);
+ tempData = tempVector.data();
+ }
+
+ for(Index k = 0; k < size; ++k)
+ {
+ Index remainingRows = rows - k;
+ Index remainingCols = cols - k - 1;
+
+ RealScalar beta;
+ mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta);
+ mat.coeffRef(k,k) = beta;
+
+ // apply H to remaining part of m_qr from the left
+ mat.bottomRightCorner(remainingRows, remainingCols)
+ .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1);
+ }
+}
+
+/** \internal */
+template<typename MatrixQR, typename HCoeffs>
+void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
+ typename MatrixQR::Index maxBlockSize=32,
+ typename MatrixQR::Scalar* tempData = 0)
+{
+ typedef typename MatrixQR::Index Index;
+ typedef typename MatrixQR::Scalar Scalar;
+ typedef typename MatrixQR::RealScalar RealScalar;
+ typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
+
+ Index rows = mat.rows();
+ Index cols = mat.cols();
+ Index size = (std::min)(rows, cols);
+
+ typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
+ TempType tempVector;
+ if(tempData==0)
+ {
+ tempVector.resize(cols);
+ tempData = tempVector.data();
+ }
+
+ Index blockSize = (std::min)(maxBlockSize,size);
+
+ Index k = 0;
+ for (k = 0; k < size; k += blockSize)
+ {
+ Index bs = (std::min)(size-k,blockSize); // actual size of the block
+ Index tcols = cols - k - bs; // trailing columns
+ Index brows = rows-k; // rows of the block
+
+ // partition the matrix:
+ // A00 | A01 | A02
+ // mat = A10 | A11 | A12
+ // A20 | A21 | A22
+    // and perform the QR decomposition of the panel [A11^T A21^T]^T,
+    // then update the trailing block [A12^T A22^T]^T using level 3 operations.
+    // Finally, the algorithm continues on A22.
+
+ BlockType A11_21 = mat.block(k,k,brows,bs);
+ Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
+
+ householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
+
+ if(tcols)
+ {
+ BlockType A21_22 = mat.block(k,k+bs,brows,tcols);
+ apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint());
+ }
+ }
+}
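+
+// A worked trace of the blocking above, with hypothetical sizes (illustration
+// only): for a 100x60 matrix and maxBlockSize=32, size=60 and the loop factors
+// panels of width bs=32 then bs=28; each step QRs mat.block(k,k,100-k,bs) in
+// place and applies the blocked Householder update to the trailing columns.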
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
+ : solve_retval_base<HouseholderQR<_MatrixType>, Rhs>
+{
+ EIGEN_MAKE_SOLVE_HELPERS(HouseholderQR<_MatrixType>,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ const Index rows = dec().rows(), cols = dec().cols();
+ const Index rank = (std::min)(rows, cols);
+ eigen_assert(rhs().rows() == rows);
+
+ typename Rhs::PlainObject c(rhs());
+
+ // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
+ c.applyOnTheLeft(householderSequence(
+ dec().matrixQR().leftCols(rank),
+ dec().hCoeffs().head(rank)).transpose()
+ );
+
+ dec().matrixQR()
+ .topLeftCorner(rank, rank)
+ .template triangularView<Upper>()
+ .solveInPlace(c.topRows(rank));
+
+ dst.topRows(rank) = c.topRows(rank);
+ dst.bottomRows(cols-rank).setZero();
+ }
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
+{
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+ Index size = (std::min)(rows,cols);
+
+ m_qr = matrix;
+ m_hCoeffs.resize(size);
+
+ m_temp.resize(cols);
+
+ internal::householder_qr_inplace_blocked(m_qr, m_hCoeffs, 48, m_temp.data());
+
+ m_isInitialized = true;
+ return *this;
+}
+
+/** \return the Householder QR decomposition of \c *this.
+ *
+ * \sa class HouseholderQR
+ */
+template<typename Derived>
+const HouseholderQR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::householderQr() const
+{
+ return HouseholderQR<PlainObject>(eval());
+}
+
+
+#endif // EIGEN_QR_H
diff --git a/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h b/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
new file mode 100644
index 00000000000..5f61399988c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
@@ -0,0 +1,716 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_JACOBISVD_H
+#define EIGEN_JACOBISVD_H
+
+namespace internal {
+// forward declaration (needed by ICC)
+// the empty body is required by MSVC
+template<typename MatrixType, int QRPreconditioner,
+ bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct svd_precondition_2x2_block_to_be_real {};
+
+/*** QR preconditioners (R-SVD)
+ ***
+ *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
+ *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
+ *** JacobiSVD which by itself is only able to work on square matrices.
+ ***/
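+
+// A minimal sketch of selecting a preconditioner, assuming a MatrixXd A
+// (illustration only): the second template parameter chooses the QR step used
+// to square up a rectangular matrix before the Jacobi iterations, e.g.
+//   Eigen::JacobiSVD<Eigen::MatrixXd, Eigen::ColPivHouseholderQRPreconditioner>
+//       svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);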
+
+enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+struct qr_preconditioner_should_do_anything
+{
+ enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
+ b = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
+ ret = !( (QRPreconditioner == NoQRPreconditioner) ||
+ (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
+ (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
+ };
+};
+
+template<typename MatrixType, int QRPreconditioner, int Case,
+ bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
+> struct qr_preconditioner_impl {};
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+struct qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
+{
+ static bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
+ {
+ return false;
+ }
+};
+
+/*** preconditioner using FullPivHouseholderQR ***/
+
+template<typename MatrixType>
+struct qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+ static bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ FullPivHouseholderQR<MatrixType> qr(matrix);
+ svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) svd.m_matrixU = qr.matrixQ();
+ if(svd.computeV()) svd.m_matrixV = qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+};
+
+template<typename MatrixType>
+struct qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+ static bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::RowsAtCompileTime,
+ MatrixType::Options, MatrixType::MaxColsAtCompileTime, MatrixType::MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+ FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> qr(matrix.adjoint());
+ svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) svd.m_matrixV = qr.matrixQ();
+ if(svd.computeU()) svd.m_matrixU = qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+};
+
+/*** preconditioner using ColPivHouseholderQR ***/
+
+template<typename MatrixType>
+struct qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+ static bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ ColPivHouseholderQR<MatrixType> qr(matrix);
+ svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) svd.m_matrixU = qr.householderQ();
+ else if(svd.m_computeThinU) {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ qr.householderQ().applyThisOnTheLeft(svd.m_matrixU);
+ }
+ if(svd.computeV()) svd.m_matrixV = qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+};
+
+template<typename MatrixType>
+struct qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+ static bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::RowsAtCompileTime,
+ MatrixType::Options, MatrixType::MaxColsAtCompileTime, MatrixType::MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+ ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> qr(matrix.adjoint());
+ svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) svd.m_matrixV = qr.householderQ();
+ else if(svd.m_computeThinV) {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ qr.householderQ().applyThisOnTheLeft(svd.m_matrixV);
+ }
+ if(svd.computeU()) svd.m_matrixU = qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+};
+
+/*** preconditioner using HouseholderQR ***/
+
+template<typename MatrixType>
+struct qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+ static bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ HouseholderQR<MatrixType> qr(matrix);
+ svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) svd.m_matrixU = qr.householderQ();
+ else if(svd.m_computeThinU) {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ qr.householderQ().applyThisOnTheLeft(svd.m_matrixU);
+ }
+ if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
+ return true;
+ }
+ return false;
+ }
+};
+
+template<typename MatrixType>
+struct qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+ static bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::RowsAtCompileTime,
+ MatrixType::Options, MatrixType::MaxColsAtCompileTime, MatrixType::MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+ HouseholderQR<TransposeTypeWithSameStorageOrder> qr(matrix.adjoint());
+ svd.m_workMatrix = qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) svd.m_matrixV = qr.householderQ();
+ else if(svd.m_computeThinV) {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ qr.householderQ().applyThisOnTheLeft(svd.m_matrixV);
+ }
+ if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
+ return true;
+ }
+ else return false;
+ }
+};
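// Common pattern in the preconditioner specializations above: for a tall
// matrix A (rows > cols), a QR decomposition A = Q * R reduces the SVD to
// that of the square triangular factor,
//   \f[ A = Q \begin{pmatrix} R \\ 0 \end{pmatrix}, \quad R = U_R S V^*
//       \;\Longrightarrow\;
//       A = Q \begin{pmatrix} U_R & 0 \\ 0 & I \end{pmatrix}
//           \begin{pmatrix} S \\ 0 \end{pmatrix} V^*, \f]
// so the work matrix becomes R while Q is folded into U (plus, for the
// pivoting variants, the column permutation, which lands in V). Wide
// matrices take the same path on the adjoint, with the roles of U and V
// swapped.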
+
+/*** 2x2 SVD implementation
+ ***
+ *** JacobiSVD proceeds by solving a series of 2x2 SVD subproblems
+ ***/
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
+};
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
+ {
+ Scalar z;
+ JacobiRotation<Scalar> rot;
+ RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p)));
+ if(n==0)
+ {
+ z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.row(p) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ else
+ {
+ rot.c() = conj(work_matrix.coeff(p,p)) / n;
+ rot.s() = work_matrix.coeff(q,p) / n;
+ work_matrix.applyOnTheLeft(p,q,rot);
+ if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
+ if(work_matrix.coeff(p,q) != Scalar(0))
+ {
+ Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.col(q) *= z;
+ if(svd.computeV()) svd.m_matrixV.col(q) *= z;
+ }
+ if(work_matrix.coeff(q,q) != Scalar(0))
+ {
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ }
+ }
+};
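// Why the unit-modulus scaling above is safe: for a nonzero entry a, the
// factor z = |a|/a satisfies
//   \f[ |z| = 1, \qquad z\,a = |a| \in \mathbb{R}_{\ge 0}, \f]
// so scaling a row of the work matrix by z (and the matching column of U by
// conj(z)) multiplies the factorization by a diagonal unitary on each side.
// This leaves the product U * W * V^* and the singular values unchanged
// while rotating the targeted entries onto the real axis, which is all the
// real 2x2 kernel below needs.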
+
+template<typename MatrixType, typename RealScalar, typename Index>
+void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
+ JacobiRotation<RealScalar> *j_left,
+ JacobiRotation<RealScalar> *j_right)
+{
+ Matrix<RealScalar,2,2> m;
+ m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)),
+ real(matrix.coeff(q,p)), real(matrix.coeff(q,q));
+ JacobiRotation<RealScalar> rot1;
+ RealScalar t = m.coeff(0,0) + m.coeff(1,1);
+ RealScalar d = m.coeff(1,0) - m.coeff(0,1);
+ if(t == RealScalar(0))
+ {
+ rot1.c() = RealScalar(0);
+ rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+ }
+ else
+ {
+ RealScalar u = d / t;
+ rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + abs2(u));
+ rot1.s() = rot1.c() * u;
+ }
+ m.applyOnTheLeft(0,1,rot1);
+ j_right->makeJacobi(m,0,1);
+ *j_left = rot1 * j_right->transpose();
+}
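// How rot1 is chosen above: real_2x2_jacobi_svd first applies a rotation on
// the left to make the 2x2 block symmetric, then lets makeJacobi diagonalize
// the symmetric result. Requiring equal off-diagonal entries of R*m with
// R = [c s; -s c] gives
//   \f[ c\,m_{01} + s\,m_{11} = -s\,m_{00} + c\,m_{10}
//       \;\Longrightarrow\;
//       \frac{s}{c} = \frac{m_{10} - m_{01}}{m_{00} + m_{11}} = \frac{d}{t}, \f]
// matching u = d/t, c = 1/sqrt(1+u^2), s = c*u in the code; when t == 0 any
// c = 0, s = +-1 symmetrizes, and the code picks s by the sign of d. The
// final j_left then combines rot1 with the transpose of j_right, so that
// applying j_left on the left and j_right on the right diagonalizes the
// original block.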
+
+} // end namespace internal
+
+/** \ingroup SVD_Module
+ *
+ * \class JacobiSVD
+ *
+ * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ * \param QRPreconditioner this optional parameter lets you specify the type of QR decomposition that will be used internally
+ * for the R-SVD step for non-square matrices. See discussion of possible values below.
+ *
+ * An SVD decomposition consists of decomposing any n-by-p matrix \a A as a product
+ * \f[ A = U S V^* \f]
+ * where \a U is an n-by-n unitary, \a V is a p-by-p unitary, and \a S is an n-by-p real positive matrix which is zero outside of its main diagonal;
+ * the diagonal entries of \a S are known as the \em singular \em values of \a A, and the columns of \a U and \a V are known as the left
+ * and right \em singular \em vectors of \a A respectively.
+ *
+ * Singular values are always sorted in decreasing order.
+ *
+ * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
+ *
+ * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In the case of a rectangular n-by-p matrix, letting \a m be the
+ * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+ * singular vectors. Asking for \em thin \a U or \a V means asking for only their first \a m columns to be formed. So \a U is then an n-by-m matrix,
+ * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * Here's an example demonstrating basic usage:
+ * \include JacobiSVD_basic.cpp
+ * Output: \verbinclude JacobiSVD_basic.out
+ *
+ * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
+ * bidiagonalizing SVD algorithms for large square matrices; however, its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
+ * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
+ * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
+ *
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
+ *
+ * The possible values for QRPreconditioner are:
+ * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
+ * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
+ * Contrary to the other QR preconditioners, it doesn't allow computing thin unitaries.
+ * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
+ * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+ * is inherently non-pivoting). However, the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
+ * process is more reliable than the optimized bidiagonal SVD iterations.
+ * \li NoQRPreconditioner disables QR preconditioning altogether. This is useful if you know that you will only be computing
+ * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option results in
+ * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD always checks
+ * whether QR preconditioning is needed before applying it anyway.
+ *
+ * \sa MatrixBase::jacobiSvd()
+ */
+template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+ MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+ WorkMatrixType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via JacobiSVD::compute(const MatrixType&).
+ */
+ JacobiSVD()
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1)
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa JacobiSVD()
+ */
+ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1)
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1)
+ {
+ compute(matrix, computationOptions);
+ }
+
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ JacobiSVD& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, m_computationOptions);
+ }
+
+ /** \returns the \a U matrix.
+ *
+ * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
+ *
+ * The first \a m columns of \a U are the left singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a U to be computed.
+ */
+ const MatrixUType& matrixU() const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?");
+ return m_matrixU;
+ }
+
+ /** \returns the \a V matrix.
+ *
+ * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for #ComputeThinV.
+ *
+ * The first \a m columns of \a V are the right singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a V to be computed.
+ */
+ const MatrixVType& matrixV() const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?");
+ return m_matrixV;
+ }
+
+ /** \returns the vector of singular values.
+ *
+ * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
+ * returned vector has size \a m. Singular values are always sorted in decreasing order.
+ */
+ const SingularValuesType& singularValues() const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ return m_singularValues;
+ }
+
+ /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
+ inline bool computeU() const { return m_computeFullU || m_computeThinU; }
+ /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
+ inline bool computeV() const { return m_computeFullV || m_computeThinV; }
+
+ /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \note Solving requires both U and V to be computed. Thin U and V are enough; there is no need for full U or V.
+ *
+ * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+ * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<JacobiSVD, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
+ }
+
+ /** \returns the number of singular values that are not exactly 0 */
+ Index nonzeroSingularValues() const
+ {
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ return m_nonzeroSingularValues;
+ }
+
+ inline Index rows() const { return m_rows; }
+ inline Index cols() const { return m_cols; }
+
+ private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ protected:
+ MatrixUType m_matrixU;
+ MatrixVType m_matrixV;
+ SingularValuesType m_singularValues;
+ WorkMatrixType m_workMatrix;
+ bool m_isInitialized, m_isAllocated;
+ bool m_computeFullU, m_computeThinU;
+ bool m_computeFullV, m_computeThinV;
+ unsigned int m_computationOptions;
+ Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+
+ template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
+ friend struct internal::svd_precondition_2x2_block_to_be_real;
+ template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
+ friend struct internal::qr_preconditioner_impl;
+};
+
+template<typename MatrixType, int QRPreconditioner>
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ eigen_assert(rows >= 0 && cols >= 0);
+
+ if (m_isAllocated &&
+ rows == m_rows &&
+ cols == m_cols &&
+ computationOptions == m_computationOptions)
+ {
+ return;
+ }
+
+ m_rows = rows;
+ m_cols = cols;
+ m_isInitialized = false;
+ m_isAllocated = true;
+ m_computationOptions = computationOptions;
+ m_computeFullU = (computationOptions & ComputeFullU) != 0;
+ m_computeThinU = (computationOptions & ComputeThinU) != 0;
+ m_computeFullV = (computationOptions & ComputeFullV) != 0;
+ m_computeThinV = (computationOptions & ComputeThinV) != 0;
+ eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
+ eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
+ eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+ "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
+ if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
+ {
+ eigen_assert(!(m_computeThinU || m_computeThinV) &&
+ "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
+ "Use the ColPivHouseholderQR preconditioner instead.");
+ }
+ m_diagSize = (std::min)(m_rows, m_cols);
+ m_singularValues.resize(m_diagSize);
+ m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+ : m_computeThinU ? m_diagSize
+ : 0);
+ m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+ : m_computeThinV ? m_diagSize
+ : 0);
+ m_workMatrix.resize(m_diagSize, m_diagSize);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+JacobiSVD<MatrixType, QRPreconditioner>&
+JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+
+ // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
+ // only worsening the precision of U and V as we accumulate more rotations
+ const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
+
+ /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
+
+ if(!internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows>::run(*this, matrix)
+ && !internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols>::run(*this, matrix))
+ {
+ m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize);
+ if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
+ if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
+ if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
+ if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
+ }
+
+ /*** step 2. The main Jacobi SVD iteration. ***/
+
+ bool finished = false;
+ while(!finished)
+ {
+ finished = true;
+
+ // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
+
+ for(Index p = 1; p < m_diagSize; ++p)
+ {
+ for(Index q = 0; q < p; ++q)
+ {
+ // if this 2x2 sub-matrix is not diagonal already...
+ // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaNs don't
+ // keep us iterating forever.
+ using std::max;
+ if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p)))
+ > (max)(internal::abs(m_workMatrix.coeff(p,p)),internal::abs(m_workMatrix.coeff(q,q)))*precision)
+ {
+ finished = false;
+
+ // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
+ internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
+ JacobiRotation<RealScalar> j_left, j_right;
+ internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+ // accumulate resulting Jacobi rotations
+ m_workMatrix.applyOnTheLeft(p,q,j_left);
+ if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+ m_workMatrix.applyOnTheRight(p,q,j_right);
+ if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
+ }
+ }
+ }
+ }
+
+ /*** step 3. The work matrix is now diagonal, so make its diagonal entries positive so that they are the singular values ***/
+
+ for(Index i = 0; i < m_diagSize; ++i)
+ {
+ RealScalar a = internal::abs(m_workMatrix.coeff(i,i));
+ m_singularValues.coeffRef(i) = a;
+ if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
+ }
+
+ /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
+
+ m_nonzeroSingularValues = m_diagSize;
+ for(Index i = 0; i < m_diagSize; i++)
+ {
+ Index pos;
+ RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
+ if(maxRemainingSingularValue == RealScalar(0))
+ {
+ m_nonzeroSingularValues = i;
+ break;
+ }
+ if(pos)
+ {
+ pos += i;
+ std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
+ if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
+ if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
+ }
+ }
+
+ m_isInitialized = true;
+ return *this;
+}
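// Invariant behind step 2 above: writing W for m_workMatrix, each pass of
// the inner loop updates
//   \f[ W \leftarrow J_l\, W\, J_r, \qquad U \leftarrow U\, J_l^T, \qquad V \leftarrow V\, J_r, \f]
// with J_l, J_r real rotations, so the product U W V^* is preserved while
// the off-diagonal mass of W shrinks. At convergence W is diagonal, and
// steps 3 and 4 only normalize the signs (or phases) of its entries and
// sort them.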
+
+namespace internal {
+template<typename _MatrixType, int QRPreconditioner, typename Rhs>
+struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+ : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+{
+ typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
+ EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+
+ // A = U S V^*
+ // So A^{-1} = V S^{-1} U^*
+
+ Index diagSize = (std::min)(dec().rows(), dec().cols());
+ typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
+
+ Index nonzeroSingVals = dec().nonzeroSingularValues();
+ invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
+ invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
+
+ dst = dec().matrixV().leftCols(diagSize)
+ * invertedSingVals.asDiagonal()
+ * dec().matrixU().leftCols(diagSize).adjoint()
+ * rhs();
+ }
+};
+} // end namespace internal
+
+template<typename Derived>
+JacobiSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
+{
+ return JacobiSVD<PlainObject>(*this, computationOptions);
+}
+
+
+
+#endif // EIGEN_JACOBISVD_H
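// A minimal usage sketch for JacobiSVD as defined above, assuming Eigen 3's
// public API (MatrixXf has a Dynamic number of columns, so thin U and V are
// available):

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
  // By default only the singular values are computed; ask for thin U and V.
  Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  std::cout << "singular values:\n" << svd.singularValues() << "\n";
  // solve() is implicitly least-squares: it minimizes ||A*x - b||.
  Eigen::VectorXf b = Eigen::VectorXf::Random(5);
  Eigen::VectorXf x = svd.solve(b);
  std::cout << "least-squares solution:\n" << x << "\n";
  return 0;
}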
diff --git a/extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h b/extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h
new file mode 100644
index 00000000000..2de197da953
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SVD/UpperBidiagonalization.h
@@ -0,0 +1,159 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BIDIAGONALIZATION_H
+#define EIGEN_BIDIAGONALIZATION_H
+
+namespace internal {
+// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, so we don't want to make it a stable API.
+// At the same time, it's worth keeping for now, as it's about the only thing exercising the BandMatrix class.
+
+template<typename _MatrixType> class UpperBidiagonalization
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
+ typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
+ typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0> BidiagonalType;
+ typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
+ typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
+ typedef HouseholderSequence<
+ const MatrixType,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Diagonal<const MatrixType,0> >
+ > HouseholderUSequenceType;
+ typedef HouseholderSequence<
+ const MatrixType,
+ Diagonal<const MatrixType,1>,
+ OnTheRight
+ > HouseholderVSequenceType;
+
+ /**
+ * \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via Bidiagonalization::compute(const MatrixType&).
+ */
+ UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}
+
+ UpperBidiagonalization(const MatrixType& matrix)
+ : m_householder(matrix.rows(), matrix.cols()),
+ m_bidiagonal(matrix.cols(), matrix.cols()),
+ m_isInitialized(false)
+ {
+ compute(matrix);
+ }
+
+ UpperBidiagonalization& compute(const MatrixType& matrix);
+
+ const MatrixType& householder() const { return m_householder; }
+ const BidiagonalType& bidiagonal() const { return m_bidiagonal; }
+
+ const HouseholderUSequenceType householderU() const
+ {
+ eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
+ return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
+ }
+
+ const HouseholderVSequenceType householderV() // const here gives nasty errors and I'm lazy
+ {
+ eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
+ return HouseholderVSequenceType(m_householder, m_householder.const_derived().template diagonal<1>())
+ .setLength(m_householder.cols()-1)
+ .setShift(1);
+ }
+
+ protected:
+ MatrixType m_householder;
+ BidiagonalType m_bidiagonal;
+ bool m_isInitialized;
+};
+
+template<typename _MatrixType>
+UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
+{
+ Index rows = matrix.rows();
+ Index cols = matrix.cols();
+
+ eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
+
+ m_householder = matrix;
+
+ ColVectorType temp(rows);
+
+ for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
+ {
+ Index remainingRows = rows - k;
+ Index remainingCols = cols - k - 1;
+
+ // construct left householder transform in-place in m_householder
+ m_householder.col(k).tail(remainingRows)
+ .makeHouseholderInPlace(m_householder.coeffRef(k,k),
+ m_bidiagonal.template diagonal<0>().coeffRef(k));
+ // apply householder transform to remaining part of m_householder on the left
+ m_householder.bottomRightCorner(remainingRows, remainingCols)
+ .applyHouseholderOnTheLeft(m_householder.col(k).tail(remainingRows-1),
+ m_householder.coeff(k,k),
+ temp.data());
+
+ if(k == cols-1) break;
+
+ // construct right householder transform in-place in m_householder
+ m_householder.row(k).tail(remainingCols)
+ .makeHouseholderInPlace(m_householder.coeffRef(k,k+1),
+ m_bidiagonal.template diagonal<1>().coeffRef(k));
+ // apply householder transform to remaining part of m_householder on the right
+ m_householder.bottomRightCorner(remainingRows-1, remainingCols)
+ .applyHouseholderOnTheRight(m_householder.row(k).tail(remainingCols-1).transpose(),
+ m_householder.coeff(k,k+1),
+ temp.data());
+ }
+ m_isInitialized = true;
+ return *this;
+}
+
+#if 0
+/** \return the Householder QR decomposition of \c *this.
+ *
+ * \sa class Bidiagonalization
+ */
+template<typename Derived>
+const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::bidiagonalization() const
+{
+ return UpperBidiagonalization<PlainObject>(eval());
+}
+#endif
+
+} // end namespace internal
+
+#endif // EIGEN_BIDIAGONALIZATION_H
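// A hedged sketch exercising internal::UpperBidiagonalization, mirroring the
// reconstruction used by Eigen's own test for this class (the API above is
// internal and not stable):

#include <Eigen/Dense> // the Dense umbrella pulls in the SVD module headers
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4); // requires rows >= cols
  Eigen::internal::UpperBidiagonalization<Eigen::MatrixXd> ubd(A);
  // Embed the cols-by-cols bidiagonal factor B into a rows-by-cols matrix,
  // then reconstruct A = householderU * B * householderV^*.
  Eigen::MatrixXd B = Eigen::MatrixXd::Zero(6, 4);
  B.block(0, 0, 4, 4) = ubd.bidiagonal();
  Eigen::MatrixXd Arec = ubd.householderU() * B * ubd.householderV().adjoint();
  std::cout << "reconstruction error: " << (A - Arec).norm() << "\n"; // ~1e-15
  return 0;
}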
diff --git a/extern/Eigen2/Eigen/src/Sparse/AmbiVector.h b/extern/Eigen3/Eigen/src/Sparse/AmbiVector.h
index f279e80f00a..2ea8ba3096b 100644
--- a/extern/Eigen2/Eigen/src/Sparse/AmbiVector.h
+++ b/extern/Eigen3/Eigen/src/Sparse/AmbiVector.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -30,54 +30,57 @@
*
* See BasicSparseLLT and SparseProduct for usage examples.
*/
-template<typename _Scalar> class AmbiVector
+template<typename _Scalar, typename _Index>
+class AmbiVector
{
public:
typedef _Scalar Scalar;
+ typedef _Index Index;
typedef typename NumTraits<Scalar>::Real RealScalar;
- AmbiVector(int size)
- : m_buffer(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+
+ AmbiVector(Index size)
+ : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
{
resize(size);
}
- void init(RealScalar estimatedDensity);
+ void init(double estimatedDensity);
void init(int mode);
- void nonZeros() const;
+ Index nonZeros() const;
/** Specifies a sub-vector to work on */
- void setBounds(int start, int end) { m_start = start; m_end = end; }
+ void setBounds(Index start, Index end) { m_start = start; m_end = end; }
void setZero();
void restart();
- Scalar& coeffRef(int i);
- Scalar coeff(int i);
+ Scalar& coeffRef(Index i);
+ Scalar& coeff(Index i);
class Iterator;
~AmbiVector() { delete[] m_buffer; }
- void resize(int size)
+ void resize(Index size)
{
if (m_allocatedSize < size)
reallocate(size);
m_size = size;
}
- int size() const { return m_size; }
+ Index size() const { return m_size; }
protected:
- void reallocate(int size)
+ void reallocate(Index size)
{
// if the size of the matrix is not too large, let's allocate a bit more than needed such
// that we can handle a dense vector even in sparse mode.
delete[] m_buffer;
if (size<1000)
{
- int allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+ Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
m_buffer = new Scalar[allocSize];
}
@@ -93,9 +96,9 @@ template<typename _Scalar> class AmbiVector
void reallocateSparse()
{
- int copyElements = m_allocatedElements;
- m_allocatedElements = std::min(int(m_allocatedElements*1.5),m_size);
- int allocSize = m_allocatedElements * sizeof(ListEl);
+ Index copyElements = m_allocatedElements;
+ m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
+ Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
Scalar* newBuffer = new Scalar[allocSize];
memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
@@ -107,33 +110,30 @@ template<typename _Scalar> class AmbiVector
// element type of the linked list
struct ListEl
{
- int next;
- int index;
+ Index next;
+ Index index;
Scalar value;
};
// used to store data in both modes
Scalar* m_buffer;
- int m_size;
- int m_start;
- int m_end;
- int m_allocatedSize;
- int m_allocatedElements;
- int m_mode;
+ Scalar m_zero;
+ Index m_size;
+ Index m_start;
+ Index m_end;
+ Index m_allocatedSize;
+ Index m_allocatedElements;
+ Index m_mode;
// linked list mode
- int m_llStart;
- int m_llCurrent;
- int m_llSize;
-
- private:
- AmbiVector(const AmbiVector&);
-
+ Index m_llStart;
+ Index m_llCurrent;
+ Index m_llSize;
};
/** \returns the number of nonzeros in the current sub-vector */
-template<typename Scalar>
-void AmbiVector<Scalar>::nonZeros() const
+template<typename _Scalar,typename _Index>
+_Index AmbiVector<_Scalar,_Index>::nonZeros() const
{
if (m_mode==IsSparse)
return m_llSize;
@@ -141,8 +141,8 @@ void AmbiVector<Scalar>::nonZeros() const
return m_end - m_start;
}
-template<typename Scalar>
-void AmbiVector<Scalar>::init(RealScalar estimatedDensity)
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
{
if (estimatedDensity>0.1)
init(IsDense);
@@ -150,8 +150,8 @@ void AmbiVector<Scalar>::init(RealScalar estimatedDensity)
init(IsSparse);
}
-template<typename Scalar>
-void AmbiVector<Scalar>::init(int mode)
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(int mode)
{
m_mode = mode;
if (m_mode==IsSparse)
@@ -166,31 +166,31 @@ void AmbiVector<Scalar>::init(int mode)
*
* Don't worry, this function is extremely cheap.
*/
-template<typename Scalar>
-void AmbiVector<Scalar>::restart()
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::restart()
{
m_llCurrent = m_llStart;
}
/** Set all coefficients of current subvector to zero */
-template<typename Scalar>
-void AmbiVector<Scalar>::setZero()
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::setZero()
{
if (m_mode==IsDense)
{
- for (int i=m_start; i<m_end; ++i)
+ for (Index i=m_start; i<m_end; ++i)
m_buffer[i] = Scalar(0);
}
else
{
- ei_assert(m_mode==IsSparse);
+ eigen_assert(m_mode==IsSparse);
m_llSize = 0;
m_llStart = -1;
}
}
-template<typename Scalar>
-Scalar& AmbiVector<Scalar>::coeffRef(int i)
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
@@ -198,7 +198,7 @@ Scalar& AmbiVector<Scalar>::coeffRef(int i)
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
// TODO factorize the following code to reduce code generation
- ei_assert(m_mode==IsSparse);
+ eigen_assert(m_mode==IsSparse);
if (m_llSize==0)
{
// this is the first element
@@ -224,8 +224,8 @@ Scalar& AmbiVector<Scalar>::coeffRef(int i)
}
else
{
- int nextel = llElements[m_llCurrent].next;
- ei_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
+ Index nextel = llElements[m_llCurrent].next;
+ eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
while (nextel >= 0 && llElements[nextel].index<=i)
{
m_llCurrent = nextel;
@@ -244,7 +244,7 @@ Scalar& AmbiVector<Scalar>::coeffRef(int i)
reallocateSparse();
llElements = reinterpret_cast<ListEl*>(m_buffer);
}
- ei_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
+ eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
// let's insert a new coefficient
ListEl& el = llElements[m_llSize];
el.value = Scalar(0);
@@ -258,36 +258,36 @@ Scalar& AmbiVector<Scalar>::coeffRef(int i)
}
}
-template<typename Scalar>
-Scalar AmbiVector<Scalar>::coeff(int i)
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
{
if (m_mode==IsDense)
return m_buffer[i];
else
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
- ei_assert(m_mode==IsSparse);
+ eigen_assert(m_mode==IsSparse);
if ((m_llSize==0) || (i<llElements[m_llStart].index))
{
- return Scalar(0);
+ return m_zero;
}
else
{
- int elid = m_llStart;
+ Index elid = m_llStart;
while (elid >= 0 && llElements[elid].index<i)
elid = llElements[elid].next;
if (llElements[elid].index==i)
return llElements[elid].value;
else
- return Scalar(0);
+ return m_zero;
}
}
}
/** Iterator over the nonzero coefficients */
-template<typename _Scalar>
-class AmbiVector<_Scalar>::Iterator
+template<typename _Scalar,typename _Index>
+class AmbiVector<_Scalar,_Index>::Iterator
{
public:
typedef _Scalar Scalar;
@@ -299,13 +299,15 @@ class AmbiVector<_Scalar>::Iterator
* In practice, all coefficients having a magnitude smaller than \a epsilon
* are skipped.
*/
- Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*precision<RealScalar>())
+ Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*NumTraits<RealScalar>::dummy_precision())
: m_vector(vec)
{
m_epsilon = epsilon;
m_isDense = m_vector.m_mode==IsDense;
if (m_isDense)
{
+ m_currentEl = 0; // this is to avoid a compilation warning
+ m_cachedValue = 0; // this is to avoid a compilation warning
m_cachedIndex = m_vector.m_start-1;
++(*this);
}
@@ -313,10 +315,11 @@ class AmbiVector<_Scalar>::Iterator
{
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
m_currentEl = m_vector.m_llStart;
- while (m_currentEl>=0 && ei_abs(llElements[m_currentEl].value)<m_epsilon)
+ while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon)
m_currentEl = llElements[m_currentEl].next;
if (m_currentEl<0)
{
+ m_cachedValue = 0; // this is to avoid a compilation warning
m_cachedIndex = -1;
}
else
@@ -327,7 +330,7 @@ class AmbiVector<_Scalar>::Iterator
}
}
- int index() const { return m_cachedIndex; }
+ Index index() const { return m_cachedIndex; }
Scalar value() const { return m_cachedValue; }
operator bool() const { return m_cachedIndex>=0; }
@@ -338,7 +341,7 @@ class AmbiVector<_Scalar>::Iterator
{
do {
++m_cachedIndex;
- } while (m_cachedIndex<m_vector.m_end && ei_abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+ } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
if (m_cachedIndex<m_vector.m_end)
m_cachedValue = m_vector.m_buffer[m_cachedIndex];
else
@@ -349,7 +352,7 @@ class AmbiVector<_Scalar>::Iterator
ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
do {
m_currentEl = llElements[m_currentEl].next;
- } while (m_currentEl>=0 && ei_abs(llElements[m_currentEl].value)<m_epsilon);
+ } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon);
if (m_currentEl<0)
{
m_cachedIndex = -1;
@@ -365,14 +368,11 @@ class AmbiVector<_Scalar>::Iterator
protected:
const AmbiVector& m_vector; // the target vector
- int m_currentEl; // the current element in sparse/linked-list mode
+ Index m_currentEl; // the current element in sparse/linked-list mode
RealScalar m_epsilon; // epsilon used to prune zero coefficients
- int m_cachedIndex; // current coordinate
+ Index m_cachedIndex; // current coordinate
Scalar m_cachedValue; // current value
bool m_isDense; // mode of the vector
-
- private:
- Iterator& operator=(const Iterator&);
};
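// Toy illustration (not Eigen code) of the mode choice AmbiVector::init
// makes above: estimated densities over 10% get the plain dense buffer,
// anything sparser gets the linked-list layout, whose per-element
// bookkeeping only pays off when most coefficients are zero.

#include <cstdio>

enum Mode { IsDense, IsSparse };

// Same 10% threshold as AmbiVector::init(double estimatedDensity) above.
static Mode chooseMode(double estimatedDensity)
{
  return estimatedDensity > 0.1 ? IsDense : IsSparse;
}

int main()
{
  std::printf("density 0.30 -> %s\n", chooseMode(0.30) == IsDense ? "dense" : "sparse");
  std::printf("density 0.02 -> %s\n", chooseMode(0.02) == IsDense ? "dense" : "sparse");
  return 0;
}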
diff --git a/extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h b/extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h
index 4dbd3230985..b3bde272ec2 100644
--- a/extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h
+++ b/extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -28,11 +28,20 @@
/** Stores a sparse set of values as a list of values and a list of indices.
*
*/
-template<typename Scalar>
+template<typename _Scalar,typename _Index>
class CompressedStorage
{
+ public:
+
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+
+ protected:
+
typedef typename NumTraits<Scalar>::Real RealScalar;
+
public:
+
CompressedStorage()
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{}
@@ -53,7 +62,7 @@ class CompressedStorage
{
resize(other.size());
memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
- memcpy(m_indices, other.m_indices, m_size * sizeof(int));
+ memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
return *this;
}
@@ -91,9 +100,9 @@ class CompressedStorage
m_size = size;
}
- void append(const Scalar& v, int i)
+ void append(const Scalar& v, Index i)
{
- int id = m_size;
+ Index id = static_cast<Index>(m_size);
resize(m_size+1, 1);
m_values[id] = v;
m_indices[id] = i;
@@ -106,10 +115,10 @@ class CompressedStorage
inline Scalar& value(size_t i) { return m_values[i]; }
inline const Scalar& value(size_t i) const { return m_values[i]; }
- inline int& index(size_t i) { return m_indices[i]; }
- inline const int& index(size_t i) const { return m_indices[i]; }
+ inline Index& index(size_t i) { return m_indices[i]; }
+ inline const Index& index(size_t i) const { return m_indices[i]; }
- static CompressedStorage Map(int* indices, Scalar* values, size_t size)
+ static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
{
CompressedStorage res;
res.m_indices = indices;
@@ -117,15 +126,15 @@ class CompressedStorage
res.m_allocatedSize = res.m_size = size;
return res;
}
-
+
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
- inline int searchLowerIndex(int key) const
+ inline Index searchLowerIndex(Index key) const
{
return searchLowerIndex(0, m_size, key);
}
-
+
/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
- inline int searchLowerIndex(size_t start, size_t end, int key) const
+ inline Index searchLowerIndex(size_t start, size_t end, Index key) const
{
while(end>start)
{
@@ -135,12 +144,12 @@ class CompressedStorage
else
end = mid;
}
- return start;
+ return static_cast<Index>(start);
}
-
+
/** \returns the stored value at index \a key
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
- inline Scalar at(int key, Scalar defaultValue = Scalar(0)) const
+ inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
{
if (m_size==0)
return defaultValue;
@@ -151,11 +160,11 @@ class CompressedStorage
const size_t id = searchLowerIndex(0,m_size-1,key);
return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
-
+
/** Like at(), but the search is performed in the range [start,end) */
- inline Scalar atInRange(size_t start, size_t end, int key, Scalar defaultValue = Scalar(0)) const
+ inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
{
- if (start==end)
+ if (start>=end)
return defaultValue;
else if (end>start && key==m_indices[end-1])
return m_values[end-1];
@@ -164,11 +173,11 @@ class CompressedStorage
const size_t id = searchLowerIndex(start,end-1,key);
return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
-
+
/** \returns a reference to the value at index \a key
* If the value does not exist, then the value \a defaultValue is inserted
* such that the keys are sorted. */
- inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0))
+ inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
{
size_t id = searchLowerIndex(0,m_size,key);
if (id>=m_size || m_indices[id]!=key)
@@ -184,14 +193,14 @@ class CompressedStorage
}
return m_values[id];
}
-
- void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
size_t k = 0;
size_t n = size();
for (size_t i=0; i<n; ++i)
{
- if (!ei_isMuchSmallerThan(value(i), reference, epsilon))
+ if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
{
value(k) = value(i);
index(k) = index(i);
@@ -206,11 +215,11 @@ class CompressedStorage
inline void reallocate(size_t size)
{
Scalar* newValues = new Scalar[size];
- int* newIndices = new int[size];
- size_t copySize = std::min(size, m_size);
+ Index* newIndices = new Index[size];
+ size_t copySize = (std::min)(size, m_size);
// copy
memcpy(newValues, m_values, copySize * sizeof(Scalar));
- memcpy(newIndices, m_indices, copySize * sizeof(int));
+ memcpy(newIndices, m_indices, copySize * sizeof(Index));
// delete old stuff
delete[] m_values;
delete[] m_indices;
@@ -221,7 +230,7 @@ class CompressedStorage
protected:
Scalar* m_values;
- int* m_indices;
+ Index* m_indices;
size_t m_size;
size_t m_allocatedSize;
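// Standalone sketch of the bisection behind searchLowerIndex above: it
// returns the largest k such that index[j] < key for all j in [0,k), i.e.
// the first slot whose stored index is >= key (names below are ours, not
// Eigen's):

#include <cassert>
#include <cstddef>

static std::size_t lowerIndex(const int* index, std::size_t start,
                              std::size_t end, int key)
{
  while (end > start)
  {
    std::size_t mid = (start + end) / 2; // bisect the remaining range
    if (index[mid] < key)
      start = mid + 1;
    else
      end = mid;
  }
  return start;
}

int main()
{
  const int idx[] = { 1, 3, 3, 7, 9 };
  assert(lowerIndex(idx, 0, 5, 3) == 1); // only idx[0] = 1 is < 3
  assert(lowerIndex(idx, 0, 5, 8) == 4); // 1, 3, 3, 7 are all < 8
  assert(lowerIndex(idx, 0, 5, 0) == 0); // nothing is < 0
  return 0;
}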
diff --git a/extern/Eigen2/Eigen/src/Sparse/CoreIterators.h b/extern/Eigen3/Eigen/src/Sparse/CoreIterators.h
index f1520a585ca..b4beaeee69e 100644
--- a/extern/Eigen2/Eigen/src/Sparse/CoreIterators.h
+++ b/extern/Eigen3/Eigen/src/Sparse/CoreIterators.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -35,13 +35,16 @@
*/
// generic version for dense matrix and expressions
-template<typename Derived> class MatrixBase<Derived>::InnerIterator
+template<typename Derived> class DenseBase<Derived>::InnerIterator
{
+ protected:
typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+
enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
public:
- EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, int outer)
- : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.rows())
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer)
+ : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.innerSize())
{}
EIGEN_STRONG_INLINE Scalar value() const
@@ -52,17 +55,17 @@ template<typename Derived> class MatrixBase<Derived>::InnerIterator
EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
- EIGEN_STRONG_INLINE int index() const { return m_inner; }
- inline int row() const { return IsRowMajor ? m_outer : index(); }
- inline int col() const { return IsRowMajor ? index() : m_outer; }
+ EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
protected:
const Derived& m_expression;
- int m_inner;
- const int m_outer;
- const int m_end;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
};
#endif // EIGEN_COREITERATORS_H
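// Hedged usage sketch for the dense InnerIterator defined above, assuming
// Eigen 3 (CoreIterators.h is pulled in through the Sparse module here):

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::Matrix3d m;
  m << 1, 0, 2,
       0, 3, 0,
       4, 0, 5;
  // For a column-major matrix, "outer" indexes columns; the dense iterator
  // visits every coefficient of each inner vector, zeros included.
  for (int outer = 0; outer < m.outerSize(); ++outer)
    for (Eigen::Matrix3d::InnerIterator it(m, outer); it; ++it)
      std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
  return 0;
}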
diff --git a/extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h
index 01f97cd6d94..93e75f4c601 100644
--- a/extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h
+++ b/extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -35,65 +35,75 @@
* random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is
* nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows
* otherwise.
- *
+ *
 * Internally, the data are stored as a std::vector of compressed vectors. The performance of random writes might
 * decrease as the number of nonzeros per inner vector increases. In practice, we observed very good performance
 * up to about 100 nonzeros/vector, and the performance remains relatively good up to 500 nonzeros/vector.
*
* \see SparseMatrix
*/
-template<typename _Scalar, int _Flags>
-struct ei_traits<DynamicSparseMatrix<_Scalar, _Flags> >
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<DynamicSparseMatrix<_Scalar, _Options, _Index> >
{
typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
enum {
RowsAtCompileTime = Dynamic,
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
- Flags = SparseBit | _Flags,
+ Flags = _Options | NestByRefBit | LvalueBit,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
SupportedAccessPatterns = OuterRandomAccessPattern
};
};
+}
-template<typename _Scalar, int _Flags>
+template<typename _Scalar, int _Options, typename _Index>
class DynamicSparseMatrix
- : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Flags> >
+ : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Options, _Index> >
{
public:
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(DynamicSparseMatrix)
+ EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix)
+ // FIXME: why are these operators already available ???
// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=)
// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=)
typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ using Base::operator=;
+ enum {
+ Options = _Options
+ };
protected:
- enum { IsRowMajor = Base::IsRowMajor };
typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
- int m_innerSize;
- std::vector<CompressedStorage<Scalar> > m_data;
+ Index m_innerSize;
+ std::vector<CompressedStorage<Scalar,Index> > m_data;
public:
-
- inline int rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
- inline int cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
- inline int innerSize() const { return m_innerSize; }
- inline int outerSize() const { return m_data.size(); }
- inline int innerNonZeros(int j) const { return m_data[j].size(); }
-
- std::vector<CompressedStorage<Scalar> >& _data() { return m_data; }
- const std::vector<CompressedStorage<Scalar> >& _data() const { return m_data; }
+
+ inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
+ inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
+
+ std::vector<CompressedStorage<Scalar,Index> >& _data() { return m_data; }
+ const std::vector<CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
/** \returns the coefficient value at given position \a row, \a col
 * This operation involves a log(rho*outer_size) binary search.
*/
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
return m_data[outer].at(inner);
}
@@ -101,73 +111,74 @@ class DynamicSparseMatrix
 * This operation involves a log(rho*outer_size) binary search. If the coefficient does not
* exist yet, then a sorted insertion into a sequential buffer is performed.
*/
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
return m_data[outer].atWithInsertion(inner);
}
class InnerIterator;
- inline void setZero()
+ void setZero()
{
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
m_data[j].clear();
}
/** \returns the number of non zero coefficients */
- inline int nonZeros() const
+ Index nonZeros() const
{
- int res = 0;
- for (int j=0; j<outerSize(); ++j)
- res += m_data[j].size();
+ Index res = 0;
+ for (Index j=0; j<outerSize(); ++j)
+ res += static_cast<Index>(m_data[j].size());
return res;
}
- /** Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
- inline void startFill(int reserveSize = 1000)
+
+
+ void reserve(Index reserveSize = 1000)
{
if (outerSize()>0)
{
- int reserveSizePerVector = std::max(reserveSize/outerSize(),4);
- for (int j=0; j<outerSize(); ++j)
+ Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
+ for (Index j=0; j<outerSize(); ++j)
{
- m_data[j].clear();
m_data[j].reserve(reserveSizePerVector);
}
}
}
- /** inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
- * 1 - the coefficient does not exist yet
- * 2 - this the coefficient with greater inner coordinate for the given outer coordinate.
- * In other words, assuming \c *this is column-major, then there must not exists any nonzero coefficient of coordinates
- * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
- *
- * \see fillrand(), coeffRef()
- */
- inline Scalar& fill(int row, int col)
+ /** Does nothing: provided for compatibility with SparseMatrix */
+ inline void startVec(Index /*outer*/) {}
+
+ /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one of the given inner vector.
+ *
+ * \sa insert, insertBackByOuterInner */
+ inline Scalar& insertBack(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
- ei_assert(outer<int(m_data.size()) && inner<m_innerSize);
- ei_assert((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner));
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \sa insertBack */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(outer<Index(m_data.size()) && inner<m_innerSize && "out of range");
+ eigen_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
+ && "wrong sorted insertion");
m_data[outer].append(0, inner);
return m_data[outer].value(m_data[outer].size()-1);
}
- /** Like fill() but with random inner coordinates.
- * Compared to the generic coeffRef(), the unique limitation is that we assume
- * the coefficient does not exist yet.
- */
- inline Scalar& fillrand(int row, int col)
+ inline Scalar& insert(Index row, Index col)
{
- const int outer = IsRowMajor ? row : col;
- const int inner = IsRowMajor ? col : row;
-
- int startId = 0;
- int id = m_data[outer].size() - 1;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index startId = 0;
+ Index id = static_cast<Index>(m_data[outer].size()) - 1;
m_data[outer].resize(id+2,1);
while ( (id >= startId) && (m_data[outer].index(id) > inner) )
@@ -181,37 +192,38 @@ class DynamicSparseMatrix
return m_data[outer].value(id+1);
}
- /** Does nothing. Provided for compatibility with SparseMatrix. */
- inline void endFill() {}
-
- void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
+ /** Does nothing: provided for compatibility with SparseMatrix */
+ inline void finalize() {}
+
+ /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
- for (int j=0; j<outerSize(); ++j)
+ for (Index j=0; j<outerSize(); ++j)
m_data[j].prune(reference,epsilon);
}
/** Resize the matrix without preserving the data (the matrix is set to zero)
*/
- void resize(int rows, int cols)
+ void resize(Index rows, Index cols)
{
- const int outerSize = IsRowMajor ? rows : cols;
+ const Index outerSize = IsRowMajor ? rows : cols;
m_innerSize = IsRowMajor ? cols : rows;
setZero();
- if (int(m_data.size()) != outerSize)
+ if (Index(m_data.size()) != outerSize)
{
m_data.resize(outerSize);
}
}
-
- void resizeAndKeepData(int rows, int cols)
+
+ void resizeAndKeepData(Index rows, Index cols)
{
- const int outerSize = IsRowMajor ? rows : cols;
- const int innerSize = IsRowMajor ? cols : rows;
+ const Index outerSize = IsRowMajor ? rows : cols;
+ const Index innerSize = IsRowMajor ? cols : rows;
if (m_innerSize>innerSize)
{
// remove all coefficients with innerCoord>=innerSize
// TODO
- std::cerr << "not implemented yet\n";
+ //std::cerr << "not implemented yet\n";
exit(2);
}
if (m_data.size() != outerSize)
@@ -223,20 +235,20 @@ class DynamicSparseMatrix
inline DynamicSparseMatrix()
: m_innerSize(0), m_data(0)
{
- ei_assert(innerSize()==0 && outerSize()==0);
+ eigen_assert(innerSize()==0 && outerSize()==0);
}
- inline DynamicSparseMatrix(int rows, int cols)
+ inline DynamicSparseMatrix(Index rows, Index cols)
: m_innerSize(0)
{
resize(rows, cols);
}
template<typename OtherDerived>
- inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
: m_innerSize(0)
{
- *this = other.derived();
+ Base::operator=(other.derived());
}
inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
@@ -267,33 +279,68 @@ class DynamicSparseMatrix
return *this;
}
- template<typename OtherDerived>
- inline DynamicSparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ /** Destructor */
+ inline ~DynamicSparseMatrix() {}
+
+ public:
+
+ /** \deprecated
+ * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
{
- return SparseMatrixBase<DynamicSparseMatrix>::operator=(other.derived());
+ setZero();
+ reserve(reserveSize);
}
- /** Destructor */
- inline ~DynamicSparseMatrix() {}
+ /** \deprecated use insert()
+ * inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
+ * 1 - the coefficient does not exist yet
+ * 2 - it is the coefficient with the greatest inner coordinate for the given outer coordinate.
+ * In other words, assuming \c *this is column-major, there must not exist any nonzero coefficient of coordinates
+ * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
+ *
+ * \see fillrand(), coeffRef()
+ */
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return insertBackByOuterInner(outer,inner);
+ }
+
+ /** \deprecated use insert()
+ * Like fill() but with random inner coordinates.
+ * Compared to the generic coeffRef(), the unique limitation is that we assume
+ * the coefficient does not exist yet.
+ */
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
+ {
+ return insert(row,col);
+ }
+
+ /** \deprecated use finalize()
+ * Does nothing. Provided for compatibility with SparseMatrix. */
+ EIGEN_DEPRECATED void endFill() {}
+
+# ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+# include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+# endif
};
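
The deprecated members above map one-to-one onto the current API; a hedged migration sketch (reserve() and setZero() are declared elsewhere in this class):

    Eigen::DynamicSparseMatrix<double> m(100, 100);
    // old, deprecated              current equivalent
    // m.startFill(n);              m.setZero(); m.reserve(n);
    // m.fill(i, j) = v;            m.insertBack(i, j) = v;   // strictly sorted appends
    // m.fillrand(i, j) = v;        m.insert(i, j) = v;       // any order within a vector
    // m.endFill();                 m.finalize();             // no-op for this class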
-template<typename Scalar, int _Flags>
-class DynamicSparseMatrix<Scalar,_Flags>::InnerIterator : public SparseVector<Scalar,_Flags>::InnerIterator
+template<typename Scalar, int _Options, typename _Index>
+class DynamicSparseMatrix<Scalar,_Options,_Index>::InnerIterator : public SparseVector<Scalar,_Options>::InnerIterator
{
- typedef typename SparseVector<Scalar,_Flags>::InnerIterator Base;
+ typedef typename SparseVector<Scalar,_Options>::InnerIterator Base;
public:
- InnerIterator(const DynamicSparseMatrix& mat, int outer)
+ InnerIterator(const DynamicSparseMatrix& mat, Index outer)
: Base(mat.m_data[outer]), m_outer(outer)
{}
-
- inline int row() const { return IsRowMajor ? m_outer : Base::index(); }
- inline int col() const { return IsRowMajor ? Base::index() : m_outer; }
- protected:
- const int m_outer;
+ inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
+ inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
- private:
- InnerIterator& operator=(const InnerIterator&);
+ protected:
+ const Index m_outer;
};
#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h
index f4935d8344e..31a431fb224 100644
--- a/extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h
+++ b/extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -34,54 +34,56 @@
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
*
*/
-template<typename _Scalar, int _Flags>
-struct ei_traits<MappedSparseMatrix<_Scalar, _Flags> > : ei_traits<SparseMatrix<_Scalar, _Flags> >
+namespace internal {
+template<typename _Scalar, int _Flags, typename _Index>
+struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
{};
+}
-template<typename _Scalar, int _Flags>
+template<typename _Scalar, int _Flags, typename _Index>
class MappedSparseMatrix
- : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags> >
+ : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
{
public:
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(MappedSparseMatrix)
+ EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
protected:
enum { IsRowMajor = Base::IsRowMajor };
- int m_outerSize;
- int m_innerSize;
- int m_nnz;
- int* m_outerIndex;
- int* m_innerIndices;
+ Index m_outerSize;
+ Index m_innerSize;
+ Index m_nnz;
+ Index* m_outerIndex;
+ Index* m_innerIndices;
Scalar* m_values;
public:
- inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
- inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
- inline int innerSize() const { return m_innerSize; }
- inline int outerSize() const { return m_outerSize; }
- inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+ inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
//----------------------------------------
// direct access interface
inline const Scalar* _valuePtr() const { return m_values; }
inline Scalar* _valuePtr() { return m_values; }
- inline const int* _innerIndexPtr() const { return m_innerIndices; }
- inline int* _innerIndexPtr() { return m_innerIndices; }
+ inline const Index* _innerIndexPtr() const { return m_innerIndices; }
+ inline Index* _innerIndexPtr() { return m_innerIndices; }
- inline const int* _outerIndexPtr() const { return m_outerIndex; }
- inline int* _outerIndexPtr() { return m_outerIndex; }
+ inline const Index* _outerIndexPtr() const { return m_outerIndex; }
+ inline Index* _outerIndexPtr() { return m_outerIndex; }
//----------------------------------------
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
- const int outer = RowMajor ? row : col;
- const int inner = RowMajor ? col : row;
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
- int start = m_outerIndex[outer];
- int end = m_outerIndex[outer+1];
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
if (start==end)
return Scalar(0);
else if (end>0 && inner==m_innerIndices[end-1])
@@ -89,57 +91,45 @@ class MappedSparseMatrix
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
- const int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
- const int id = r-&m_innerIndices[0];
+ const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const Index id = r-&m_innerIndices[0];
return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
}
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
- const int outer = RowMajor ? row : col;
- const int inner = RowMajor ? col : row;
-
- int start = m_outerIndex[outer];
- int end = m_outerIndex[outer+1];
- ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
- ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
- const int id = r-&m_innerIndices[0];
- ei_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
+ const Index id = r-&m_innerIndices[0];
+ eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
return m_values[id];
}
class InnerIterator;
/** \returns the number of non zero coefficients */
- inline int nonZeros() const { return m_nnz; }
+ inline Index nonZeros() const { return m_nnz; }
- inline MappedSparseMatrix(int rows, int cols, int nnz, int* outerIndexPtr, int* innerIndexPtr, Scalar* valuePtr)
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
: m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
m_innerIndices(innerIndexPtr), m_values(valuePtr)
{}
- #ifdef EIGEN_TAUCS_SUPPORT
- explicit MappedSparseMatrix(taucs_ccs_matrix& taucsMatrix);
- #endif
-
- #ifdef EIGEN_CHOLMOD_SUPPORT
- explicit MappedSparseMatrix(cholmod_sparse& cholmodMatrix);
- #endif
-
- #ifdef EIGEN_SUPERLU_SUPPORT
- explicit MappedSparseMatrix(SluMatrix& sluMatrix);
- #endif
-
/** Empty destructor */
inline ~MappedSparseMatrix() {}
};
-template<typename Scalar, int _Flags>
-class MappedSparseMatrix<Scalar,_Flags>::InnerIterator
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
{
public:
- InnerIterator(const MappedSparseMatrix& mat, int outer)
+ InnerIterator(const MappedSparseMatrix& mat, Index outer)
: m_matrix(mat),
m_outer(outer),
m_id(mat._outerIndexPtr()[outer]),
@@ -148,7 +138,7 @@ class MappedSparseMatrix<Scalar,_Flags>::InnerIterator
{}
template<unsigned int Added, unsigned int Removed>
- InnerIterator(const Flagged<MappedSparseMatrix,Added,Removed>& mat, int outer)
+ InnerIterator(const Flagged<MappedSparseMatrix,Added,Removed>& mat, Index outer)
: m_matrix(mat._expression()), m_id(m_matrix._outerIndexPtr()[outer]),
m_start(m_id), m_end(m_matrix._outerIndexPtr()[outer+1])
{}
@@ -158,18 +148,18 @@ class MappedSparseMatrix<Scalar,_Flags>::InnerIterator
inline Scalar value() const { return m_matrix._valuePtr()[m_id]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix._valuePtr()[m_id]); }
- inline int index() const { return m_matrix._innerIndexPtr()[m_id]; }
- inline int row() const { return IsRowMajor ? m_outer : index(); }
- inline int col() const { return IsRowMajor ? index() : m_outer; }
+ inline Index index() const { return m_matrix._innerIndexPtr()[m_id]; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
protected:
const MappedSparseMatrix& m_matrix;
- const int m_outer;
- int m_id;
- const int m_start;
- const int m_end;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
};
#endif // EIGEN_MAPPED_SPARSEMATRIX_H
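
A minimal sketch of mapping existing compressed-column arrays through this class, using the constructor declared above (data is hypothetical; no copy takes place):

    // 3x3 column-major matrix with nonzeros (0,0)=1, (2,0)=2, (1,1)=3, (2,2)=4.
    double values[] = { 1.0, 2.0, 3.0, 4.0 };
    int    inner[]  = { 0, 2, 1, 2 };   // row indices, sorted within each column
    int    outer[]  = { 0, 2, 3, 4 };   // column start offsets, cols+1 entries
    Eigen::MappedSparseMatrix<double> m(3, 3, 4, outer, inner, values);
    double x = m.coeff(2, 0);           // 2.0; found by the last-coefficient shortcut
    m.coeffRef(1, 1) = 5.0;             // writes through to values[2]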
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseAssign.h b/extern/Eigen3/Eigen/src/Sparse/SparseAssign.h
index e69de29bb2d..e69de29bb2d 100644
--- a/extern/Eigen2/Eigen/src/Sparse/SparseAssign.h
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseAssign.h
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseBlock.h b/extern/Eigen3/Eigen/src/Sparse/SparseBlock.h
index ae77a77879b..8079c999994 100644
--- a/extern/Eigen2/Eigen/src/Sparse/SparseBlock.h
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseBlock.h
@@ -1,8 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
-// Copyright (C) 2008 Daniel Gomez Ferro <dgomezferro@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -26,49 +25,58 @@
#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H
+namespace internal {
template<typename MatrixType, int Size>
-struct ei_traits<SparseInnerVectorSet<MatrixType, Size> >
+struct traits<SparseInnerVectorSet<MatrixType, Size> >
{
- typedef typename ei_traits<MatrixType>::Scalar Scalar;
+ typedef typename traits<MatrixType>::Scalar Scalar;
+ typedef typename traits<MatrixType>::Index Index;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef MatrixXpr XprKind;
enum {
IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
Flags = MatrixType::Flags,
RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
CoeffReadCost = MatrixType::CoeffReadCost
};
};
+} // end namespace internal
template<typename MatrixType, int Size>
-class SparseInnerVectorSet : ei_no_assignment_operator,
+class SparseInnerVectorSet : internal::no_assignment_operator,
public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
{
- enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
public:
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
class InnerIterator: public MatrixType::InnerIterator
{
public:
- inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
- : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer)
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
-
- private:
- InnerIterator& operator=(const InnerIterator&);
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
};
- inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
: m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
{
- ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
}
- inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
: m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
{
- ei_assert(Size!=Dynamic);
- ei_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
}
// template<typename OtherDerived>
@@ -83,15 +91,14 @@ class SparseInnerVectorSet : ei_no_assignment_operator,
// return *this;
// }
- EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
const typename MatrixType::Nested m_matrix;
- int m_outerStart;
- const ei_int_if_dynamic<Size> m_outerSize;
-
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
};
/***************************************************************************
@@ -103,31 +110,34 @@ class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size>
: public SparseMatrixBase<SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size> >
{
typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType;
- enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
public:
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
class InnerIterator: public MatrixType::InnerIterator
{
public:
- inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
- : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer)
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
- private:
- InnerIterator& operator=(const InnerIterator&);
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
};
- inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
: m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
{
- ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
}
- inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
: m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
{
- ei_assert(Size!=Dynamic);
- ei_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
}
template<typename OtherDerived>
@@ -142,7 +152,7 @@ class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size>
else
{
// evaluate/copy vector per vector
- for (int j=0; j<m_outerSize.value(); ++j)
+ for (Index j=0; j<m_outerSize.value(); ++j)
{
SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
@@ -156,20 +166,35 @@ class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size>
return operator=<SparseInnerVectorSet>(other);
}
+ Index nonZeros() const
+ {
+ Index count = 0;
+ for (Index j=0; j<m_outerSize.value(); ++j)
+ count += m_matrix._data()[m_outerStart+j].size();
+ return count;
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ eigen_assert(m_matrix.data()[m_outerStart].size()>0);
+ return m_matrix.data()[m_outerStart].value(m_matrix.data()[m_outerStart].size()-1);
+ }
+
// template<typename Sparse>
// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
// {
// return *this;
// }
- EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
const typename MatrixType::Nested m_matrix;
- int m_outerStart;
- const ei_int_if_dynamic<Size> m_outerSize;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
};
@@ -177,55 +202,113 @@ class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size>
/***************************************************************************
* specialisation for SparseMatrix
***************************************************************************/
-/*
-template<typename _Scalar, int _Options, int Size>
-class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options>, Size>
+
+template<typename _Scalar, int _Options, typename _Index, int Size>
+class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
 : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size> >
{
- typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType;
- enum { IsRowMajor = ei_traits<SparseInnerVectorSet>::IsRowMajor };
+ typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
public:
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
class InnerIterator: public MatrixType::InnerIterator
{
public:
- inline InnerIterator(const SparseInnerVectorSet& xpr, int outer)
- : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer)
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
{}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
};
- inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
: m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
{
- ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
}
- inline SparseInnerVectorSet(const MatrixType& matrix, int outer)
- : m_matrix(matrix), m_outerStart(outer)
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
{
- ei_assert(Size==1);
- ei_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ eigen_assert(Size==1);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
}
template<typename OtherDerived>
inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
{
- if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit))
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _NestedMatrixType;
+ _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);
+ // This assignment is slow if this vector set is not empty
+ // and/or it is not at the end of the nonzeros of the underlying matrix.
+
+ // 1 - eval to a temporary to avoid transposition and/or aliasing issues
+ SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, Index> tmp(other);
+
+ // 2 - let's check whether there is enough allocated memory
+ Index nnz = tmp.nonZeros();
+ Index nnz_previous = nonZeros();
+ Index free_size = matrix.data().allocatedSize() - nnz_previous;
+ std::size_t nnz_head = m_outerStart==0 ? 0 : matrix._outerIndexPtr()[m_outerStart];
+ std::size_t tail = m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()];
+ std::size_t nnz_tail = matrix.nonZeros() - tail;
+
+ if(nnz>free_size)
{
- // need to transpose => perform a block evaluation followed by a big swap
- DynamicSparseMatrix<Scalar,IsRowMajor?RowMajorBit:0> aux(other);
- *this = aux.markAsRValue();
+ // realloc manually to reduce copies
+ typename MatrixType::Storage newdata(m_matrix.nonZeros() - nnz_previous + nnz);
+
+ std::memcpy(&newdata.value(0), &m_matrix.data().value(0), nnz_head*sizeof(Scalar));
+ std::memcpy(&newdata.index(0), &m_matrix.data().index(0), nnz_head*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+
+ matrix.data().swap(newdata);
}
else
{
- // evaluate/copy vector per vector
- for (int j=0; j<m_outerSize.value(); ++j)
+ // no need to realloc, simply copy the tail at its respective position and insert tmp
+ matrix.data().resize(nnz_head + nnz + nnz_tail);
+
+ if(nnz<nnz_previous)
{
- SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
- m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
+ std::memcpy(&matrix.data().value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+ }
+ else
+ {
+ for(Index i=nnz_tail-1; i>=0; --i)
+ {
+ matrix.data().value(nnz_head+nnz+i) = matrix.data().value(tail+i);
+ matrix.data().index(nnz_head+nnz+i) = matrix.data().index(tail+i);
+ }
}
+
+ std::memcpy(&matrix.data().value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
}
+
+ // update outer index pointers
+ Index p = nnz_head;
+ for(Index k=0; k<m_outerSize.value(); ++k)
+ {
+ matrix._outerIndexPtr()[m_outerStart+k] = p;
+ p += tmp.innerVector(k).nonZeros();
+ }
+ std::ptrdiff_t offset = nnz - nnz_previous;
+ for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+ {
+ matrix._outerIndexPtr()[k] += offset;
+ }
+
return *this;
}
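
To make the bookkeeping above concrete, a worked trace with hypothetical numbers (column-major, block of 2 columns starting at column 3):

    // Storage is laid out as [ head | old block | tail ]:
    //   nnz_head = outerIndex[3]       = 5   nonzeros before the block
    //   tail     = outerIndex[3 + 2]   = 9   so nnz_previous = 9 - 5 = 4
    //   nnz_tail = nonZeros() - tail   = 6   nonzeros after the block
    // Assigning a block with nnz = 7 when free_size < 7 takes the realloc
    // branch: the head (5) and tail (6) entries are memcpy'd around the new
    // block, then every outer index from the block end to outerSize() is
    // shifted by offset = nnz - nnz_previous = 7 - 4 = 3.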
@@ -236,9 +319,31 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options>, Size>
inline const Scalar* _valuePtr() const
{ return m_matrix._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
- inline const int* _innerIndexPtr() const
+ inline Scalar* _valuePtr()
+ { return m_matrix.const_cast_derived()._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* _innerIndexPtr() const
{ return m_matrix._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
- inline const int* _outerIndexPtr() const { return m_matrix._outerIndexPtr() + m_outerStart; }
+ inline Index* _innerIndexPtr()
+ { return m_matrix.const_cast_derived()._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* _outerIndexPtr() const
+ { return m_matrix._outerIndexPtr() + m_outerStart; }
+ inline Index* _outerIndexPtr()
+ { return m_matrix.const_cast_derived()._outerIndexPtr() + m_outerStart; }
+
+ Index nonZeros() const
+ {
+ return std::size_t(m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()])
+ - std::size_t(m_matrix._outerIndexPtr()[m_outerStart]);
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ eigen_assert(nonZeros()>0);
+ return m_matrix._valuePtr()[m_matrix._outerIndexPtr()[m_outerStart+1]-1];
+ }
// template<typename Sparse>
// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
@@ -246,22 +351,22 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options>, Size>
// return *this;
// }
- EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
- EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
const typename MatrixType::Nested m_matrix;
- int m_outerStart;
- const ei_int_if_dynamic<Size> m_outerSize;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
};
-*/
+
//----------
/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(int i)
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
{
EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
return innerVector(i);
@@ -270,7 +375,7 @@ SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(int i)
/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
* (read-only version) */
template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(int i) const
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
{
EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
return innerVector(i);
@@ -278,7 +383,7 @@ const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(int i) cons
/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(int i)
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
{
EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
return innerVector(i);
@@ -287,7 +392,7 @@ SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(int i)
/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
* (read-only version) */
template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(int i) const
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
{
EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
return innerVector(i);
@@ -297,21 +402,21 @@ const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(int i) cons
* is col-major (resp. row-major).
*/
template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(int outer)
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(int outer) const
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
//----------
/** \returns the \a size consecutive rows of the matrix \c *this starting at row \a start. For row-major matrices only. */
template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(int start, int size)
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size)
{
EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
return innerVectors(start, size);
@@ -320,7 +425,7 @@ SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(int sta
/** \returns the \a size consecutive rows of the matrix \c *this starting at row \a start. For row-major matrices only.
* (read-only version) */
template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(int start, int size) const
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size) const
{
EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
return innerVectors(start, size);
@@ -328,7 +433,7 @@ const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(i
/** \returns the \a size consecutive columns of the matrix \c *this starting at column \a start. For column-major matrices only. */
template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(int start, int size)
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size)
{
EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
return innerVectors(start, size);
@@ -337,7 +442,7 @@ SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(int sta
/** \returns the \a size consecutive columns of the matrix \c *this starting at column \a start. For column-major matrices only.
* (read-only version) */
template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(int start, int size) const
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size) const
{
EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
return innerVectors(start, size);
@@ -347,108 +452,14 @@ const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(i
* is col-major (resp. row-major).
*/
template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(int outerStart, int outerSize)
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this starting at \a outerStart if \c *this
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(int outerStart, int outerSize) const
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
-# if 0
-template<typename MatrixType, int BlockRows, int BlockCols, int PacketAccess>
-class Block<MatrixType,BlockRows,BlockCols,PacketAccess,IsSparse>
- : public SparseMatrixBase<Block<MatrixType,BlockRows,BlockCols,PacketAccess,IsSparse> >
-{
-public:
-
- _EIGEN_GENERIC_PUBLIC_INTERFACE(Block, SparseMatrixBase<Block>)
- class InnerIterator;
-
- /** Column or Row constructor
- */
- inline Block(const MatrixType& matrix, int i)
- : m_matrix(matrix),
- // It is a row if and only if BlockRows==1 and BlockCols==MatrixType::ColsAtCompileTime,
- // and it is a column if and only if BlockRows==MatrixType::RowsAtCompileTime and BlockCols==1,
- // all other cases are invalid.
- // The case a 1x1 matrix seems ambiguous, but the result is the same anyway.
- m_startRow( (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0),
- m_startCol( (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
- m_blockRows(matrix.rows()), // if it is a row, then m_blockRows has a fixed-size of 1, so no pb to try to overwrite it
- m_blockCols(matrix.cols()) // same for m_blockCols
- {
- ei_assert( (i>=0) && (
- ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i<matrix.rows())
- ||((BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) && i<matrix.cols())));
- }
-
- /** Fixed-size constructor
- */
- inline Block(const MatrixType& matrix, int startRow, int startCol)
- : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
- m_blockRows(matrix.rows()), m_blockCols(matrix.cols())
- {
- EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && RowsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
- ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows()
- && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols());
- }
-
- /** Dynamic-size constructor
- */
- inline Block(const MatrixType& matrix,
- int startRow, int startCol,
- int blockRows, int blockCols)
- : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
- m_blockRows(blockRows), m_blockCols(blockCols)
- {
- ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
- && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
- ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows()
- && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols());
- }
-
- inline int rows() const { return m_blockRows.value(); }
- inline int cols() const { return m_blockCols.value(); }
-
- inline int stride(void) const { return m_matrix.stride(); }
-
- inline Scalar& coeffRef(int row, int col)
- {
- return m_matrix.const_cast_derived()
- .coeffRef(row + m_startRow.value(), col + m_startCol.value());
- }
-
- inline const Scalar coeff(int row, int col) const
- {
- return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
- }
-
- inline Scalar& coeffRef(int index)
- {
- return m_matrix.const_cast_derived()
- .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
- }
-
- inline const Scalar coeff(int index) const
- {
- return m_matrix
- .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
- }
-
- protected:
-
- const typename MatrixType::Nested m_matrix;
- const ei_int_if_dynamic<MatrixType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
- const ei_int_if_dynamic<MatrixType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
- const ei_int_if_dynamic<RowsAtCompileTime> m_blockRows;
- const ei_int_if_dynamic<ColsAtCompileTime> m_blockCols;
-
-};
-#endif
-
#endif // EIGEN_SPARSE_BLOCK_H
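
A short usage sketch of the views defined in this file (assumes a column-major SparseMatrix; shapes are hypothetical):

    Eigen::SparseMatrix<double> A(10, 10), B(10, 4);
    // ... fill A and B ...
    // Views carry only an outer start/size; no coefficients are copied:
    Eigen::SparseMatrix<double> C(A.innerVectors(2, 4));  // columns 2..5, evaluated into C
    int k = A.innerVector(3).nonZeros();                  // stored entries in column 3
    // Writable assignment into a column range shifts the storage tail as
    // implemented by the operator= above:
    A.subcols(0, 4) = B;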
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h
new file mode 100644
index 00000000000..cde5bbc0300
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h
@@ -0,0 +1,375 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
+#define EIGEN_SPARSE_CWISE_BINARY_OP_H
+
+// Here we have to handle 3 cases:
+// 1 - sparse op dense
+// 2 - dense op sparse
+// 3 - sparse op sparse
+// We also need to implement a 4th iterator for:
+// 4 - dense op dense
+// Finally, we also need to distinguish between the product and other operations:
+// configuration returned mode
+// 1 - sparse op dense product sparse
+// generic dense
+// 2 - dense op sparse product sparse
+// generic dense
+// 3 - sparse op sparse product sparse
+// generic sparse
+// 4 - dense op dense product dense
+// generic dense
+
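
What the dispatch above means in practice, sketched with the public API (shapes hypothetical; assumes cwiseProduct accepts both sparse and dense right-hand sides, as the table implies):

    const int n = 4;
    Eigen::SparseMatrix<double> A(n, n), B(n, n);
    Eigen::MatrixXd D = Eigen::MatrixXd::Constant(n, n, 2.0);
    Eigen::SparseMatrix<double> S = A + B;              // union of patterns (generic, sparse)
    Eigen::SparseMatrix<double> P = A.cwiseProduct(B);  // intersection: 0 * x == 0
    Eigen::SparseMatrix<double> Q = A.cwiseProduct(D);  // sparse op dense product: stays
                                                        // sparse, iterating only A's entries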
+namespace internal {
+
+template<> struct promote_storage_type<Dense,Sparse>
+{ typedef Sparse ret; };
+
+template<> struct promote_storage_type<Sparse,Dense>
+{ typedef Sparse ret; };
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
+ typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
+ typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
+class sparse_cwise_binary_op_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
+ : public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+ public:
+ class InnerIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
+ : public internal::sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
+{
+ public:
+ typedef typename Lhs::Index Index;
+ typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
+ BinaryOp,Lhs,Rhs, InnerIterator> Base;
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
+ : Base(binOp.derived(),outer)
+ {}
+};
+
+/***************************************************************************
+* Implementation of inner-iterators
+***************************************************************************/
+
+// template<typename T> struct internal::func_is_conjunction { enum { ret = false }; };
+// template<typename T> struct internal::func_is_conjunction<internal::scalar_product_op<T> > { enum { ret = true }; };
+
+// TODO generalize the internal::scalar_product_op specialization to all conjunctions, if any!
+
+namespace internal {
+
+// sparse - sparse (generic)
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename traits<CwiseBinaryXpr>::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+ ++m_lhsIter;
+ ++m_rhsIter;
+ }
+ else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), Scalar(0));
+ ++m_lhsIter;
+ }
+ else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
+ {
+ m_id = m_rhsIter.index();
+ m_value = m_functor(Scalar(0), m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ {
+ m_value = 0; // this is to avoid a compilation warning
+ m_id = -1;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
+ EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ Index m_id;
+};
+
+// sparse - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ ++m_rhsIter;
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+};
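
The constructor and operator++ above are the classic two-pointer walk over two sorted index lists; the same idea in a self-contained (hypothetical) helper:

    #include <vector>
    #include <cstddef>
    // Collect the indices present in both sorted lists, as the product iterator does.
    void intersect(const std::vector<int>& a, const std::vector<int>& b,
                   std::vector<int>& out)
    {
      std::size_t i = 0, j = 0;
      while (i < a.size() && j < b.size())
      {
        if      (a[i] < b[j]) ++i;                       // advance the lagging side
        else if (b[j] < a[i]) ++j;
        else { out.push_back(a[i]); ++i; ++j; }          // indices match: emit
      }
    }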
+
+// sparse - dense (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Dense>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::RhsNested RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename Lhs::Index Index;
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_lhsIter.value(),
+ m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
+ protected:
+ const RhsNested m_rhs;
+ LhsIterator m_lhsIter;
+ const BinaryFunc m_functor;
+ const Index m_outer;
+};
+
+// dense - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Dense, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_rhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
+ protected:
+ const CwiseBinaryXpr& m_xpr;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+ const Index m_outer;
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of SparseMatrixBase and SparseCwise functions/operators
+***************************************************************************/
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename internal::traits<Derived>::Scalar>,
+// Derived, OtherDerived>
+// SparseMatrixBase<Derived>::operator-(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
+// Derived, OtherDerived>(derived(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
+{
+ return *this = derived() - other.derived();
+}
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// SparseMatrixBase<Derived>::operator+(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return CwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived>(derived(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
+{
+ return *this = derived() + other.derived();
+}
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+// SparseCwise<ExpressionType>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
+}
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+// SparseCwise<ExpressionType>::operator/(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
+// }
+//
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+// SparseCwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
+// }
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// inline ExpressionType& SparseCwise<ExpressionType>::operator*=(const SparseMatrixBase<OtherDerived> &other)
+// {
+// return m_matrix.const_cast_derived() = _expression() * other.derived();
+// }
+
+
+#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h
new file mode 100644
index 00000000000..aa068835fbb
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h
@@ -0,0 +1,146 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
+#define EIGEN_SPARSE_CWISE_UNARY_OP_H
+
+// template<typename UnaryOp, typename MatrixType>
+// struct internal::traits<SparseCwiseUnaryOp<UnaryOp, MatrixType> > : internal::traits<MatrixType>
+// {
+// typedef typename internal::result_of<
+// UnaryOp(typename MatrixType::Scalar)
+// >::type Scalar;
+// typedef typename MatrixType::Nested MatrixTypeNested;
+// typedef typename internal::remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+// enum {
+// CoeffReadCost = _MatrixTypeNested::CoeffReadCost + internal::functor_traits<UnaryOp>::Cost
+// };
+// };
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+// typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+
+ typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::InnerIterator
+{
+ typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+ typedef typename internal::traits<Derived>::_XprTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename MatrixType::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, Index outer)
+ : m_iter(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { ++m_iter; return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
+
+ protected:
+ MatrixTypeIterator m_iter;
+ const UnaryOp m_functor;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryView<ViewOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+// typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+
+ typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::InnerIterator
+{
+ typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+ typedef typename internal::traits<Derived>::_MatrixTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename MatrixType::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, Index outer)
+ : m_iter(unaryView.derived().nestedExpression(),outer), m_functor(unaryView.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { ++m_iter; return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
+ EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(m_iter.valueRef()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
+
+ protected:
+ MatrixTypeIterator m_iter;
+ const ViewOp m_functor;
+};
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator*=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() *= other;
+ return derived();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator/=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() /= other;
+ return derived();
+}
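+
+// A usage sketch for the two in-place scalar operators above (an
+// illustration only; "m" is an assumed, already filled SparseMatrix<double>):
+//   m *= 2.0;  // multiplies every stored coefficient in place
+//   m /= 4.0;  // divides every stored coefficient in place
+// Both loops visit only the stored entries, so the cost is O(nonZeros()).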
+
+#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h
new file mode 100644
index 00000000000..0f77aa5be99
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h
@@ -0,0 +1,231 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEDENSEPRODUCT_H
+#define EIGEN_SPARSEDENSEPRODUCT_H
+
+template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
+{
+ typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
+};
+
+template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
+{
+ typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type;
+};
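+
+// These selectors dispatch on the compile-time inner size of the product:
+// when it is 1 the product is an outer product and stays sparse, otherwise
+// a dense result is produced. A sketch ("v" a sparse column vector, "r" a
+// dense row vector, "m" a sparse matrix and "x" a dense vector are assumed):
+//   v * r  // inner size 1  -> SparseDenseOuterProduct
+//   m * x  // general case  -> SparseTimeDenseProduct (dense result)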
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, bool Tr>
+struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ typedef Sparse StorageKind;
+ typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
+ typename traits<Rhs>::Scalar>::ReturnType Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
+ RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
+
+ RowsAtCompileTime = Tr ? int(traits<Rhs>::RowsAtCompileTime) : int(traits<Lhs>::RowsAtCompileTime),
+ ColsAtCompileTime = Tr ? int(traits<Lhs>::ColsAtCompileTime) : int(traits<Rhs>::ColsAtCompileTime),
+ MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime) : int(traits<Lhs>::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime) : int(traits<Rhs>::MaxColsAtCompileTime),
+
+ Flags = Tr ? RowMajorBit : 0,
+
+ CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
+ };
+};
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, bool Tr>
+class SparseDenseOuterProduct
+ : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
+ typedef internal::traits<SparseDenseOuterProduct> Traits;
+
+ private:
+
+ typedef typename Traits::LhsNested LhsNested;
+ typedef typename Traits::RhsNested RhsNested;
+ typedef typename Traits::_LhsNested _LhsNested;
+ typedef typename Traits::_RhsNested _RhsNested;
+
+ public:
+
+ class InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+template<typename Lhs, typename Rhs, bool Transpose>
+class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
+{
+ typedef typename _LhsNested::InnerIterator Base;
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
+ : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
+ {
+ }
+
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return Transpose ? Base::row() : m_outer; }
+ inline Index col() const { return Transpose ? m_outer : Base::row(); }
+
+ inline Scalar value() const { return Base::value() * m_factor; }
+
+ protected:
+ int m_outer;
+ Scalar m_factor;
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
+ : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+ typedef MatrixXpr XprKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseTimeDenseProduct
+ : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
+
+ SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ typedef typename internal::remove_all<Lhs>::type _Lhs;
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit };
+ for(Index j=0; j<m_lhs.outerSize(); ++j)
+ {
+ typename Rhs::Scalar rhs_j = alpha * m_rhs.coeff(LhsIsRowMajor ? 0 : j,0);
+ typename Dest::RowXpr dest_j(dest.row(LhsIsRowMajor ? j : 0));
+ for(LhsInnerIterator it(m_lhs,j); it ;++it)
+ {
+ if(LhsIsRowMajor) dest_j += (alpha*it.value()) * m_rhs.row(it.index());
+ else if(Rhs::ColsAtCompileTime==1) dest.coeffRef(it.index()) += it.value() * rhs_j;
+ else dest.row(it.index()) += (alpha*it.value()) * m_rhs.row(j);
+ }
+ }
+ }
+
+ private:
+ SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
+};
+
+
+// dense = dense * sparse
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
+ : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class DenseTimeSparseProduct
+ : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
+
+ DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Rhs::InnerIterator RhsInnerIterator;
+ enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit };
+ for(Index j=0; j<m_rhs.outerSize(); ++j)
+ for(RhsInnerIterator i(m_rhs,j); i; ++i)
+ dest.col(RhsIsRowMajor ? i.index() : j) += (alpha*i.value()) * m_lhs.col(RhsIsRowMajor ? j : i.index());
+ }
+
+ private:
+ DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
+};
+
+// sparse * dense
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h
new file mode 100644
index 00000000000..fb9a29c051b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h
@@ -0,0 +1,195 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+
+// The product of a diagonal matrix with a sparse matrix can be easily
+// implemented using expression templates.
+// We have to consider two very different cases:
+// 1 - diag * row-major sparse
+// => each inner vector <=> scalar * sparse vector product
+// => so we can reuse CwiseUnaryOp::InnerIterator
+// 2 - diag * col-major sparse
+// => each inner vector <=> dense vector * sparse vector cwise product
+// => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
+// for that particular case
+// The two other cases are symmetric.
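+// For instance (a sketch; "d" is an assumed DiagonalMatrix and "s" a
+// compatible SparseMatrix):
+//   d * s  // case 1 or 2 above, depending on the storage order of s
+//   s * d  // handled by the two symmetric cases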
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<SparseDiagonalProduct<Lhs, Rhs> >
+{
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = _Lhs::RowsAtCompileTime,
+ ColsAtCompileTime = _Rhs::ColsAtCompileTime,
+
+ MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
+
+ SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
+ Flags = (SparseFlags&RowMajorBit),
+ CoeffReadCost = Dynamic
+ };
+};
+
+enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
+class sparse_diagonal_product_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseDiagonalProduct
+ : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
+ internal::no_assignment_operator
+{
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+
+ typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+ typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
+ RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
+ };
+
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
+
+ typedef internal::sparse_diagonal_product_inner_iterator_selector
+ <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.rhs().innerVector(outer).cwiseProduct(expr.lhs().diagonal()), 0)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.lhs().innerVector(outer).cwiseProduct(expr.rhs().diagonal().transpose()), 0)
+ {}
+};
+
+} // end namespace internal
+
+// SparseMatrixBase functions
+
+template<typename Derived>
+template<typename OtherDerived>
+const SparseDiagonalProduct<Derived,OtherDerived>
+SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
+{
+ return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
+}
+
+#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseDot.h b/extern/Eigen3/Eigen/src/Sparse/SparseDot.h
index 7a26e0f4ba5..1f10f71a402 100644
--- a/extern/Eigen2/Eigen/src/Sparse/SparseDot.h
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDot.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -27,23 +27,23 @@
template<typename Derived>
template<typename OtherDerived>
-typename ei_traits<Derived>::Scalar
+typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
- EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
- ei_assert(size() == other.size());
- ei_assert(other.size()>0 && "you are using a non initialized vector");
-
+ eigen_assert(size() == other.size());
+ eigen_assert(other.size()>0 && "you are using an uninitialized vector");
+
typename Derived::InnerIterator i(derived(),0);
Scalar res = 0;
while (i)
{
- res += i.value() * ei_conj(other.coeff(i.index()));
+ res += internal::conj(i.value()) * other.coeff(i.index());
++i;
}
return res;
@@ -51,17 +51,17 @@ SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
template<typename Derived>
template<typename OtherDerived>
-typename ei_traits<Derived>::Scalar
+typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
- EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
- ei_assert(size() == other.size());
-
+
+ eigen_assert(size() == other.size());
+
typename Derived::InnerIterator i(derived(),0);
typename OtherDerived::InnerIterator j(other.derived(),0);
Scalar res = 0;
@@ -69,7 +69,7 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
{
if (i.index()==j.index())
{
- res += i.value() * ei_conj(j.value());
+ res += internal::conj(i.value()) * j.value();
++i; ++j;
}
else if (i.index()<j.index())
@@ -81,17 +81,17 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
}
template<typename Derived>
-inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::squaredNorm() const
{
- return ei_real((*this).cwise().abs2().sum());
+ return internal::real((*this).cwiseAbs2().sum());
}
template<typename Derived>
-inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::norm() const
{
- return ei_sqrt(squaredNorm());
+ return internal::sqrt(squaredNorm());
}
#endif // EIGEN_SPARSE_DOT_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h b/extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h
index 355f4d52eab..f00b3d6469b 100644
--- a/extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -32,10 +32,10 @@
// typename NumTraits<Scalar>::Real prec
// ) const
// {
-// const typename ei_nested<Derived,2>::type nested(derived());
-// const typename ei_nested<OtherDerived,2>::type otherNested(other.derived());
+// const typename internal::nested<Derived,2>::type nested(derived());
+// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
// return (nested - otherNested).cwise().abs2().sum()
-// <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
+// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
// }
#endif // EIGEN_SPARSE_FUZZY_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h
new file mode 100644
index 00000000000..0e175ec6e71
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h
@@ -0,0 +1,651 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIX_H
+#define EIGEN_SPARSEMATRIX_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseMatrix
+ *
+ * \brief The main sparse matrix class
+ *
+ * This class implements a sparse matrix using the very common compressed row/column storage
+ * scheme.
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
+ * is RowMajor. The default is 0 which means column-major.
+ * \tparam _Index the type of the indices. Default is \c int.
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ */
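+
+// A minimal filling sketch (an illustration only; double scalars and the
+// default column-major storage are assumed, and insert(), finalize() and
+// coeff() are the members defined below):
+//   SparseMatrix<double> m(3,3);
+//   m.insert(0,0) = 1.0;      // the coefficient must not already exist
+//   m.insert(2,1) = 2.0;
+//   m.finalize();             // mandatory after an insertion session
+//   double x = m.coeff(2,1);  // x == 2.0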
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+
+} // end namespace internal
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseMatrix
+ : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+// using Base::operator=;
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+ // FIXME: why are these operators already available ???
+ // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=)
+ // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=)
+
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ typedef CompressedStorage<Scalar,Index> Storage;
+ enum {
+ Options = _Options
+ };
+
+ protected:
+
+ typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index* m_outerIndex;
+ CompressedStorage<Scalar,Index> m_data;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+ inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+
+ inline const Scalar* _valuePtr() const { return &m_data.value(0); }
+ inline Scalar* _valuePtr() { return &m_data.value(0); }
+
+ inline const Index* _innerIndexPtr() const { return &m_data.index(0); }
+ inline Index* _innerIndexPtr() { return &m_data.index(0); }
+
+ inline const Index* _outerIndexPtr() const { return m_outerIndex; }
+ inline Index* _outerIndexPtr() { return m_outerIndex; }
+
+ inline Storage& data() { return m_data; }
+ inline const Storage& data() const { return m_data; }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ const Index p = m_data.searchLowerIndex(start,end-1,inner);
+ eigen_assert((p<end) && (m_data.index(p)==inner) && "coeffRef cannot be called on a zero coefficient");
+ return m_data.value(p);
+ }
+
+ public:
+
+ class InnerIterator;
+
+ /** Removes all non zeros */
+ inline void setZero()
+ {
+ m_data.clear();
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
+
+ /** Preallocates \a reserveSize non zeros */
+ inline void reserve(Index reserveSize)
+ {
+ m_data.reserve(reserveSize);
+ }
+
+ //--- low level purely coherent filling ---
+
+ /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one according to the storage order
+ *
+ * Before filling a given inner vector you must call the startVec(Index) function.
+ *
+ * After an insertion session, you should call the finalize() function.
+ *
+ * \sa insert, insertBackByOuterInner, startVec */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \sa insertBack, startVec */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+ eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \warning use it only if you know what you are doing */
+ inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \sa insertBack, insertBackByOuterInner */
+ inline void startVec(Index outer)
+ {
+ eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
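+
+ // Sketch of the sequential filling protocol built from the members above
+ // (an illustration; "m" and "estimatedNNZ" are assumed placeholders, and
+ // inner indices must be strictly increasing within each inner vector):
+ //   m.setZero(); m.reserve(estimatedNNZ);
+ //   for(Index j=0; j<m.outerSize(); ++j) {
+ //     m.startVec(j);
+ //     m.insertBackByOuterInner(j,i) = v_i;  // for each nonzero (i,v_i) of vector j
+ //   }
+ //   m.finalize();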
+
+ //---
+
+ /** \returns a reference to a newly inserted non zero coefficient with coordinates \a row x \a col.
+ * The non zero coefficient must \b not already exist.
+ *
+ * \warning This function can be extremely slow if the non zero coefficients
+ * are not inserted in a coherent order.
+ *
+ * After an insertion session, you should call the finalize() function.
+ */
+ EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index previousOuter = outer;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+ {
+ m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ --previousOuter;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ // here we have to handle the tricky case where the outerIndex array
+ // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
+ // the 2nd inner vector...
+ bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+ && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+ size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+ size_t p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ float reallocRatio = 1;
+ if (m_data.allocatedSize()<=m_data.size())
+ {
+ // if there is no preallocated memory, let's reserve a minimum of 32 elements
+ if (m_data.size()==0)
+ {
+ m_data.reserve(32);
+ }
+ else
+ {
+ // we need to reallocate the data; to reduce the number of reallocations
+ // we use a smart resize algorithm based on the current filling ratio
+ // in addition, we use float to avoid integer overflows
+ float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
+ reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+ // furthermore we bound the realloc ratio to:
+ // 1) reduce the number of minor reallocs when the matrix is almost filled
+ // 2) avoid allocating too much memory when the matrix is almost empty
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+ }
+ }
+ m_data.resize(m_data.size()+1,reallocRatio);
+
+ if (!isLastVec)
+ {
+ if (previousOuter==-1)
+ {
+ // oops wrong guess.
+ // let's correct the outer offsets
+ for (Index k=0; k<=(outer+1); ++k)
+ m_outerIndex[k] = 0;
+ Index k=outer+1;
+ while(m_outerIndex[k]==0)
+ m_outerIndex[k++] = 1;
+ while (k<=m_outerSize && m_outerIndex[k]!=0)
+ m_outerIndex[k++]++;
+ p = 0;
+ --k;
+ k = m_outerIndex[k]-1;
+ while (k>0)
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ else
+ {
+ // we are not inserting into the last inner vec
+ // update outer indices:
+ Index j = outer+2;
+ while (j<=m_outerSize && m_outerIndex[j]!=0)
+ m_outerIndex[j++]++;
+ --j;
+ // shift data of last vecs:
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(p))
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ }
+
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
+
+
+
+
+ /** Must be called after inserting a set of non zero entries.
+ */
+ inline void finalize()
+ {
+ Index size = static_cast<Index>(m_data.size());
+ Index i = m_outerSize;
+ // find the last filled column
+ while (i>=0 && m_outerIndex[i]==0)
+ --i;
+ ++i;
+ while (i<=m_outerSize)
+ {
+ m_outerIndex[i] = size;
+ ++i;
+ }
+ }
+
+ /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ prune(default_prunning_func(reference,epsilon));
+ }
+
+ /** Suppress all nonzeros which do not satisfy the predicate \a keep.
+ * The functor type \a KeepFunc must implement the following function:
+ * \code
+ * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
+ * \endcode
+ * \sa prune(Scalar,RealScalar)
+ */
+ template<typename KeepFunc>
+ void prune(const KeepFunc& keep = KeepFunc())
+ {
+ Index k = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ Index previousStart = m_outerIndex[j];
+ m_outerIndex[j] = k;
+ Index end = m_outerIndex[j+1];
+ for(Index i=previousStart; i<end; ++i)
+ {
+ if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
+ {
+ m_data.value(k) = m_data.value(i);
+ m_data.index(k) = m_data.index(i);
+ ++k;
+ }
+ }
+ }
+ m_outerIndex[m_outerSize] = k;
+ m_data.resize(k,0);
+ }
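+
+ // Example predicate for the generic prune() above (a sketch; any functor
+ // providing the operator() signature documented above works):
+ //   struct KeepUpper {
+ //     bool operator()(const Index& row, const Index& col, const Scalar&) const
+ //     { return col>=row; }  // keep only the upper triangular part
+ //   };
+ //   m.prune(KeepUpper());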
+
+ /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero
+ * \sa resizeNonZeros(Index), reserve(), setZero()
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ m_data.clear();
+ if (m_outerSize != outerSize || m_outerSize==0)
+ {
+ delete[] m_outerIndex;
+ m_outerIndex = new Index [outerSize+1];
+ m_outerSize = outerSize;
+ }
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ }
+
+ /** Low level API
+ * Resize the nonzero vector to \a size */
+ void resizeNonZeros(Index size)
+ {
+ m_data.resize(size);
+ }
+
+ /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
+ inline SparseMatrix()
+ : m_outerSize(-1), m_innerSize(0), m_outerIndex(0)
+ {
+ resize(0, 0);
+ }
+
+ /** Constructs a \a rows \c x \a cols empty matrix */
+ inline SparseMatrix(Index rows, Index cols)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ resize(rows, cols);
+ }
+
+ /** Constructs a sparse matrix from the sparse expression \a other */
+ template<typename OtherDerived>
+ inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ *this = other.derived();
+ }
+
+ /** Copy constructor */
+ inline SparseMatrix(const SparseMatrix& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ *this = other.derived();
+ }
+
+ /** Swap the content of two sparse matrices of the same type (optimization) */
+ inline void swap(SparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_outerIndex, other.m_outerIndex);
+ std::swap(m_innerSize, other.m_innerSize);
+ std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseMatrix& operator=(const SparseMatrix& other)
+ {
+// std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n";
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ { return Base::operator=(product); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
+ { return Base::operator=(other); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
+ { return Base::operator=(other); }
+ #endif
+
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+ // two-pass algorithm:
+ // 1 - compute the number of coeffs per dest inner vector
+ // 2 - do the actual copy/eval
+ // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+ typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ resize(other.rows(), other.cols());
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ ++m_outerIndex[it.index()];
+
+ // prefix sum
+ Index count = 0;
+ VectorXi positions(outerSize());
+ for (Index j=0; j<outerSize(); ++j)
+ {
+ Index tmp = m_outerIndex[j];
+ m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ m_outerIndex[outerSize()] = count;
+ // alloc
+ m_data.resize(count);
+ // pass 2
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ m_data.index(pos) = j;
+ m_data.value(pos) = it.value();
+ }
+ }
+ return *this;
+ }
+ else
+ {
+ // there is no special optimization
+ return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
+ }
+ }
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+ {
+ EIGEN_DBG_SPARSE(
+ s << "Nonzero entries:\n";
+ for (Index i=0; i<m.nonZeros(); ++i)
+ {
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ }
+ s << std::endl;
+ s << std::endl;
+ s << "Column pointers:\n";
+ for (Index i=0; i<m.outerSize(); ++i)
+ {
+ s << m.m_outerIndex[i] << " ";
+ }
+ s << " $" << std::endl;
+ s << std::endl;
+ );
+ s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseMatrix()
+ {
+ delete[] m_outerIndex;
+ }
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ public:
+
+ /** \deprecated use setZero() and reserve()
+ * Initializes the filling process of \c *this.
+ * \param reserveSize approximate number of nonzeros
+ * Note that the matrix \c *this is zeroed.
+ */
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
+ {
+ setZero();
+ m_data.reserve(reserveSize);
+ }
+
+ /** \deprecated use insert()
+ * Like fill() but with random inner coordinates.
+ */
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
+ {
+ return insert(row,col);
+ }
+
+ /** \deprecated use insert()
+ */
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ Index i = outer;
+ while (i>=0 && m_outerIndex[i]==0)
+ {
+ m_outerIndex[i] = m_data.size();
+ --i;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+ else
+ {
+ eigen_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
+ }
+// std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n";
+ assert(size_t(m_outerIndex[outer+1]) == m_data.size());
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \deprecated use finalize */
+ EIGEN_DEPRECATED void endFill() { finalize(); }
+
+# ifdef EIGEN_SPARSEMATRIX_PLUGIN
+# include EIGEN_SPARSEMATRIX_PLUGIN
+# endif
+
+private:
+ struct default_prunning_func {
+ default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+ inline bool operator() (const Index&, const Index&, const Scalar& value) const
+ {
+ return !internal::isMuchSmallerThan(value, reference, epsilon);
+ }
+ Scalar reference;
+ RealScalar epsilon;
+ };
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseMatrix& mat, Index outer)
+ : m_values(mat._valuePtr()), m_indices(mat._innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer+1])
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+ inline Index index() const { return m_indices[m_id]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ const Index m_end;
+};
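+
+// Typical traversal sketch using the iterator above (an illustration; "m" is
+// an assumed, already filled SparseMatrix<double>, and process() a placeholder):
+//   for (int j=0; j<m.outerSize(); ++j)
+//     for (SparseMatrix<double>::InnerIterator it(m,j); it; ++it)
+//       process(it.row(), it.col(), it.value());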
+
+#endif // EIGEN_SPARSEMATRIX_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h b/extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h
index 468bc9e227c..c01981bc935 100644
--- a/extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -25,30 +25,55 @@
#ifndef EIGEN_SPARSEMATRIXBASE_H
#define EIGEN_SPARSEMATRIXBASE_H
-template<typename Derived> class SparseMatrixBase
+/** \ingroup Sparse_Module
+ *
+ * \class SparseMatrixBase
+ *
+ * \brief Base class of any sparse matrices or sparse expressions
+ *
+ * \tparam Derived
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
+ */
+template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
{
public:
- typedef typename ei_traits<Derived>::Scalar Scalar;
-// typedef typename Derived::InnerIterator InnerIterator;
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+
+ typedef SparseMatrixBase StorageBaseType;
+ typedef EigenBase<Derived> Base;
+
+ template<typename OtherDerived>
+ Derived& operator=(const EigenBase<OtherDerived> &other)
+ {
+ other.derived().evalTo(derived());
+ return derived();
+ }
+
+// using Base::operator=;
enum {
- RowsAtCompileTime = ei_traits<Derived>::RowsAtCompileTime,
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
/**< The number of rows at compile-time. This is just a copy of the value provided
* by the \a Derived type. If a value is not known at compile-time,
* it is set to the \a Dynamic constant.
* \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
- ColsAtCompileTime = ei_traits<Derived>::ColsAtCompileTime,
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
/**< The number of columns at compile-time. This is just a copy of the value provided
* by the \a Derived type. If a value is not known at compile-time,
* it is set to the \a Dynamic constant.
* \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
- SizeAtCompileTime = (ei_size_at_compile_time<ei_traits<Derived>::RowsAtCompileTime,
- ei_traits<Derived>::ColsAtCompileTime>::ret),
+ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime>::ret),
/**< This is equal to the number of coefficients, i.e. the number of
* rows times the number of columns, or to \a Dynamic if this is not
* known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
@@ -56,7 +81,7 @@ template<typename Derived> class SparseMatrixBase
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
- MaxSizeAtCompileTime = (ei_size_at_compile_time<MaxRowsAtCompileTime,
+ MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
MaxColsAtCompileTime>::ret),
IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
@@ -65,31 +90,51 @@ template<typename Derived> class SparseMatrixBase
* we are dealing with a column-vector (if there is only one column) or with
* a row-vector (if there is only one row). */
- Flags = ei_traits<Derived>::Flags,
+ Flags = internal::traits<Derived>::Flags,
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
*/
-
- CoeffReadCost = ei_traits<Derived>::CoeffReadCost,
+
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
/**< This is a rough measure of how expensive it is to read one coefficient from
* this expression.
*/
- IsRowMajor = Flags&RowMajorBit ? 1 : 0
+ IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
+ #endif
};
- /** \internal the return type of MatrixBase::conjugate() */
- typedef typename ei_meta_if<NumTraits<Scalar>::IsComplex,
- const SparseCwiseUnaryOp<ei_scalar_conjugate_op<Scalar>, Derived>,
- const Derived&
- >::ret ConjugateReturnType;
- /** \internal the return type of MatrixBase::real() */
- typedef CwiseUnaryOp<ei_scalar_real_op<Scalar>, Derived> RealReturnType;
- /** \internal the return type of MatrixBase::imag() */
- typedef CwiseUnaryOp<ei_scalar_imag_op<Scalar>, Derived> ImagReturnType;
+ /* \internal the return type of MatrixBase::conjugate() */
+// typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+// const SparseCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Derived>,
+// const Derived&
+// >::type ConjugateReturnType;
+ /* \internal the return type of MatrixBase::real() */
+// typedef SparseCwiseUnaryOp<internal::scalar_real_op<Scalar>, Derived> RealReturnType;
+ /* \internal the return type of MatrixBase::imag() */
+// typedef SparseCwiseUnaryOp<internal::scalar_imag_op<Scalar>, Derived> ImagReturnType;
/** \internal the return type of MatrixBase::adjoint() */
- typedef SparseTranspose</*NestByValue<*/typename ei_cleantype<ConjugateReturnType>::type> /*>*/
- AdjointReturnType;
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
+ Transpose<const Derived>
+ >::type AdjointReturnType;
+
+
+ typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+# include "../plugins/CommonCwiseUnaryOps.h"
+# include "../plugins/CommonCwiseBinaryOps.h"
+# include "../plugins/MatrixCwiseUnaryOps.h"
+# include "../plugins/MatrixCwiseBinaryOps.h"
+# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
+# include EIGEN_SPARSEMATRIXBASE_PLUGIN
+# endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is the "real scalar" type; if the \a Scalar type is already real numbers
@@ -100,9 +145,16 @@ template<typename Derived> class SparseMatrixBase
*/
typedef typename NumTraits<Scalar>::Real RealScalar;
+ /** \internal the return type of coeff()
+ */
+ typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
+
+ /** \internal Represents a matrix with all coefficients equal to one another */
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
+
/** type of the equivalent square matrix */
- typedef Matrix<Scalar,EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime),
- EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+ typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+ EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
inline Derived& derived() { return *static_cast<Derived*>(this); }
@@ -111,15 +163,15 @@ template<typename Derived> class SparseMatrixBase
#endif // not EIGEN_PARSED_BY_DOXYGEN
/** \returns the number of rows. \sa cols(), RowsAtCompileTime */
- inline int rows() const { return derived().rows(); }
+ inline Index rows() const { return derived().rows(); }
/** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
- inline int cols() const { return derived().cols(); }
+ inline Index cols() const { return derived().cols(); }
/** \returns the number of coefficients, which is \a rows()*cols().
* \sa rows(), cols(), SizeAtCompileTime. */
- inline int size() const { return rows() * cols(); }
+ inline Index size() const { return rows() * cols(); }
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
- inline int nonZeros() const { return derived().nonZeros(); }
+ inline Index nonZeros() const { return derived().nonZeros(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
@@ -127,16 +179,16 @@ template<typename Derived> class SparseMatrixBase
inline bool isVector() const { return rows()==1 || cols()==1; }
/** \returns the size of the storage major dimension,
* i.e., the number of columns for a columns major matrix, and the number of rows otherwise */
- int outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+ Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
/** \returns the size of the inner dimension according to the storage order,
* i.e., the number of rows for a columns major matrix, and the number of cols otherwise */
- int innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
+ Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
bool isRValue() const { return m_isRValue; }
Derived& markAsRValue() { m_isRValue = true; return derived(); }
SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
-
+
inline Derived& operator=(const Derived& other)
{
// std::cout << "Derived& operator=(const Derived& other)\n";
@@ -146,6 +198,13 @@ template<typename Derived> class SparseMatrixBase
this->operator=<Derived>(other);
return derived();
}
+
+ template<typename OtherDerived>
+ Derived& operator=(const ReturnByValue<OtherDerived>& other)
+ {
+ other.evalTo(derived());
+ return derived();
+ }
template<typename OtherDerived>
@@ -153,29 +212,29 @@ template<typename Derived> class SparseMatrixBase
{
// std::cout << "Derived& operator=(const MatrixBase<OtherDerived>& other)\n";
//const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- ei_assert(( ((ei_traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
(!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
"the transpose operation is supposed to be handled in SparseMatrix::operator=");
- const int outerSize = other.outerSize();
- //typedef typename ei_meta_if<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::ret TempType;
+ enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
+
+ const Index outerSize = other.outerSize();
+ //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
 // thanks to shallow copies, we always eval to a temporary
Derived temp(other.rows(), other.cols());
- temp.startFill(std::max(this->rows(),this->cols())*2);
- for (int j=0; j<outerSize; ++j)
+ temp.reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
{
+ temp.startVec(j);
for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
{
Scalar v = it.value();
if (v!=Scalar(0))
- {
- if (OtherDerived::Flags & RowMajorBit) temp.fill(j,it.index()) = v;
- else temp.fill(it.index(),j) = v;
- }
+ temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
}
}
- temp.endFill();
+ temp.finalize();
derived() = temp.markAsRValue();
}
@@ -188,25 +247,24 @@ template<typename Derived> class SparseMatrixBase
// std::cout << Flags << " " << OtherDerived::Flags << "\n";
const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
// std::cout << "eval transpose = " << transpose << "\n";
- const int outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+ const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
if ((!transpose) && other.isRValue())
{
// eval without temporary
derived().resize(other.rows(), other.cols());
- derived().startFill(std::max(this->rows(),this->cols())*2);
- for (int j=0; j<outerSize; ++j)
+ derived().setZero();
+ derived().reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
{
+ derived().startVec(j);
for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
{
Scalar v = it.value();
if (v!=Scalar(0))
- {
- if (IsRowMajor) derived().fill(j,it.index()) = v;
- else derived().fill(it.index(),j) = v;
- }
+ derived().insertBackByOuterInner(j,it.index()) = v;
}
}
- derived().endFill();
+ derived().finalize();
}
else
{
@@ -216,15 +274,18 @@ template<typename Derived> class SparseMatrixBase
}
template<typename Lhs, typename Rhs>
- inline Derived& operator=(const SparseProduct<Lhs,Rhs,SparseTimeSparseProduct>& product);
+ inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+
+ template<typename Lhs, typename Rhs>
+ inline void _experimentalNewProduct(const Lhs& lhs, const Rhs& rhs);
friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
{
if (Flags&RowMajorBit)
{
- for (int row=0; row<m.outerSize(); ++row)
+ for (Index row=0; row<m.outerSize(); ++row)
{
- int col = 0;
+ Index col = 0;
for (typename Derived::InnerIterator it(m.derived(), row); it; ++it)
{
for ( ; col<it.index(); ++col)
@@ -240,7 +301,7 @@ template<typename Derived> class SparseMatrixBase
else
{
if (m.cols() == 1) {
- int row = 0;
+ Index row = 0;
for (typename Derived::InnerIterator it(m.derived(), 0); it; ++it)
{
for ( ; row<it.index(); ++row)
@@ -260,15 +321,15 @@ template<typename Derived> class SparseMatrixBase
return s;
}
- const SparseCwiseUnaryOp<ei_scalar_opposite_op<typename ei_traits<Derived>::Scalar>,Derived> operator-() const;
+// const SparseCwiseUnaryOp<internal::scalar_opposite_op<typename internal::traits<Derived>::Scalar>,Derived> operator-() const;
- template<typename OtherDerived>
- const SparseCwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
- operator+(const SparseMatrixBase<OtherDerived> &other) const;
+// template<typename OtherDerived>
+// const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// operator+(const SparseMatrixBase<OtherDerived> &other) const;
- template<typename OtherDerived>
- const SparseCwiseBinaryOp<ei_scalar_difference_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
- operator-(const SparseMatrixBase<OtherDerived> &other) const;
+// template<typename OtherDerived>
+// const CwiseBinaryOp<internal::scalar_difference_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// operator-(const SparseMatrixBase<OtherDerived> &other) const;
template<typename OtherDerived>
Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
@@ -281,67 +342,108 @@ template<typename Derived> class SparseMatrixBase
Derived& operator*=(const Scalar& other);
Derived& operator/=(const Scalar& other);
- const SparseCwiseUnaryOp<ei_scalar_multiple_op<typename ei_traits<Derived>::Scalar>, Derived>
- operator*(const Scalar& scalar) const;
- const SparseCwiseUnaryOp<ei_scalar_quotient1_op<typename ei_traits<Derived>::Scalar>, Derived>
- operator/(const Scalar& scalar) const;
+ #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
+ CwiseBinaryOp< \
+ internal::scalar_product_op< \
+ typename internal::scalar_product_traits< \
+ typename internal::traits<Derived>::Scalar, \
+ typename internal::traits<OtherDerived>::Scalar \
+ >::ReturnType \
+ >, \
+ Derived, \
+ OtherDerived \
+ >
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+ cwiseProduct(const MatrixBase<OtherDerived> &other) const;
+
+// const SparseCwiseUnaryOp<internal::scalar_multiple_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator*(const Scalar& scalar) const;
+// const SparseCwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator/(const Scalar& scalar) const;
- inline friend const SparseCwiseUnaryOp<ei_scalar_multiple_op<typename ei_traits<Derived>::Scalar>, Derived>
- operator*(const Scalar& scalar, const SparseMatrixBase& matrix)
- { return matrix*scalar; }
+// inline friend const SparseCwiseUnaryOp<internal::scalar_multiple_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator*(const Scalar& scalar, const SparseMatrixBase& matrix)
+// { return matrix*scalar; }
+ // sparse * sparse
template<typename OtherDerived>
- const typename SparseProductReturnType<Derived,OtherDerived>::Type
+ const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
operator*(const SparseMatrixBase<OtherDerived> &other) const;
-
- // dense * sparse (return a dense object)
- template<typename OtherDerived> friend
- const typename SparseProductReturnType<OtherDerived,Derived>::Type
+
+ // sparse * diagonal
+ template<typename OtherDerived>
+ const SparseDiagonalProduct<Derived,OtherDerived>
+ operator*(const DiagonalBase<OtherDerived> &other) const;
+
+ // diagonal * sparse
+ template<typename OtherDerived> friend
+ const SparseDiagonalProduct<OtherDerived,Derived>
+ operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+ { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+ /** dense * sparse (return a dense object unless it is an outer product) */
+ template<typename OtherDerived> friend
+ const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
- { return typename SparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
-
+ { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
+
+ /** sparse * dense (returns a dense object unless it is an outer product) */
template<typename OtherDerived>
- const typename SparseProductReturnType<Derived,OtherDerived>::Type
+ const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
operator*(const MatrixBase<OtherDerived> &other) const;
template<typename OtherDerived>
Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+ #ifdef EIGEN2_SUPPORT
+ // deprecated
template<typename OtherDerived>
- typename ei_plain_matrix_type_column_major<OtherDerived>::type
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
solveTriangular(const MatrixBase<OtherDerived>& other) const;
+ // deprecated
template<typename OtherDerived>
void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
+// template<typename OtherDerived>
+// void solveTriangularInPlace(SparseMatrixBase<OtherDerived>& other) const;
+ #endif // EIGEN2_SUPPORT
+
+ template<int Mode>
+ inline const SparseTriangularView<Derived, Mode> triangularView() const;
+
+ template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
+ template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
RealScalar squaredNorm() const;
RealScalar norm() const;
-// const PlainMatrixType normalized() const;
+// const PlainObject normalized() const;
// void normalize();
- SparseTranspose<Derived> transpose() { return derived(); }
- const SparseTranspose<Derived> transpose() const { return derived(); }
+ Transpose<Derived> transpose() { return derived(); }
+ const Transpose<const Derived> transpose() const { return derived(); }
// void transposeInPlace();
- const AdjointReturnType adjoint() const { return conjugate()/*.nestByValue()*/; }
+ const AdjointReturnType adjoint() const { return transpose(); }
// sub-vector
- SparseInnerVectorSet<Derived,1> row(int i);
- const SparseInnerVectorSet<Derived,1> row(int i) const;
- SparseInnerVectorSet<Derived,1> col(int j);
- const SparseInnerVectorSet<Derived,1> col(int j) const;
- SparseInnerVectorSet<Derived,1> innerVector(int outer);
- const SparseInnerVectorSet<Derived,1> innerVector(int outer) const;
-
+ SparseInnerVectorSet<Derived,1> row(Index i);
+ const SparseInnerVectorSet<Derived,1> row(Index i) const;
+ SparseInnerVectorSet<Derived,1> col(Index j);
+ const SparseInnerVectorSet<Derived,1> col(Index j) const;
+ SparseInnerVectorSet<Derived,1> innerVector(Index outer);
+ const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
+
// set of sub-vectors
- SparseInnerVectorSet<Derived,Dynamic> subrows(int start, int size);
- const SparseInnerVectorSet<Derived,Dynamic> subrows(int start, int size) const;
- SparseInnerVectorSet<Derived,Dynamic> subcols(int start, int size);
- const SparseInnerVectorSet<Derived,Dynamic> subcols(int start, int size) const;
- SparseInnerVectorSet<Derived,Dynamic> innerVectors(int outerStart, int outerSize);
- const SparseInnerVectorSet<Derived,Dynamic> innerVectors(int outerStart, int outerSize) const;
+ SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
+ const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
// typename BlockReturnType<Derived>::Type block(int startRow, int startCol, int blockRows, int blockCols);
// const typename BlockReturnType<Derived>::Type
@@ -356,19 +458,11 @@ template<typename Derived> class SparseMatrixBase
// typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size);
// const typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size) const;
//
-// typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols);
-// const typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols) const;
-//
// template<int BlockRows, int BlockCols>
// typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol);
// template<int BlockRows, int BlockCols>
// const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol) const;
-// template<int CRows, int CCols>
-// typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type);
-// template<int CRows, int CCols>
-// const typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type) const;
-
// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType start(void);
// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType start() const;
@@ -378,8 +472,8 @@ template<typename Derived> class SparseMatrixBase
// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType segment(int start);
// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType segment(int start) const;
-// DiagonalCoeffs<Derived> diagonal();
-// const DiagonalCoeffs<Derived> diagonal() const;
+// Diagonal<Derived> diagonal();
+// const Diagonal<Derived> diagonal() const;
// template<unsigned int Mode> Part<Derived, Mode> part();
// template<unsigned int Mode> const Part<Derived, Mode> part() const;
@@ -419,49 +513,49 @@ template<typename Derived> class SparseMatrixBase
// Derived& setRandom();
// Derived& setIdentity();
- Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ /** \internal use operator= */
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& dst) const
{
- Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> res(rows(),cols());
- res.setZero();
- for (int j=0; j<outerSize(); ++j)
- {
+ dst.setZero();
+ for (Index j=0; j<outerSize(); ++j)
for (typename Derived::InnerIterator i(derived(),j); i; ++i)
- if(IsRowMajor)
- res.coeffRef(j,i.index()) = i.value();
- else
- res.coeffRef(i.index(),j) = i.value();
- }
- return res;
+ dst.coeffRef(i.row(),i.col()) = i.value();
+ }
+
+ Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ {
+ return derived();
}
template<typename OtherDerived>
bool isApprox(const SparseMatrixBase<OtherDerived>& other,
- RealScalar prec = precision<Scalar>()) const
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
{ return toDense().isApprox(other.toDense(),prec); }
template<typename OtherDerived>
bool isApprox(const MatrixBase<OtherDerived>& other,
- RealScalar prec = precision<Scalar>()) const
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
{ return toDense().isApprox(other,prec); }
// bool isMuchSmallerThan(const RealScalar& other,
-// RealScalar prec = precision<Scalar>()) const;
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
// template<typename OtherDerived>
// bool isMuchSmallerThan(const MatrixBase<OtherDerived>& other,
-// RealScalar prec = precision<Scalar>()) const;
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
-// bool isApproxToConstant(const Scalar& value, RealScalar prec = precision<Scalar>()) const;
-// bool isZero(RealScalar prec = precision<Scalar>()) const;
-// bool isOnes(RealScalar prec = precision<Scalar>()) const;
-// bool isIdentity(RealScalar prec = precision<Scalar>()) const;
-// bool isDiagonal(RealScalar prec = precision<Scalar>()) const;
+// bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
-// bool isUpperTriangular(RealScalar prec = precision<Scalar>()) const;
-// bool isLowerTriangular(RealScalar prec = precision<Scalar>()) const;
+// bool isUpper(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isLower(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
// template<typename OtherDerived>
// bool isOrthogonal(const MatrixBase<OtherDerived>& other,
-// RealScalar prec = precision<Scalar>()) const;
-// bool isUnitary(RealScalar prec = precision<Scalar>()) const;
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
// template<typename OtherDerived>
// inline bool operator==(const MatrixBase<OtherDerived>& other) const
@@ -472,22 +566,22 @@ template<typename Derived> class SparseMatrixBase
// { return (cwise() != other).any(); }
- template<typename NewType>
- const SparseCwiseUnaryOp<ei_scalar_cast_op<typename ei_traits<Derived>::Scalar, NewType>, Derived> cast() const;
+// template<typename NewType>
+// const SparseCwiseUnaryOp<internal::scalar_cast_op<typename internal::traits<Derived>::Scalar, NewType>, Derived> cast() const;
/** \returns the matrix or vector obtained by evaluating this expression.
*
* Notice that in the case of a plain matrix or vector (not an expression) this function just returns
* a const reference, in order to avoid a useless copy.
*/
- EIGEN_STRONG_INLINE const typename ei_eval<Derived>::type eval() const
- { return typename ei_eval<Derived>::type(derived()); }
+ inline const typename internal::eval<Derived>::type eval() const
+ { return typename internal::eval<Derived>::type(derived()); }
// template<typename OtherDerived>
-// void swap(const MatrixBase<OtherDerived>& other);
+// void swap(MatrixBase<OtherDerived> const & other);
- template<unsigned int Added>
- const SparseFlagged<Derived, Added, 0> marked() const;
+// template<unsigned int Added>
+// const SparseFlagged<Derived, Added, 0> marked() const;
// const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit> lazy() const;
/** \returns number of elements to skip to pass from one row (resp. column) to another
@@ -497,15 +591,13 @@ template<typename Derived> class SparseMatrixBase
*/
// inline int stride(void) const { return derived().stride(); }
-// inline const NestByValue<Derived> nestByValue() const;
-
-
- ConjugateReturnType conjugate() const;
- const RealReturnType real() const;
- const ImagReturnType imag() const;
+// FIXME
+// ConjugateReturnType conjugate() const;
+// const RealReturnType real() const;
+// const ImagReturnType imag() const;
- template<typename CustomUnaryOp>
- const SparseCwiseUnaryOp<CustomUnaryOp, Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const;
+// template<typename CustomUnaryOp>
+// const SparseCwiseUnaryOp<CustomUnaryOp, Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const;
// template<typename CustomBinaryOp, typename OtherDerived>
// const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
@@ -515,22 +607,22 @@ template<typename Derived> class SparseMatrixBase
Scalar sum() const;
// Scalar trace() const;
-// typename ei_traits<Derived>::Scalar minCoeff() const;
-// typename ei_traits<Derived>::Scalar maxCoeff() const;
+// typename internal::traits<Derived>::Scalar minCoeff() const;
+// typename internal::traits<Derived>::Scalar maxCoeff() const;
-// typename ei_traits<Derived>::Scalar minCoeff(int* row, int* col = 0) const;
-// typename ei_traits<Derived>::Scalar maxCoeff(int* row, int* col = 0) const;
+// typename internal::traits<Derived>::Scalar minCoeff(int* row, int* col = 0) const;
+// typename internal::traits<Derived>::Scalar maxCoeff(int* row, int* col = 0) const;
// template<typename BinaryOp>
-// typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type
+// typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type
// redux(const BinaryOp& func) const;
// template<typename Visitor>
// void visit(Visitor& func) const;
- const SparseCwise<Derived> cwise() const;
- SparseCwise<Derived> cwise();
+// const SparseCwise<Derived> cwise() const;
+// SparseCwise<Derived> cwise();
// inline const WithFormat<Derived> format(const IOFormat& fmt) const;
@@ -539,12 +631,12 @@ template<typename Derived> class SparseMatrixBase
bool all(void) const;
bool any(void) const;
- const PartialRedux<Derived,Horizontal> rowwise() const;
- const PartialRedux<Derived,Vertical> colwise() const;
+ const VectorwiseOp<Derived,Horizontal> rowwise() const;
+ const VectorwiseOp<Derived,Vertical> colwise() const;
- static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
- static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int size);
- static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random();
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(int size);
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random();
template<typename ThenDerived,typename ElseDerived>
const Select<Derived,ThenDerived,ElseDerived>
@@ -552,11 +644,11 @@ template<typename Derived> class SparseMatrixBase
const MatrixBase<ElseDerived>& elseMatrix) const;
template<typename ThenDerived>
- inline const Select<Derived,ThenDerived, NestByValue<typename ThenDerived::ConstantReturnType> >
+ inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
select(const MatrixBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
template<typename ElseDerived>
- inline const Select<Derived, NestByValue<typename ElseDerived::ConstantReturnType>, ElseDerived >
+ inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
select(typename ElseDerived::Scalar thenScalar, const MatrixBase<ElseDerived>& elseMatrix) const;
template<int p> RealScalar lpNorm() const;
@@ -568,10 +660,10 @@ template<typename Derived> class SparseMatrixBase
// {
// EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
// EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
-// EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
+// EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
//
-// ei_assert(derived().size() == other.size());
+// eigen_assert(derived().size() == other.size());
// // short version, but the assembly looks more complicated because
// // of the CwiseBinaryOp iterator complexity
// // return res = (derived().cwise() * other.derived().conjugate()).sum();
@@ -585,7 +677,7 @@ template<typename Derived> class SparseMatrixBase
// if (i.index()==j.index())
// {
// // std::cerr << i.value() << " * " << j.value() << "\n";
-// res += i.value() * ei_conj(j.value());
+// res += i.value() * internal::conj(j.value());
// ++i; ++j;
// }
// else if (i.index()<j.index())
@@ -606,18 +698,6 @@ template<typename Derived> class SparseMatrixBase
// return res;
// }
- #ifdef EIGEN_TAUCS_SUPPORT
- taucs_ccs_matrix asTaucsMatrix();
- #endif
-
- #ifdef EIGEN_CHOLMOD_SUPPORT
- cholmod_sparse asCholmodMatrix();
- #endif
-
- #ifdef EIGEN_SUPERLU_SUPPORT
- SluMatrix asSluMatrix();
- #endif
-
protected:
bool m_isRValue;
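
The hunks above migrate from the old startFill()/fill()/endFill() protocol to the new reserve()/startVec()/insertBackByOuterInner()/finalize() sequence. A minimal sketch of that sequential fill pattern, assuming the Eigen3 headers bundled under extern/Eigen3 (entries must be appended in increasing inner-index order within each outer vector):

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(4, 4);        // column-major by default
      m.reserve(8);                               // rough bound on non-zeros
      for (int j = 0; j < m.outerSize(); ++j)
      {
        m.startVec(j);                            // open outer vector (column) j
        m.insertBackByOuterInner(j, j) = 2.0;     // diagonal entry (j,j)
        if (j + 1 < m.innerSize())
          m.insertBackByOuterInner(j, j + 1) = -1.0;  // sub-diagonal (j+1,j)
      }
      m.finalize();                               // seal the compressed storage
      return m.nonZeros() == 7 ? 0 : 1;
    }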
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h
new file mode 100644
index 00000000000..1c1f54706ac
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h
@@ -0,0 +1,141 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEPRODUCT_H
+#define EIGEN_SPARSEPRODUCT_H
+
+template<typename Lhs, typename Rhs>
+struct SparseSparseProductReturnType
+{
+ typedef typename internal::traits<Lhs>::Scalar Scalar;
+ enum {
+ LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
+ RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
+ TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
+ TransposeLhs = LhsRowMajor && (!RhsRowMajor)
+ };
+
+ typedef typename internal::conditional<TransposeLhs,
+ SparseMatrix<Scalar,0>,
+ const typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
+
+ typedef typename internal::conditional<TransposeRhs,
+ SparseMatrix<Scalar,0>,
+ const typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
+
+ typedef SparseSparseProduct<LhsNested, RhsNested> Type;
+};
+
+namespace internal {
+template<typename LhsNested, typename RhsNested>
+struct traits<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ typedef MatrixXpr XprKind;
+ // clean the nested types:
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+ typedef typename _LhsNested::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+ typename traits<_RhsNested>::Index>::type Index;
+
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+
+ RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+ ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+ MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+ InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+ EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+
+ RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+ Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeAssigningBit
+ | EvalBeforeNestingBit,
+
+ CoeffReadCost = Dynamic
+ };
+
+ typedef Sparse StorageKind;
+};
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested>
+class SparseSparseProduct : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseSparseProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
+
+ private:
+
+ typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
+ typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
+
+ public:
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows());
+
+ enum {
+ ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
+ || _RhsNested::RowsAtCompileTime==Dynamic
+ || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
+ AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwise()*v2
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+#endif // EIGEN_SPARSEPRODUCT_H
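
SparseSparseProduct is a lazy expression: operator* only records the two operands, and the actual computation runs when the expression is assigned to a sparse destination through the operator= declared in SparseMatrixBase. A hedged usage sketch under the same assumptions as above (identity3 is an illustrative helper, not Eigen API):

    #include <Eigen/Sparse>

    typedef Eigen::SparseMatrix<double> SpMat;

    // illustrative helper: 3x3 sparse identity via the sequential fill API
    static SpMat identity3()
    {
      SpMat m(3, 3);
      m.reserve(3);
      for (int j = 0; j < 3; ++j)
      {
        m.startVec(j);
        m.insertBackByOuterInner(j, j) = 1.0;
      }
      m.finalize();
      return m;
    }

    int main()
    {
      SpMat a = identity3(), b = identity3();
      SpMat c = a * b;   // SparseSparseProduct expression, evaluated here
      return c.nonZeros() == 3 ? 0 : 1;
    }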
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h b/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h
new file mode 100644
index 00000000000..afc49de7aad
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h
@@ -0,0 +1,56 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEREDUX_H
+#define EIGEN_SPARSEREDUX_H
+
+template<typename Derived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ Scalar res = 0;
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+ res += iter.value();
+ return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
+SparseMatrix<_Scalar,_Options,_Index>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
+SparseVector<_Scalar,_Options,_Index>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+#endif // EIGEN_SPARSEREDUX_H
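
sum() simply accumulates iter.value() over every stored entry; the SparseMatrix and SparseVector specializations shortcut this by mapping the value array directly. A small sketch, same assumptions as the earlier snippet:

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(2, 2);
      m.reserve(2);
      m.startVec(0);
      m.insertBackByOuterInner(0, 0) = 2.0;
      m.startVec(1);
      m.insertBackByOuterInner(1, 1) = 3.0;
      m.finalize();
      return m.sum() == 5.0 ? 0 : 1;   // 2 + 3, only stored entries visited
    }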
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h b/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h
new file mode 100644
index 00000000000..d82044c789c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h
@@ -0,0 +1,454 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
+#define EIGEN_SPARSE_SELFADJOINTVIEW_H
+
+/** \class SparseSelfAdjointView
+ *
+ *
+ * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
+ *
+ * \param MatrixType the type of the underlying sparse matrix storing the coefficients
+ * \param UpLo can be either \c #Lower or \c #Upper
+ *
+ * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
+ * with given storage of the coefficients. It is the return type of
+ * SparseMatrixBase::selfadjointView() and most of the time this is the only way it is used.
+ *
+ * \sa SparseMatrixBase::selfadjointView()
+ */
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct;
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct;
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct;
+
+namespace internal {
+
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+}
+
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
+ : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+{
+ public:
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+ {
+ eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
+ }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ /** \internal \returns a reference to the nested matrix */
+ const _MatrixTypeNested& matrix() const { return m_matrix; }
+ _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+
+ /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
+ template<typename OtherDerived>
+ SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+ operator*(const MatrixBase<OtherDerived>& rhs) const
+ {
+ return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+ }
+
+ /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
+ template<typename OtherDerived> friend
+ DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+ operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+ {
+ return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+ }
+
+ /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+ * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+ *
+ * \returns a reference to \c *this
+ *
+ * Note that calling this function with alpha=0 is faster than first setting
+ * the matrix to zero and then calling it with the default alpha=1.
+ *
+ * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+ * call this function with u.adjoint().
+ */
+ template<typename DerivedU>
+ SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+
+ /** \internal triggered by sparse_matrix = SparseSelfAdjointView; */
+ template<typename DestScalar> void evalTo(SparseMatrix<DestScalar>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
+ }
+
+ template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar>& _dest) const
+ {
+ // TODO directly evaluate into _dest;
+ SparseMatrix<DestScalar> tmp(_dest.rows(),_dest.cols());
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
+ _dest = tmp;
+ }
+
+ /** \returns an expression of P^-1 H P */
+ SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic>& perm) const
+ {
+ return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+ }
+
+ template<typename SrcMatrixType,int SrcUpLo>
+ SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+ {
+ permutedMatrix.evalTo(*this);
+ return *this;
+ }
+
+
+ // const SparseLLT<PlainObject, UpLo> llt() const;
+ // const SparseLDLT<PlainObject, UpLo> ldlt() const;
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ mutable VectorI m_countPerRow;
+ mutable VectorI m_countPerCol;
+};
+
+/***************************************************************************
+* Implementation of SparseMatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+{
+ return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+{
+ return derived();
+}
+
+/***************************************************************************
+* Implementation of SparseSelfAdjointView methods
+***************************************************************************/
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SparseSelfAdjointView<MatrixType,UpLo>&
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+{
+ SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
+ if(alpha==Scalar(0))
+ m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+ else
+ m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+
+ return *this;
+}
+
+/***************************************************************************
+* Implementation of sparse self-adjoint time dense matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct
+ : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
+
+ SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ // TODO use alpha
+ eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
+ typedef typename internal::remove_all<Lhs>::type _Lhs;
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum {
+ LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
+ ProcessFirstHalf =
+ ((UpLo&(Upper|Lower))==(Upper|Lower))
+ || ( (UpLo&Upper) && !LhsIsRowMajor)
+ || ( (UpLo&Lower) && LhsIsRowMajor),
+ ProcessSecondHalf = !ProcessFirstHalf
+ };
+ for (Index j=0; j<m_lhs.outerSize(); ++j)
+ {
+ LhsInnerIterator i(m_lhs,j);
+ if (ProcessSecondHalf && i && (i.index()==j))
+ {
+ dest.row(j) += i.value() * m_rhs.row(j);
+ ++i;
+ }
+ Block<Dest,1,Dest::ColsAtCompileTime> dest_j(dest.row(LhsIsRowMajor ? j : 0));
+ for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ Index a = LhsIsRowMajor ? j : i.index();
+ Index b = LhsIsRowMajor ? i.index() : j;
+ typename Lhs::Scalar v = i.value();
+ dest.row(a) += (v) * m_rhs.row(b);
+ dest.row(b) += internal::conj(v) * m_rhs.row(a);
+ }
+ if (ProcessFirstHalf && i && (i.index()==j))
+ dest.row(j) += i.value() * m_rhs.row(j);
+ }
+ }
+
+ private:
+ SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct
+ : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
+
+ DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+ {
+ // TODO
+ }
+
+ private:
+ DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
+};
+
+/***************************************************************************
+* Implementation of symmetric copies and permutations
+***************************************************************************/
+namespace internal {
+
+template<typename MatrixType, int UpLo>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+
+ Dest& dest(_dest.derived());
+ enum {
+ StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
+ };
+ eigen_assert(perm==0);
+ Index size = mat.rows();
+ VectorI count;
+ count.resize(size);
+ count.setZero();
+ dest.resize(size,size);
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index ip = perm ? perm[i] : i;
+ if(i==j)
+ count[ip]++;
+ else if((UpLo==Lower && i>j) || (UpLo==Upper && i<j))
+ {
+ count[ip]++;
+ count[jp]++;
+ }
+ }
+ }
+ Index nnz = count.sum();
+
+ // reserve space
+ dest.reserve(nnz);
+ dest._outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest._outerIndexPtr()[j+1] = dest._outerIndexPtr()[j] + count[j];
+ for(Index j=0; j<size; ++j)
+ count[j] = dest._outerIndexPtr()[j];
+
+ // copy data
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index ip = perm ? perm[i] : i;
+ if(i==j)
+ {
+ int k = count[ip]++;
+ dest._innerIndexPtr()[k] = ip;
+ dest._valuePtr()[k] = it.value();
+ }
+ else if((UpLo==Lower && i>j) || (UpLo==Upper && i<j))
+ {
+ int k = count[jp]++;
+ dest._innerIndexPtr()[k] = ip;
+ dest._valuePtr()[k] = it.value();
+ k = count[ip]++;
+ dest._innerIndexPtr()[k] = jp;
+ dest._valuePtr()[k] = internal::conj(it.value());
+ }
+ }
+ }
+}
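
permute_symm_to_fullsymm above uses the classic two-pass compressed-storage build: pass one counts entries per destination outer vector, a prefix sum converts the counts into outer-index offsets, and pass two writes each entry at a count[j]++ cursor. A minimal standalone illustration of the prefix-sum step (plain STL, hypothetical buffers):

    #include <cstddef>
    #include <vector>

    // turn per-vector counts into CSC-style outer offsets; afterwards
    // count[j] is reused as the running write cursor for outer vector j
    static void countsToOffsets(std::vector<int>& count,
                                std::vector<int>& outerIndex)
    {
      outerIndex.assign(count.size() + 1, 0);
      for (std::size_t j = 0; j < count.size(); ++j)
        outerIndex[j + 1] = outerIndex[j] + count[j];
      for (std::size_t j = 0; j < count.size(); ++j)
        count[j] = outerIndex[j];
    }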
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+ Dest& dest(_dest.derived());
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ //internal::conj_if<SrcUpLo!=DstUpLo> cj;
+
+ Index size = mat.rows();
+ VectorI count(size);
+ count.setZero();
+ dest.resize(size,size);
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((SrcUpLo==Lower && i<j) || (SrcUpLo==Upper && i>j))
+ continue;
+
+ Index ip = perm ? perm[i] : i;
+ count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ }
+ }
+ dest._outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest._outerIndexPtr()[j+1] = dest._outerIndexPtr()[j] + count[j];
+ dest.resizeNonZeros(dest._outerIndexPtr()[size]);
+ for(Index j=0; j<size; ++j)
+ count[j] = dest._outerIndexPtr()[j];
+
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((SrcUpLo==Lower && i<j) || (SrcUpLo==Upper && i>j))
+ continue;
+
+ Index ip = perm? perm[i] : i;
+ Index k = count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ dest._innerIndexPtr()[k] = DstUpLo==Lower ? (std::max)(ip,jp) : (std::min)(ip,jp);
+
+ if((DstUpLo==Lower && ip<jp) || (DstUpLo==Upper && ip>jp))
+ dest._valuePtr()[k] = conj(it.value());
+ else
+ dest._valuePtr()[k] = it.value();
+ }
+ }
+}
+
+}
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct
+ : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+{
+ typedef PermutationMatrix<Dynamic> Perm;
+ public:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
+ : m_matrix(mat), m_perm(perm)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ template<typename DestScalar> void evalTo(SparseMatrix<DestScalar>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+ }
+
+ template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
+ {
+ internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
+ }
+
+ protected:
+ const MatrixTypeNested m_matrix;
+ const Perm& m_perm;
+
+};
+
+#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
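
Only one triangle needs to be stored: selfadjointView<UpLo>() makes products behave as if the full symmetric matrix were present (note the dense-times-sparse-selfadjoint direction is still a TODO above, so the sketch uses the implemented sparse-selfadjoint-times-dense path). A hedged example storing the lower triangle of [[2,1],[1,2]]:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> m(2, 2);
      m.reserve(3);
      m.startVec(0);
      m.insertBackByOuterInner(0, 0) = 2.0;  // (0,0)
      m.insertBackByOuterInner(0, 1) = 1.0;  // (1,0), lower triangle
      m.startVec(1);
      m.insertBackByOuterInner(1, 1) = 2.0;  // (1,1)
      m.finalize();

      Eigen::Vector2d x(1.0, 1.0);
      // SparseSelfAdjointTimeDenseProduct: behaves like [[2,1],[1,2]] * x
      Eigen::Vector2d y = m.selfadjointView<Eigen::Lower>() * x;
      return y.isApprox(Eigen::Vector2d(3.0, 3.0)) ? 0 : 1;
    }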
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h
new file mode 100644
index 00000000000..19abcd1f8e4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h
@@ -0,0 +1,401 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSESPARSEPRODUCT_H
+#define EIGEN_SPARSESPARSEPRODUCT_H
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ std::vector<bool> mask(rows,false);
+ Matrix<Scalar,Dynamic,1> values(rows);
+ Matrix<Index,Dynamic,1> indices(rows);
+
+ // estimate the number of non-zero entries
+ float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
+ float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
+ float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
+
+// int t200 = rows/(log2(200)*1.39);
+// int t = (rows*100)/139;
+
+ res.resize(rows, cols);
+ res.reserve(Index(ratioRes*rows*cols));
+ // we compute each column of the result, one after the other
+ for (Index j=0; j<cols; ++j)
+ {
+
+ res.startVec(j);
+ Index nnz = 0;
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ Scalar y = rhsIt.value();
+ Index k = rhsIt.index();
+ for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
+ {
+ Index i = lhsIt.index();
+ Scalar x = lhsIt.value();
+ if(!mask[i])
+ {
+ mask[i] = true;
+// values[i] = x * y;
+// indices[nnz] = i;
+ ++nnz;
+ }
+ else
+ values[i] += x * y;
+ }
+ }
+ // FIXME reserve nnz non zeros
+ // FIXME implement fast sort algorithms for very small nnz
+ // if the result is sparse enough => use a quick sort
+ // otherwise => loop through the entire vector
+ // In order to avoid to perform an expensive log2 when the
+ // result is clearly very sparse we use a linear bound up to 200.
+// if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
+// {
+// if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
+// for(int k=0; k<nnz; ++k)
+// {
+// int i = indices[k];
+// res.insertBackNoCheck(j,i) = values[i];
+// mask[i] = false;
+// }
+// }
+// else
+// {
+// // dense path
+// for(int i=0; i<rows; ++i)
+// {
+// if(mask[i])
+// {
+// mask[i] = false;
+// res.insertBackNoCheck(j,i) = values[i];
+// }
+// }
+// }
+
+ }
+ res.finalize();
+}
+
+// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+// return sparse_product_impl2(lhs,rhs,res);
+
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ //int size = lhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ // allocate a temporary buffer
+ AmbiVector<Scalar,Index> tempVector(rows);
+
+ // estimate the number of non-zero entries
+ float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
+ float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
+ float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
+
+ // mimics a resizeByInnerOuter:
+ if(ResultType::IsRowMajor)
+ res.resize(cols, rows);
+ else
+ res.resize(rows, cols);
+
+ res.reserve(Index(ratioRes*rows*cols));
+ for (Index j=0; j<cols; ++j)
+ {
+ // let's do a more accurate determination of the nnz ratio for the current column j of res
+ //float ratioColRes = (std::min)(ratioLhs * rhs.innerNonZeros(j), 1.f);
+ // FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
+ float ratioColRes = ratioRes;
+ tempVector.init(ratioColRes);
+ tempVector.setZero();
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
+ tempVector.restart();
+ Scalar x = rhsIt.value();
+ for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+ {
+ tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
+ }
+ }
+ res.startVec(j);
+ for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector); it; ++it)
+ res.insertBackByOuterInner(j,it.index()) = it.value();
+ }
+ res.finalize();
+}
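
Both kernels above compute the result one column at a time, scattering x*y contributions into a per-column accumulator indexed by row: a dense value/mask buffer in sparse_product_impl2, an AmbiVector with a sparse fallback in sparse_product_impl. A self-contained sketch of that scatter step with plain STL containers (multiplyColumn is an illustrative helper, not Eigen API):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // one CSC column as (row index, value) pairs, in increasing row order
    typedef std::vector<std::pair<int, double> > SparseCol;

    // compute column j of lhs*rhs: for each non-zero y at row k of rhs's
    // column j, scatter y * lhs.col(k) into a dense accumulator
    static SparseCol multiplyColumn(const std::vector<SparseCol>& lhsCols,
                                    const SparseCol& rhsColJ, int rows)
    {
      std::vector<double> acc(rows, 0.0);
      std::vector<bool> mask(rows, false);   // structural non-zero marker
      for (std::size_t r = 0; r < rhsColJ.size(); ++r)
      {
        const int k = rhsColJ[r].first;
        const double y = rhsColJ[r].second;
        const SparseCol& lhsColK = lhsCols[k];
        for (std::size_t l = 0; l < lhsColK.size(); ++l)
        {
          acc[lhsColK[l].first] += lhsColK[l].second * y;
          mask[lhsColK[l].first] = true;
        }
      }
      SparseCol out;
      for (int i = 0; i < rows; ++i)
        if (mask[i])
          out.push_back(std::make_pair(i, acc[i]));
      return out;
    }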
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_product_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+// std::cerr << __LINE__ << "\n";
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ sparse_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+// std::cerr << __LINE__ << "\n";
+ // we need a col-major matrix to hold the result
+ typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ SparseTemporaryType _res(res.rows(), res.cols());
+ sparse_product_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res);
+ res = _res;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+// std::cerr << __LINE__ << "\n";
+ // let's transpose the product to get a column x column product
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ sparse_product_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+// std::cerr << "here...\n";
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix colLhs(lhs);
+ ColMajorMatrix colRhs(rhs);
+// std::cerr << "more...\n";
+ sparse_product_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res);
+// std::cerr << "OK.\n";
+
+ // let's transpose the product to get a column x column product
+
+// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+// SparseTemporaryType _res(res.cols(), res.rows());
+// sparse_product_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
+// res = _res.transpose();
+ }
+};
+
+// NOTE the two other cases (col row *) must never occur since they are caught
+// by ProductReturnType, which transforms them to (col col *) by evaluating the rhs.
+
+} // end namespace internal
+
+// sparse = sparse * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+{
+// std::cerr << "there..." << typeid(Lhs).name() << " " << typeid(Lhs).name() << " " << (Derived::Flags&&RowMajorBit) << "\n";
+ internal::sparse_product_selector<
+ typename internal::remove_all<Lhs>::type,
+ typename internal::remove_all<Rhs>::type,
+ Derived>::run(product.lhs(),product.rhs(),derived());
+ return derived();
+}
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_product_selector2;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ sparse_product_impl2<Lhs,Rhs,ResultType>(lhs, rhs, res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ // prevent warnings until the code is fixed
+ EIGEN_UNUSED_VARIABLE(lhs);
+ EIGEN_UNUSED_VARIABLE(rhs);
+ EIGEN_UNUSED_VARIABLE(res);
+
+// typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+// RowMajorMatrix rhsRow = rhs;
+// RowMajorMatrix resRow(res.rows(), res.cols());
+// sparse_product_impl2<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
+// res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix lhsRow = lhs;
+ RowMajorMatrix resRow(res.rows(), res.cols());
+ sparse_product_impl2<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix resRow(res.rows(), res.cols());
+ sparse_product_impl2<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+ res = resRow;
+ }
+};
+
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix lhsCol = lhs;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix rhsCol = rhs;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+// ColMajorMatrix lhsTr(lhs);
+// ColMajorMatrix rhsTr(rhs);
+// ColMajorMatrix aux(res.rows(), res.cols());
+// sparse_product_impl2<Rhs,Lhs,ColMajorMatrix>(rhs, lhs, aux);
+// // ColMajorMatrix aux2 = aux.transpose();
+// res = aux;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix lhsCol(lhs);
+ ColMajorMatrix rhsCol(rhs);
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<ColMajorMatrix,ColMajorMatrix,ColMajorMatrix>(lhsCol, rhsCol, resCol);
+ res = resCol;
+ }
+};
+
+} // end namespace internal
+
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline void SparseMatrixBase<Derived>::_experimentalNewProduct(const Lhs& lhs, const Rhs& rhs)
+{
+ //derived().resize(lhs.rows(), rhs.cols());
+ internal::sparse_product_selector2<
+ typename internal::remove_all<Lhs>::type,
+ typename internal::remove_all<Rhs>::type,
+ Derived>::run(lhs,rhs,derived());
+}
+
+// sparse * sparse
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+#endif // EIGEN_SPARSESPARSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h b/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h
new file mode 100644
index 00000000000..2aea2fa32c7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h
@@ -0,0 +1,68 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRANSPOSE_H
+#define EIGEN_SPARSETRANSPOSE_H
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+ : public SparseMatrixBase<Transpose<MatrixType> >
+{
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
+ : public _MatrixTypeNested::InnerIterator
+{
+ typedef typename _MatrixTypeNested::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, Index outer)
+ : Base(trans.derived().nestedExpression(), outer)
+ {}
+ inline Index row() const { return Base::col(); }
+ inline Index col() const { return Base::row(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
+ : public _MatrixTypeNested::ReverseInnerIterator
+{
+ typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, Index outer)
+ : Base(xpr.derived().nestedExpression(), outer)
+ {}
+ inline Index row() const { return Base::col(); }
+ inline Index col() const { return Base::row(); }
+};
+
+#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h b/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h
new file mode 100644
index 00000000000..319eaf06638
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h
@@ -0,0 +1,100 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
+#define EIGEN_SPARSE_TRIANGULARVIEW_H
+
+namespace internal {
+
+template<typename MatrixType, int Mode>
+struct traits<SparseTriangularView<MatrixType,Mode> >
+: public traits<MatrixType>
+{};
+
+} // namespace internal
+
+template<typename MatrixType, int Mode> class SparseTriangularView
+ : public SparseMatrixBase<SparseTriangularView<MatrixType,Mode> >
+{
+ enum { SkipFirst = (Mode==Lower && !(MatrixType::Flags&RowMajorBit))
+ || (Mode==Upper && (MatrixType::Flags&RowMajorBit)) };
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView)
+
+ class InnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ typedef typename internal::conditional<internal::must_nest_by_value<MatrixType>::ret,
+ MatrixType, const MatrixType&>::type MatrixTypeNested;
+
+ inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {}
+
+ /** \internal */
+ inline const MatrixType& nestedExpression() const { return m_matrix; }
+
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solve(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
+
+ protected:
+ MatrixTypeNested m_matrix;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixType::InnerIterator
+{
+ typedef typename MatrixType::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
+ : Base(view.nestedExpression(), outer)
+ {
+ if(SkipFirst)
+ while((*this) && this->index()<outer)
+ ++(*this);
+ }
+ inline Index row() const { return Base::row(); }
+ inline Index col() const { return Base::col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const
+ {
+ return SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer());
+ }
+};
+
+template<typename Derived>
+template<int Mode>
+inline const SparseTriangularView<Derived, Mode>
+SparseMatrixBase<Derived>::triangularView() const
+{
+ return derived();
+}
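+
+// Usage sketch (illustrative only; assumes a square SparseMatrix<double> A and
+// a dense VectorXd b):
+//   A.triangularView<Eigen::Lower>().solveInPlace(b); // forward substitution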
+
+#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h b/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h
new file mode 100644
index 00000000000..db9ae98e7a0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h
@@ -0,0 +1,130 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEUTIL_H
+#define EIGEN_SPARSEUTIL_H
+
+#ifdef NDEBUG
+#define EIGEN_DBG_SPARSE(X)
+#else
+#define EIGEN_DBG_SPARSE(X) X
+#endif
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
+{ \
+ return Base::operator Op(other.derived()); \
+} \
+EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
+{ \
+ return Base::operator Op(other); \
+}
+
+#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename Other> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
+{ \
+ return Base::operator Op(scalar); \
+}
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
+
+#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
+ typedef BaseClass Base; \
+ typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::internal::nested<Derived>::type Nested; \
+ typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived>::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+ using Base::derived; \
+ using Base::const_cast_derived;
+
+#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
+
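+// Access pattern hints used by sparse expressions: each stronger pattern also
+// sets the bits of the weaker ones it implies (RandomAccessPattern implies
+// outer and inner random access, which in turn imply coherent access).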
+const int CoherentAccessPattern = 0x1;
+const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
+
+template<typename Derived> class SparseMatrixBase;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
+
+template<typename MatrixType, int Size> class SparseInnerVectorSet;
+template<typename MatrixType, int Mode> class SparseTriangularView;
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
+template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
+template<typename MatrixType> class SparseView;
+
+template<typename Lhs, typename Rhs> class SparseSparseProduct;
+template<typename Lhs, typename Rhs> class SparseTimeDenseProduct;
+template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
+template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
+
+template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType;
+
+namespace internal {
+
+template<typename T> struct eval<T,Sparse>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+template<typename T> struct plain_matrix_type<T,Sparse>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+} // end namespace internal
+
+#endif // EIGEN_SPARSEUTIL_H
diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseVector.h b/extern/Eigen3/Eigen/src/Sparse/SparseVector.h
index 5d47209f790..ce4bb51a27e 100644
--- a/extern/Eigen2/Eigen/src/Sparse/SparseVector.h
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseVector.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra. Eigen itself is part of the KDE project.
+// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@@ -29,34 +29,42 @@
*
* \brief a sparse vector class
*
- * \param _Scalar the scalar type, i.e. the type of the coefficients
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
*
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
*
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
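+ *
+ * A minimal sketch (the file name and the added member are purely illustrative):
+ * \code
+ * // my_sparsevector_plugin.h, enabled before including Eigen via:
+ * //   #define EIGEN_SPARSEVECTOR_PLUGIN "my_sparsevector_plugin.h"
+ * inline bool isEmpty() const { return nonZeros() == 0; }
+ * \endcode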
*/
-template<typename _Scalar, int _Flags>
-struct ei_traits<SparseVector<_Scalar, _Flags> >
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseVector<_Scalar, _Options, _Index> >
{
typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
enum {
- IsColVector = _Flags & RowMajorBit ? 0 : 1,
+ IsColVector = _Options & RowMajorBit ? 0 : 1,
RowsAtCompileTime = IsColVector ? Dynamic : 1,
ColsAtCompileTime = IsColVector ? 1 : Dynamic,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
- Flags = SparseBit | _Flags,
+ Flags = _Options | NestByRefBit | LvalueBit,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
+}
-template<typename _Scalar, int _Flags>
+template<typename _Scalar, int _Options, typename _Index>
class SparseVector
- : public SparseMatrixBase<SparseVector<_Scalar, _Flags> >
+ : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
{
public:
- EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseVector)
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =)
@@ -65,48 +73,52 @@ class SparseVector
public:
typedef SparseMatrixBase<SparseVector> SparseBase;
- enum { IsColVector = ei_traits<SparseVector>::IsColVector };
-
- CompressedStorage<Scalar> m_data;
- int m_size;
+ enum { IsColVector = internal::traits<SparseVector>::IsColVector };
- CompressedStorage<Scalar>& _data() { return m_data; }
- CompressedStorage<Scalar>& _data() const { return m_data; }
+ enum {
+ Options = _Options
+ };
+
+ CompressedStorage<Scalar,Index> m_data;
+ Index m_size;
+
+ CompressedStorage<Scalar,Index>& _data() { return m_data; }
+ const CompressedStorage<Scalar,Index>& _data() const { return m_data; }
public:
- EIGEN_STRONG_INLINE int rows() const { return IsColVector ? m_size : 1; }
- EIGEN_STRONG_INLINE int cols() const { return IsColVector ? 1 : m_size; }
- EIGEN_STRONG_INLINE int innerSize() const { return m_size; }
- EIGEN_STRONG_INLINE int outerSize() const { return 1; }
- EIGEN_STRONG_INLINE int innerNonZeros(int j) const { ei_assert(j==0); return m_size; }
+ EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
+ EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
+ EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
+ EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
+ EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { eigen_assert(j==0); return m_size; }
EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); }
EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); }
- EIGEN_STRONG_INLINE const int* _innerIndexPtr() const { return &m_data.index(0); }
- EIGEN_STRONG_INLINE int* _innerIndexPtr() { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE const Index* _innerIndexPtr() const { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE Index* _innerIndexPtr() { return &m_data.index(0); }
- inline Scalar coeff(int row, int col) const
+ inline Scalar coeff(Index row, Index col) const
{
- ei_assert((IsColVector ? col : row)==0);
+ eigen_assert((IsColVector ? col : row)==0);
return coeff(IsColVector ? row : col);
}
- inline Scalar coeff(int i) const { return m_data.at(i); }
+ inline Scalar coeff(Index i) const { return m_data.at(i); }
- inline Scalar& coeffRef(int row, int col)
+ inline Scalar& coeffRef(Index row, Index col)
{
- ei_assert((IsColVector ? col : row)==0);
+ eigen_assert((IsColVector ? col : row)==0);
     return coeffRef(IsColVector ? row : col);
}
/** \returns a reference to the coefficient value at given index \a i
     * This operation involves a log(rho*size) binary search. If the coefficient does not
* exist yet, then a sorted insertion into a sequential buffer is performed.
- *
+ *
* This insertion might be very costly if the number of nonzeros above \a i is large.
*/
- inline Scalar& coeffRef(int i)
+ inline Scalar& coeffRef(Index i)
{
return m_data.atWithInsertion(i);
}
@@ -118,83 +130,80 @@ class SparseVector
inline void setZero() { m_data.clear(); }
/** \returns the number of non zero coefficients */
- inline int nonZeros() const { return m_data.size(); }
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
- /**
- */
- inline void reserve(int reserveSize) { m_data.reserve(reserveSize); }
-
- inline void startFill(int reserve)
+ inline void startVec(Index outer)
{
- setZero();
- m_data.reserve(reserve);
+ eigen_assert(outer==0);
}
- /**
- */
- inline Scalar& fill(int r, int c)
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
- ei_assert(r==0 || c==0);
- return fill(IsColVector ? r : c);
+ eigen_assert(outer==0);
+ return insertBack(inner);
}
-
- inline Scalar& fill(int i)
+ inline Scalar& insertBack(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
-
- inline Scalar& fillrand(int r, int c)
+
+ inline Scalar& insert(Index row, Index col)
{
- ei_assert(r==0 || c==0);
- return fillrand(IsColVector ? r : c);
+ Index inner = IsColVector ? row : col;
+ Index outer = IsColVector ? col : row;
+ eigen_assert(outer==0);
+ return insert(inner);
}
-
- /** Like fill() but with random coordinates.
- */
- inline Scalar& fillrand(int i)
+ Scalar& insert(Index i)
{
- int startId = 0;
- int id = m_data.size() - 1;
- m_data.resize(id+2,1);
+ Index startId = 0;
+ Index p = m_data.size() - 1;
+ // TODO smart realloc
+ m_data.resize(p+2,1);
- while ( (id >= startId) && (m_data.index(id) > i) )
+ while ( (p >= startId) && (m_data.index(p) > i) )
{
- m_data.index(id+1) = m_data.index(id);
- m_data.value(id+1) = m_data.value(id);
- --id;
+ m_data.index(p+1) = m_data.index(p);
+ m_data.value(p+1) = m_data.value(p);
+ --p;
}
- m_data.index(id+1) = i;
- m_data.value(id+1) = 0;
- return m_data.value(id+1);
+ m_data.index(p+1) = i;
+ m_data.value(p+1) = 0;
+ return m_data.value(p+1);
}
-
- inline void endFill() {}
-
- void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
+
+ /** Preallocates room for \a reserveSize non zeros. */
+ inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
+
+
+ inline void finalize() {}
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
{
m_data.prune(reference,epsilon);
}
-
- void resize(int rows, int cols)
+
+ void resize(Index rows, Index cols)
{
- ei_assert(rows==1 || cols==1);
+ eigen_assert(rows==1 || cols==1);
resize(IsColVector ? rows : cols);
}
- void resize(int newSize)
+ void resize(Index newSize)
{
m_size = newSize;
m_data.clear();
}
- void resizeNonZeros(int size) { m_data.resize(size); }
+ void resizeNonZeros(Index size) { m_data.resize(size); }
inline SparseVector() : m_size(0) { resize(0); }
- inline SparseVector(int size) : m_size(0) { resize(size); }
-
- inline SparseVector(int rows, int cols) : m_size(0) { resize(rows,cols); }
+ inline SparseVector(Index size) : m_size(0) { resize(size); }
+
+ inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
template<typename OtherDerived>
inline SparseVector(const MatrixBase<OtherDerived>& other)
@@ -202,7 +211,7 @@ class SparseVector
{
*this = other.derived();
}
-
+
template<typename OtherDerived>
inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
: m_size(0)
@@ -239,9 +248,20 @@ class SparseVector
template<typename OtherDerived>
inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
{
- return Base::operator=(other);
+ if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
+ return Base::operator=(other.transpose());
+ else
+ return Base::operator=(other);
}
-
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ {
+ return Base::operator=(product);
+ }
+ #endif
+
// const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
// if (needToTranspose)
// {
@@ -249,9 +269,9 @@ class SparseVector
// // 1 - compute the number of coeffs per dest inner vector
// // 2 - do the actual copy/eval
// // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
-// typedef typename ei_nested<OtherDerived,2>::type OtherCopy;
+// typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
// OtherCopy otherCopy(other.derived());
-// typedef typename ei_cleantype<OtherCopy>::type _OtherCopy;
+// typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
//
// resize(other.rows(), other.cols());
// Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
@@ -294,7 +314,7 @@ class SparseVector
friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
{
- for (unsigned int i=0; i<m.nonZeros(); ++i)
+ for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
s << std::endl;
return s;
@@ -310,7 +330,7 @@ class SparseVector
// {
// if (m_data.index(i)==other.m_data.index(j))
// {
-// res += m_data.value(i) * ei_conj(other.m_data.value(j));
+// res += m_data.value(i) * internal::conj(other.m_data.value(j));
// ++i; ++j;
// }
// else if (m_data.index(i)<other.m_data.index(j))
@@ -324,24 +344,70 @@ class SparseVector
/** Destructor */
inline ~SparseVector() {}
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ public:
+
+ /** \deprecated use setZero() and reserve() */
+ EIGEN_DEPRECATED void startFill(Index reserve)
+ {
+ setZero();
+ m_data.reserve(reserve);
+ }
+
+ /** \deprecated use insertBack(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fill(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insertBack(Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ /** \deprecated use insert(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fillrand(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insert(Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index i)
+ {
+ return insert(i);
+ }
+
+ /** \deprecated use finalize() */
+ EIGEN_DEPRECATED void endFill() {}
+
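+ // Migration sketch (v being some SparseVector<double>; names illustrative):
+ // the Eigen2-style sequence
+ //   v.startFill(3); v.fill(2) = 1.0; v.endFill();
+ // is now written
+ //   v.setZero(); v.reserve(3); v.insertBack(2) = 1.0; v.finalize();
+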
+# ifdef EIGEN_SPARSEVECTOR_PLUGIN
+# include EIGEN_SPARSEVECTOR_PLUGIN
+# endif
};
-template<typename Scalar, int _Flags>
-class SparseVector<Scalar,_Flags>::InnerIterator
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::InnerIterator
{
public:
- InnerIterator(const SparseVector& vec, int outer=0)
- : m_data(vec.m_data), m_id(0), m_end(m_data.size())
+ InnerIterator(const SparseVector& vec, Index outer=0)
+ : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
{
- ei_assert(outer==0);
+ eigen_assert(outer==0);
}
-
- InnerIterator(const CompressedStorage<Scalar>& data)
- : m_data(data), m_id(0), m_end(m_data.size())
+
+ InnerIterator(const CompressedStorage<Scalar,Index>& data)
+ : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
{}
template<unsigned int Added, unsigned int Removed>
- InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, int outer)
+ InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, Index )
: m_data(vec._expression().m_data), m_id(0), m_end(m_data.size())
{}
@@ -350,19 +416,16 @@ class SparseVector<Scalar,_Flags>::InnerIterator
inline Scalar value() const { return m_data.value(m_id); }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
- inline int index() const { return m_data.index(m_id); }
- inline int row() const { return IsColVector ? index() : 0; }
- inline int col() const { return IsColVector ? 0 : index(); }
+ inline Index index() const { return m_data.index(m_id); }
+ inline Index row() const { return IsColVector ? index() : 0; }
+ inline Index col() const { return IsColVector ? 0 : index(); }
inline operator bool() const { return (m_id < m_end); }
protected:
- const CompressedStorage<Scalar>& m_data;
- int m_id;
- const int m_end;
-
- private:
- InnerIterator& operator=(const InnerIterator&);
+ const CompressedStorage<Scalar,Index>& m_data;
+ Index m_id;
+ const Index m_end;
};
#endif // EIGEN_SPARSEVECTOR_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseView.h b/extern/Eigen3/Eigen/src/Sparse/SparseView.h
new file mode 100644
index 00000000000..24306561098
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseView.h
@@ -0,0 +1,109 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEVIEW_H
+#define EIGEN_SPARSEVIEW_H
+
+namespace internal {
+
+template<typename MatrixType>
+struct traits<SparseView<MatrixType> > : traits<MatrixType>
+{
+ typedef int Index;
+ typedef Sparse StorageKind;
+ enum {
+ Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
+ };
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
+{
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+
+ SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
+            typename NumTraits<Scalar>::Real epsilon = NumTraits<Scalar>::dummy_precision()) :
+   m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
+
+ class InnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ inline Index innerSize() const { return m_matrix.innerSize(); }
+ inline Index outerSize() const { return m_matrix.outerSize(); }
+
+protected:
+ const MatrixTypeNested m_matrix;
+ Scalar m_reference;
+ typename NumTraits<Scalar>::Real m_epsilon;
+};
+
+template<typename MatrixType>
+class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
+{
+public:
+ typedef typename _MatrixTypeNested::InnerIterator IterBase;
+ InnerIterator(const SparseView& view, Index outer) :
+ IterBase(view.m_matrix, outer), m_view(view)
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ IterBase::operator++();
+ incrementToNonZero();
+ return *this;
+ }
+
+ using IterBase::value;
+
+protected:
+ const SparseView& m_view;
+
+private:
+ void incrementToNonZero()
+ {
+    while(bool(*this) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
+ {
+ IterBase::operator++();
+ }
+ }
+};
+
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
+                                                          typename NumTraits<Scalar>::Real epsilon) const
+{
+  return SparseView<Derived>(derived(), reference, epsilon);
+}
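+
+// Usage sketch (illustrative only):
+//   Eigen::MatrixXd D = Eigen::MatrixXd::Random(10,10);
+//   Eigen::SparseMatrix<double> S = D.sparseView(); // keeps only non-negligible entries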
+
+#endif // EIGEN_SPARSEVIEW_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h b/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h
new file mode 100644
index 00000000000..73468e0446c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h
@@ -0,0 +1,339 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
+#define EIGEN_SPARSETRIANGULARSOLVER_H
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
+struct sparse_solve_triangular_selector;
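+// The selector just declared dispatches on the triangular part (Lower/Upper)
+// and on the storage order of the lhs; e.g. a Lower, row-major lhs picks the
+// forward-substitution specialization below.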
+
+// forward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.rows(); ++i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar lastVal = 0;
+ int lastIndex = 0;
+ for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
+ {
+ lastVal = it.value();
+ lastIndex = it.index();
+ if(lastIndex==i)
+ break;
+ tmp -= lastVal * other.coeff(lastIndex,col);
+ }
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ eigen_assert(lastIndex==i);
+ other.coeffRef(i,col) = tmp/lastVal;
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.rows()-1 ; i>=0 ; --i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ typename Lhs::InnerIterator it(lhs, i);
+ if (it && it.index() == i)
+ ++it;
+ for(; it; ++it)
+ {
+ tmp -= it.value() * other.coeff(it.index(),col);
+ }
+
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ eigen_assert(it && it.index() == i);
+ other.coeffRef(i,col) = tmp/it.value();
+ }
+ }
+ }
+ }
+};
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.cols(); ++i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Mode & UnitDiag))
+ {
+ eigen_assert(it.index()==i);
+ tmp /= it.value();
+ }
+ if (it && it.index()==i)
+ ++it;
+ for(; it; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.cols()-1; i>=0; --i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ if(!(Mode & UnitDiag))
+ {
+ // FIXME lhs.coeff(i,i) might not be always efficient while it must simply be the
+ // last element of the column !
+ other.coeffRef(i,col) /= lhs.innerVector(i).lastCoeff();
+ }
+ typename Lhs::InnerIterator it(lhs, i);
+ for(; it && it.index()<i; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows());
+ eigen_assert(m_matrix.cols() == other.rows());
+ eigen_assert(!(Mode & ZeroDiag));
+ eigen_assert(Mode & (Upper|Lower));
+
+ enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+ typedef typename internal::conditional<copy,
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(m_matrix, otherCopy);
+
+ if (copy)
+ other = otherCopy;
+}
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseTriangularView<ExpressionType,Mode>::solve(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ solveInPlace(res);
+ return res;
+}
+
+// pure sparse path
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
+struct sparse_solve_triangular_sparse_selector;
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode, int UpLo>
+struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ const bool IsLower = (UpLo==Lower);
+ AmbiVector<Scalar,Index> tempVector(other.rows()*2);
+ tempVector.setBounds(0,other.rows());
+
+ Rhs res(other.rows(), other.cols());
+ res.reserve(other.nonZeros());
+
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ // FIXME estimate number of non zeros
+ tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
+ tempVector.setZero();
+ tempVector.restart();
+ for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
+ {
+ tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
+ }
+
+ for(int i=IsLower?0:lhs.cols()-1;
+ IsLower?i<lhs.cols():i>=0;
+ i+=IsLower?1:-1)
+ {
+ tempVector.restart();
+ Scalar& ci = tempVector.coeffRef(i);
+ if (ci!=Scalar(0))
+ {
+ // find
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Mode & UnitDiag))
+ {
+ if (IsLower)
+ {
+ eigen_assert(it.index()==i);
+ ci /= it.value();
+ }
+ else
+ ci /= lhs.coeff(i,i);
+ }
+ tempVector.restart();
+ if (IsLower)
+ {
+ if (it.index()==i)
+ ++it;
+ for(; it; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ else
+ {
+ for(; it && it.index()<i; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ }
+ }
+
+
+ int count = 0;
+ // FIXME compute a reference value to filter zeros
+ for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
+ {
+ ++ count;
+// std::cerr << "fill " << it.index() << ", " << col << "\n";
+// std::cout << it.value() << " ";
+ // FIXME use insertBack
+ res.insert(it.index(), col) = it.value();
+ }
+// std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n";
+ }
+ res.finalize();
+ other = res.markAsRValue();
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows());
+ eigen_assert(m_matrix.cols() == other.rows());
+ eigen_assert(!(Mode & ZeroDiag));
+ eigen_assert(Mode & (Upper|Lower));
+
+// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+// typedef typename internal::conditional<copy,
+// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+// OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(m_matrix, other.derived());
+
+// if (copy)
+// other = otherCopy;
+}
+
+#ifdef EIGEN2_SUPPORT
+
+// deprecated stuff:
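+// (code that used to read A.solveTriangularInPlace(b) is now expressed through
+// the view API, e.g. A.triangularView<Lower>().solveInPlace(b))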
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
+{
+ this->template triangular<Flags&(Upper|Lower)>().solveInPlace(other);
+}
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ derived().solveTriangularInPlace(res);
+ return res;
+}
+#endif // EIGEN2_SUPPORT
+
+#endif // EIGEN_SPARSETRIANGULARSOLVER_H
diff --git a/extern/Eigen3/Eigen/src/StlSupport/StdDeque.h b/extern/Eigen3/Eigen/src/StlSupport/StdDeque.h
new file mode 100644
index 00000000000..6f12c106dbc
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/StlSupport/StdDeque.h
@@ -0,0 +1,149 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_STDDEQUE_H
+#define EIGEN_STDDEQUE_H
+
+#include "Eigen/src/StlSupport/details.h"
+
+// Define the explicit instantiation (e.g. necessary for the Intel compiler)
+#if defined(__INTEL_COMPILER) || defined(__GNUC__)
+ #define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...) template class std::deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >;
+#else
+ #define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...)
+#endif
+
+/**
+ * This section contains a convenience MACRO which allows an easy specialization of
+ * std::deque such that for data types with alignment issues the correct allocator
+ * is used automatically.
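+ *
+ * For instance (illustrative only; Eigen::Vector4f stands for any fixed-size
+ * vectorizable type):
+ * \code
+ * #include <Eigen/Core>
+ * #include <Eigen/StdDeque>
+ * EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Eigen::Vector4f)
+ * std::deque<Eigen::Vector4f> points; // now backed by the aligned allocator
+ * \endcode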
+ */
+#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) \
+EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(__VA_ARGS__) \
+namespace std \
+{ \
+ template<typename _Ay> \
+ class deque<__VA_ARGS__, _Ay> \
+ : public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
+ { \
+ typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \
+ public: \
+ typedef __VA_ARGS__ value_type; \
+ typedef typename deque_base::allocator_type allocator_type; \
+ typedef typename deque_base::size_type size_type; \
+ typedef typename deque_base::iterator iterator; \
+ explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \
+ template<typename InputIterator> \
+ deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \
+ deque(const deque& c) : deque_base(c) {} \
+ explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
+ deque(iterator start, iterator end) : deque_base(start, end) {} \
+ deque& operator=(const deque& x) { \
+ deque_base::operator=(x); \
+ return *this; \
+ } \
+ }; \
+}
+
+// check whether we really need the std::deque specialization
+#if !(defined(_GLIBCXX_DEQUE) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::deque::resize(size_type,const T&). */
+
+namespace std {
+
+#define EIGEN_STD_DEQUE_SPECIALIZATION_BODY \
+ public: \
+ typedef T value_type; \
+ typedef typename deque_base::allocator_type allocator_type; \
+ typedef typename deque_base::size_type size_type; \
+ typedef typename deque_base::iterator iterator; \
+ typedef typename deque_base::const_iterator const_iterator; \
+ explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \
+ template<typename InputIterator> \
+ deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
+ : deque_base(first, last, a) {} \
+ deque(const deque& c) : deque_base(c) {} \
+ explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
+ deque(iterator start, iterator end) : deque_base(start, end) {} \
+ deque& operator=(const deque& x) { \
+ deque_base::operator=(x); \
+ return *this; \
+ }
+
+ template<typename T>
+ class deque<T,EIGEN_ALIGNED_ALLOCATOR<T> >
+ : public deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+ Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
+{
+ typedef deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+ Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > deque_base;
+ EIGEN_STD_DEQUE_SPECIALIZATION_BODY
+
+ void resize(size_type new_size)
+ { resize(new_size, T()); }
+
+#if defined(_DEQUE_)
+ // workaround MSVC std::deque implementation
+ void resize(size_type new_size, const value_type& x)
+ {
+ if (deque_base::size() < new_size)
+ deque_base::_Insert_n(deque_base::end(), new_size - deque_base::size(), x);
+ else if (new_size < deque_base::size())
+ deque_base::erase(deque_base::begin() + new_size, deque_base::end());
+ }
+ void push_back(const value_type& x)
+ { deque_base::push_back(x); }
+ void push_front(const value_type& x)
+ { deque_base::push_front(x); }
+ using deque_base::insert;
+ iterator insert(const_iterator position, const value_type& x)
+ { return deque_base::insert(position,x); }
+ void insert(const_iterator position, size_type new_size, const value_type& x)
+ { deque_base::insert(position, new_size, x); }
+#elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2)
+ // workaround GCC std::deque implementation
+ void resize(size_type new_size, const value_type& x)
+ {
+ if (new_size < deque_base::size())
+ deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
+ else
+ deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);
+ }
+#else
+ // either GCC 4.1 or non-GCC
+ // default implementation which should always work.
+ void resize(size_type new_size, const value_type& x)
+ {
+ if (new_size < deque_base::size())
+ deque_base::erase(deque_base::begin() + new_size, deque_base::end());
+ else if (new_size > deque_base::size())
+ deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);
+ }
+#endif
+ };
+}
+
+#endif // check whether specialization is actually required
+
+#endif // EIGEN_STDDEQUE_H
diff --git a/extern/Eigen3/Eigen/src/StlSupport/StdList.h b/extern/Eigen3/Eigen/src/StlSupport/StdList.h
new file mode 100644
index 00000000000..d329a0b2dc5
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/StlSupport/StdList.h
@@ -0,0 +1,129 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_STDLIST_H
+#define EIGEN_STDLIST_H
+
+#include "Eigen/src/StlSupport/details.h"
+
+// Define the explicit instantiation (e.g. necessary for the Intel compiler)
+#if defined(__INTEL_COMPILER) || defined(__GNUC__)
+ #define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...) template class std::list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >;
+#else
+ #define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...)
+#endif
+
+/**
+ * This section contains a convenience MACRO which allows an easy specialization of
+ * std::list such that for data types with alignment issues the correct allocator
+ * is used automatically.
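+ *
+ * For instance (illustrative only; Eigen::Vector4f stands for any fixed-size
+ * vectorizable type):
+ * \code
+ * #include <Eigen/Core>
+ * #include <Eigen/StdList>
+ * EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Eigen::Vector4f)
+ * std::list<Eigen::Vector4f> points; // now backed by the aligned allocator
+ * \endcode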
+ */
+#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) \
+EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(__VA_ARGS__) \
+namespace std \
+{ \
+ template<typename _Ay> \
+ class list<__VA_ARGS__, _Ay> \
+ : public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
+ { \
+ typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \
+ public: \
+ typedef __VA_ARGS__ value_type; \
+ typedef typename list_base::allocator_type allocator_type; \
+ typedef typename list_base::size_type size_type; \
+ typedef typename list_base::iterator iterator; \
+ explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \
+ template<typename InputIterator> \
+ list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \
+ list(const list& c) : list_base(c) {} \
+ explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
+ list(iterator start, iterator end) : list_base(start, end) {} \
+ list& operator=(const list& x) { \
+ list_base::operator=(x); \
+ return *this; \
+ } \
+ }; \
+}
+
+// check whether we really need the std::list specialization
+#if !(defined(_GLIBCXX_LIST) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::list::resize(size_type,const T&). */
+
+namespace std
+{
+
+#define EIGEN_STD_LIST_SPECIALIZATION_BODY \
+ public: \
+ typedef T value_type; \
+ typedef typename list_base::allocator_type allocator_type; \
+ typedef typename list_base::size_type size_type; \
+ typedef typename list_base::iterator iterator; \
+ typedef typename list_base::const_iterator const_iterator; \
+ explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \
+ template<typename InputIterator> \
+ list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
+ : list_base(first, last, a) {} \
+ list(const list& c) : list_base(c) {} \
+ explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
+ list(iterator start, iterator end) : list_base(start, end) {} \
+ list& operator=(const list& x) { \
+ list_base::operator=(x); \
+ return *this; \
+ }
+
+ template<typename T>
+ class list<T,EIGEN_ALIGNED_ALLOCATOR<T> >
+ : public list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+ Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
+ {
+ typedef list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+ Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > list_base;
+ EIGEN_STD_LIST_SPECIALIZATION_BODY
+
+ void resize(size_type new_size)
+ { resize(new_size, T()); }
+
+ void resize(size_type new_size, const value_type& x)
+ {
+ if (list_base::size() < new_size)
+ list_base::insert(list_base::end(), new_size - list_base::size(), x);
+ else
+ while (new_size < list_base::size()) list_base::pop_back();
+ }
+
+#if defined(_LIST_)
+ // workaround MSVC std::list implementation
+ void push_back(const value_type& x)
+ { list_base::push_back(x); }
+ using list_base::insert;
+ iterator insert(const_iterator position, const value_type& x)
+ { return list_base::insert(position,x); }
+ void insert(const_iterator position, size_type new_size, const value_type& x)
+ { list_base::insert(position, new_size, x); }
+#endif
+ };
+}
+
+#endif // check whether specialization is actually required
+
+#endif // EIGEN_STDLIST_H
diff --git a/extern/Eigen2/Eigen/NewStdVector b/extern/Eigen3/Eigen/src/StlSupport/StdVector.h
index f37de5ff673..27d6ab539f9 100644
--- a/extern/Eigen2/Eigen/NewStdVector
+++ b/extern/Eigen3/Eigen/src/StlSupport/StdVector.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
//
// Eigen is free software; you can redistribute it and/or
@@ -23,68 +23,40 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
-#ifndef EIGEN_STDVECTOR_MODULE_H
-#define EIGEN_STDVECTOR_MODULE_H
-
-#include "Core"
-#include <vector>
-
-namespace Eigen {
-
-// This one is needed to prevent reimplementing the whole std::vector.
-template <class T>
-class aligned_allocator_indirection : public aligned_allocator<T>
-{
-public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef T* pointer;
- typedef const T* const_pointer;
- typedef T& reference;
- typedef const T& const_reference;
- typedef T value_type;
-
- template<class U>
- struct rebind
- {
- typedef aligned_allocator_indirection<U> other;
- };
-
- aligned_allocator_indirection() throw() {}
- aligned_allocator_indirection(const aligned_allocator_indirection& ) throw() : aligned_allocator<T>() {}
- aligned_allocator_indirection(const aligned_allocator<T>& ) throw() {}
- template<class U>
- aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) throw() {}
- template<class U>
- aligned_allocator_indirection(const aligned_allocator<U>& ) throw() {}
- ~aligned_allocator_indirection() throw() {}
-};
-
-#ifdef _MSC_VER
-
- // sometimes, MSVC detects, at compile time, that the argument x
- // in std::vector::resize(size_t s,T x) won't be aligned and generate an error
- // even if this function is never called. Whence this little wrapper.
- #define EIGEN_WORKAROUND_MSVC_STD_VECTOR(T) Eigen::ei_workaround_msvc_std_vector<T>
- template<typename T> struct ei_workaround_msvc_std_vector : public T
- {
- inline ei_workaround_msvc_std_vector() : T() {}
- inline ei_workaround_msvc_std_vector(const T& other) : T(other) {}
- inline operator T& () { return *static_cast<T*>(this); }
- inline operator const T& () const { return *static_cast<const T*>(this); }
- template<typename OtherT>
- inline T& operator=(const OtherT& other)
- { T::operator=(other); return *this; }
- inline ei_workaround_msvc_std_vector& operator=(const ei_workaround_msvc_std_vector& other)
- { T::operator=(other); return *this; }
- };
-
-#else
-
- #define EIGEN_WORKAROUND_MSVC_STD_VECTOR(T) T
-
-#endif
-
+#ifndef EIGEN_STDVECTOR_H
+#define EIGEN_STDVECTOR_H
+
+#include "Eigen/src/StlSupport/details.h"
+
+/**
+ * This section contains a convenience MACRO which allows an easy specialization of
+ * std::vector such that for data types with alignment issues the correct allocator
+ * is used automatically.
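+ *
+ * For instance (illustrative only; Eigen::Vector4f stands for any fixed-size
+ * vectorizable type):
+ * \code
+ * #include <Eigen/Core>
+ * #include <Eigen/StdVector>
+ * EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Vector4f)
+ * std::vector<Eigen::Vector4f> points; // now uses the aligned allocator
+ * \endcode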
+ */
+#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \
+namespace std \
+{ \
+ template<> \
+ class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
+ : public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
+ { \
+ typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \
+ public: \
+ typedef __VA_ARGS__ value_type; \
+ typedef vector_base::allocator_type allocator_type; \
+ typedef vector_base::size_type size_type; \
+ typedef vector_base::iterator iterator; \
+ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
+ template<typename InputIterator> \
+ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
+ vector(const vector& c) : vector_base(c) {} \
+ explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
+ vector(iterator start, iterator end) : vector_base(start, end) {} \
+ vector& operator=(const vector& x) { \
+ vector_base::operator=(x); \
+ return *this; \
+ } \
+ }; \
}
namespace std {
@@ -108,13 +80,13 @@ namespace std {
return *this; \
}
-template<typename T>
-class vector<T,Eigen::aligned_allocator<T> >
- : public vector<EIGEN_WORKAROUND_MSVC_STD_VECTOR(T),
- Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STD_VECTOR(T)> >
+ template<typename T>
+ class vector<T,EIGEN_ALIGNED_ALLOCATOR<T> >
+ : public vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+ Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
{
- typedef vector<EIGEN_WORKAROUND_MSVC_STD_VECTOR(T),
- Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STD_VECTOR(T)> > vector_base;
+ typedef vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+ Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > vector_base;
EIGEN_STD_VECTOR_SPECIALIZATION_BODY
void resize(size_type new_size)
@@ -136,6 +108,13 @@ class vector<T,Eigen::aligned_allocator<T> >
{ return vector_base::insert(position,x); }
void insert(const_iterator position, size_type new_size, const value_type& x)
{ vector_base::insert(position, new_size, x); }
+#elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1)))
+ /* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&).
+ * However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */
+ void resize(size_type new_size, const value_type& x)
+ {
+ vector_base::resize(new_size,x);
+ }
#elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2)
// workaround GCC std::vector implementation
void resize(size_type new_size, const value_type& x)
@@ -145,10 +124,6 @@ class vector<T,Eigen::aligned_allocator<T> >
else
vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
}
-#elif defined(_GLIBCXX_VECTOR) && (!EIGEN_GNUC_AT_LEAST(4,1))
- // Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&),
- // so no need to work around!
- using vector_base::resize;
#else
// either GCC 4.1 or non-GCC
// default implementation which should always work.
@@ -160,9 +135,7 @@ class vector<T,Eigen::aligned_allocator<T> >
vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
}
#endif
-
-};
-
+ };
}
-#endif // EIGEN_STDVECTOR_MODULE_H
+#endif // EIGEN_STDVECTOR_H
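
For context (not part of the patch itself): the macro above is meant to be instantiated once at global scope for each fixed-size vectorizable type. A minimal usage sketch, assuming the standard <Eigen/StdVector> include:

    #include <Eigen/StdVector>
    #include <vector>

    // One macro invocation per problematic (fixed-size, vectorizable) type.
    EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Vector4f)

    int main()
    {
      // Thanks to the specialization, the default allocator is silently
      // replaced by the aligned one, so 16-byte alignment is preserved.
      std::vector<Eigen::Vector4f> v(8, Eigen::Vector4f::Zero());
      v.resize(16, Eigen::Vector4f::Ones());
      return 0;
    }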
diff --git a/extern/Eigen3/Eigen/src/StlSupport/details.h b/extern/Eigen3/Eigen/src/StlSupport/details.h
new file mode 100644
index 00000000000..397c8ef8581
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/StlSupport/details.h
@@ -0,0 +1,99 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_STL_DETAILS_H
+#define EIGEN_STL_DETAILS_H
+
+#ifndef EIGEN_ALIGNED_ALLOCATOR
+ #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator
+#endif
+
+namespace Eigen {
+
+ // This one is needed to prevent reimplementing the whole std::vector.
+ template <class T>
+ class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR<T>
+ {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+
+ template<class U>
+ struct rebind
+ {
+ typedef aligned_allocator_indirection<U> other;
+ };
+
+ aligned_allocator_indirection() {}
+ aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR<T>() {}
+ aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<T>& ) {}
+ template<class U>
+ aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) {}
+ template<class U>
+ aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<U>& ) {}
+ ~aligned_allocator_indirection() {}
+ };
+
+#ifdef _MSC_VER
+
+ // Sometimes MSVC detects, at compile time, that the argument x
+ // in std::vector::resize(size_t s,T x) won't be aligned and generates an error
+ // even if this function is never called. Hence this little wrapper.
+#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \
+ typename Eigen::internal::conditional< \
+ Eigen::internal::is_arithmetic<T>::value, \
+ T, \
+ Eigen::internal::workaround_msvc_stl_support<T> \
+ >::type
+
+ namespace internal {
+ template<typename T> struct workaround_msvc_stl_support : public T
+ {
+ inline workaround_msvc_stl_support() : T() {}
+ inline workaround_msvc_stl_support(const T& other) : T(other) {}
+ inline operator T& () { return *static_cast<T*>(this); }
+ inline operator const T& () const { return *static_cast<const T*>(this); }
+ template<typename OtherT>
+ inline T& operator=(const OtherT& other)
+ { T::operator=(other); return *this; }
+ inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other)
+ { T::operator=(other); return *this; }
+ };
+ }
+
+#else
+
+#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T
+
+#endif
+
+}
+
+#endif // EIGEN_STL_DETAILS_H
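
A small sketch of what the indirection enables (illustrative only): users keep spelling the documented aligned_allocator, while the std::vector specialization defined in StdVector.h inherits from the indirection flavour behind the scenes:

    #include <Eigen/StdVector>
    #include <vector>

    int main()
    {
      // The explicit aligned_allocator form is routed through the
      // specialization machinery above via aligned_allocator_indirection.
      std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > v;
      v.resize(3, Eigen::Vector4f::Ones());
      return 0;
    }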
diff --git a/extern/Eigen3/Eigen/src/misc/Image.h b/extern/Eigen3/Eigen/src/misc/Image.h
new file mode 100644
index 00000000000..19b3e08cbfd
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/misc/Image.h
@@ -0,0 +1,95 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MISC_IMAGE_H
+#define EIGEN_MISC_IMAGE_H
+
+namespace internal {
+
+/** \class image_retval_base
+ * \brief Common base class for the return type of decompositions' image(),
+ * evaluated lazily through the ReturnByValue mechanism.
+ */
+template<typename DecompositionType>
+struct traits<image_retval_base<DecompositionType> >
+{
+ typedef typename DecompositionType::MatrixType MatrixType;
+ typedef Matrix<
+ typename MatrixType::Scalar,
+ MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose
+ // dimension is the number of rows of the original matrix
+ Dynamic, // we don't know at compile time the dimension of the image (the rank)
+ MatrixType::Options,
+ MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
+ MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns.
+ > ReturnType;
+};
+
+template<typename _DecompositionType> struct image_retval_base
+ : public ReturnByValue<image_retval_base<_DecompositionType> >
+{
+ typedef _DecompositionType DecompositionType;
+ typedef typename DecompositionType::MatrixType MatrixType;
+ typedef ReturnByValue<image_retval_base> Base;
+ typedef typename Base::Index Index;
+
+ image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix)
+ : m_dec(dec), m_rank(dec.rank()),
+ m_cols(m_rank == 0 ? 1 : m_rank),
+ m_originalMatrix(originalMatrix)
+ {}
+
+ inline Index rows() const { return m_dec.rows(); }
+ inline Index cols() const { return m_cols; }
+ inline Index rank() const { return m_rank; }
+ inline const DecompositionType& dec() const { return m_dec; }
+ inline const MatrixType& originalMatrix() const { return m_originalMatrix; }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ static_cast<const image_retval<DecompositionType>*>(this)->evalTo(dst);
+ }
+
+ protected:
+ const DecompositionType& m_dec;
+ Index m_rank, m_cols;
+ const MatrixType& m_originalMatrix;
+};
+
+} // end namespace internal
+
+#define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \
+ typedef typename DecompositionType::MatrixType MatrixType; \
+ typedef typename MatrixType::Scalar Scalar; \
+ typedef typename MatrixType::RealScalar RealScalar; \
+ typedef typename MatrixType::Index Index; \
+ typedef Eigen::internal::image_retval_base<DecompositionType> Base; \
+ using Base::dec; \
+ using Base::originalMatrix; \
+ using Base::rank; \
+ using Base::rows; \
+ using Base::cols; \
+ image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \
+ : Base(dec, originalMatrix) {}
+
+#endif // EIGEN_MISC_IMAGE_H
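
To illustrate how image_retval_base is reached in practice (a sketch, not part of the patch): decompositions such as FullPivLU expose image(), whose return value derives from this base; the assignment below triggers evalTo():

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXf m(3,3);
      m << 1, 1, 0,
           1, 3, 2,
           0, 1, 1;
      Eigen::FullPivLU<Eigen::MatrixXf> lu(m);
      // evalTo() on the derived image_retval fills 'img' with rank() columns
      // of the original matrix spanning its column space.
      Eigen::MatrixXf img = lu.image(m);
      std::cout << img << "\n";
      return 0;
    }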
diff --git a/extern/Eigen3/Eigen/src/misc/Kernel.h b/extern/Eigen3/Eigen/src/misc/Kernel.h
new file mode 100644
index 00000000000..0115970e8eb
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/misc/Kernel.h
@@ -0,0 +1,92 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MISC_KERNEL_H
+#define EIGEN_MISC_KERNEL_H
+
+namespace internal {
+
+/** \class kernel_retval_base
+ * \brief Common base class for the return type of decompositions' kernel(),
+ * evaluated lazily through the ReturnByValue mechanism.
+ */
+template<typename DecompositionType>
+struct traits<kernel_retval_base<DecompositionType> >
+{
+ typedef typename DecompositionType::MatrixType MatrixType;
+ typedef Matrix<
+ typename MatrixType::Scalar,
+ MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix"
+ // is the number of cols of the original matrix
+ // so that the product "matrix * kernel = zero" makes sense
+ Dynamic, // we don't know at compile-time the dimension of the kernel
+ MatrixType::Options,
+ MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
+ MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space,
+ // whose dimension is the number of columns of the original matrix
+ > ReturnType;
+};
+
+template<typename _DecompositionType> struct kernel_retval_base
+ : public ReturnByValue<kernel_retval_base<_DecompositionType> >
+{
+ typedef _DecompositionType DecompositionType;
+ typedef ReturnByValue<kernel_retval_base> Base;
+ typedef typename Base::Index Index;
+
+ kernel_retval_base(const DecompositionType& dec)
+ : m_dec(dec),
+ m_rank(dec.rank()),
+ m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank)
+ {}
+
+ inline Index rows() const { return m_dec.cols(); }
+ inline Index cols() const { return m_cols; }
+ inline Index rank() const { return m_rank; }
+ inline const DecompositionType& dec() const { return m_dec; }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ static_cast<const kernel_retval<DecompositionType>*>(this)->evalTo(dst);
+ }
+
+ protected:
+ const DecompositionType& m_dec;
+ Index m_rank, m_cols;
+};
+
+} // end namespace internal
+
+#define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \
+ typedef typename DecompositionType::MatrixType MatrixType; \
+ typedef typename MatrixType::Scalar Scalar; \
+ typedef typename MatrixType::RealScalar RealScalar; \
+ typedef typename MatrixType::Index Index; \
+ typedef Eigen::internal::kernel_retval_base<DecompositionType> Base; \
+ using Base::dec; \
+ using Base::rank; \
+ using Base::rows; \
+ using Base::cols; \
+ kernel_retval(const DecompositionType& dec) : Base(dec) {}
+
+#endif // EIGEN_MISC_KERNEL_H
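
Companion sketch for the kernel case (illustrative): kernel() on a rank-deficient matrix returns a kernel_retval whose evalTo() produces a basis of the null space:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXf m(3,3);
      m << 1, 2, 3,
           2, 4, 6,   // row 2 = 2 * row 1, so the matrix is rank-deficient
           0, 0, 1;
      Eigen::FullPivLU<Eigen::MatrixXf> lu(m);
      Eigen::MatrixXf ker = lu.kernel();
      std::cout << (m * ker).norm() << "\n"; // ~0: "matrix * kernel = zero"
      return 0;
    }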
diff --git a/extern/Eigen3/Eigen/src/misc/Solve.h b/extern/Eigen3/Eigen/src/misc/Solve.h
new file mode 100644
index 00000000000..b7cbcadb392
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/misc/Solve.h
@@ -0,0 +1,87 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MISC_SOLVE_H
+#define EIGEN_MISC_SOLVE_H
+
+namespace internal {
+
+/** \class solve_retval_base
+ * \brief Common base class for the return type of decompositions' solve(),
+ * evaluated lazily through the ReturnByValue mechanism.
+ */
+template<typename DecompositionType, typename Rhs>
+struct traits<solve_retval_base<DecompositionType, Rhs> >
+{
+ typedef typename DecompositionType::MatrixType MatrixType;
+ typedef Matrix<typename Rhs::Scalar,
+ MatrixType::ColsAtCompileTime,
+ Rhs::ColsAtCompileTime,
+ Rhs::PlainObject::Options,
+ MatrixType::MaxColsAtCompileTime,
+ Rhs::MaxColsAtCompileTime> ReturnType;
+};
+
+template<typename _DecompositionType, typename Rhs> struct solve_retval_base
+ : public ReturnByValue<solve_retval_base<_DecompositionType, Rhs> >
+{
+ typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
+ typedef _DecompositionType DecompositionType;
+ typedef ReturnByValue<solve_retval_base> Base;
+ typedef typename Base::Index Index;
+
+ solve_retval_base(const DecompositionType& dec, const Rhs& rhs)
+ : m_dec(dec), m_rhs(rhs)
+ {}
+
+ inline Index rows() const { return m_dec.cols(); }
+ inline Index cols() const { return m_rhs.cols(); }
+ inline const DecompositionType& dec() const { return m_dec; }
+ inline const RhsNestedCleaned& rhs() const { return m_rhs; }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ static_cast<const solve_retval<DecompositionType,Rhs>*>(this)->evalTo(dst);
+ }
+
+ protected:
+ const DecompositionType& m_dec;
+ const typename Rhs::Nested m_rhs;
+};
+
+} // end namespace internal
+
+#define EIGEN_MAKE_SOLVE_HELPERS(DecompositionType,Rhs) \
+ typedef typename DecompositionType::MatrixType MatrixType; \
+ typedef typename MatrixType::Scalar Scalar; \
+ typedef typename MatrixType::RealScalar RealScalar; \
+ typedef typename MatrixType::Index Index; \
+ typedef Eigen::internal::solve_retval_base<DecompositionType,Rhs> Base; \
+ using Base::dec; \
+ using Base::rhs; \
+ using Base::rows; \
+ using Base::cols; \
+ solve_retval(const DecompositionType& dec, const Rhs& rhs) \
+ : Base(dec, rhs) {}
+
+#endif // EIGEN_MISC_SOLVE_H
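
A sketch of the call path (illustrative): dec.solve(rhs) returns a solve_retval; assigning it to a concrete matrix invokes evalTo() on the derived class:

    #include <Eigen/Dense>

    int main()
    {
      Eigen::Matrix3f A = Eigen::Matrix3f::Random(); // assumed invertible here
      Eigen::Vector3f b = Eigen::Vector3f::Random();
      Eigen::PartialPivLU<Eigen::Matrix3f> lu(A);
      Eigen::Vector3f x = lu.solve(b); // ReturnByValue evaluates into x
      return (A * x).isApprox(b) ? 0 : 1;
    }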
diff --git a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
new file mode 100644
index 00000000000..7d509e78f3a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
@@ -0,0 +1,143 @@
+/** \returns an expression of the coefficient-wise product of \c *this and \a other
+ *
+ * \sa MatrixBase::cwiseProduct
+ */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)
+operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise quotient of \c *this and \a other
+ *
+ * \sa MatrixBase::cwiseQuotient
+ */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
+operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise min of \c *this and \a other
+ *
+ * Example: \include Cwise_min.cpp
+ * Output: \verbinclude Cwise_min.out
+ *
+ * \sa max()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(min,internal::scalar_min_op)
+
+/** \returns an expression of the coefficient-wise max of \c *this and \a other
+ *
+ * Example: \include Cwise_max.cpp
+ * Output: \verbinclude Cwise_max.out
+ *
+ * \sa min()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(max,internal::scalar_max_op)
+
+/** \returns an expression of the coefficient-wise \< operator of *this and \a other
+ *
+ * Example: \include Cwise_less.cpp
+ * Output: \verbinclude Cwise_less.out
+ *
+ * \sa all(), any(), operator>(), operator<=()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)
+
+/** \returns an expression of the coefficient-wise \<= operator of *this and \a other
+ *
+ * Example: \include Cwise_less_equal.cpp
+ * Output: \verbinclude Cwise_less_equal.out
+ *
+ * \sa all(), any(), operator>=(), operator<()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)
+
+/** \returns an expression of the coefficient-wise \> operator of *this and \a other
+ *
+ * Example: \include Cwise_greater.cpp
+ * Output: \verbinclude Cwise_greater.out
+ *
+ * \sa all(), any(), operator>=(), operator<()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)
+
+/** \returns an expression of the coefficient-wise \>= operator of *this and \a other
+ *
+ * Example: \include Cwise_greater_equal.cpp
+ * Output: \verbinclude Cwise_greater_equal.out
+ *
+ * \sa all(), any(), operator>(), operator<=()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)
+
+/** \returns an expression of the coefficient-wise == operator of *this and \a other
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * Example: \include Cwise_equal_equal.cpp
+ * Output: \verbinclude Cwise_equal_equal.out
+ *
+ * \sa all(), any(), isApprox(), isMuchSmallerThan()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)
+
+/** \returns an expression of the coefficient-wise != operator of *this and \a other
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * Example: \include Cwise_not_equal.cpp
+ * Output: \verbinclude Cwise_not_equal.out
+ *
+ * \sa all(), any(), isApprox(), isMuchSmallerThan()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(operator!=,std::not_equal_to)
+
+// scalar addition
+
+/** \returns an expression of \c *this with each coeff incremented by the constant \a scalar
+ *
+ * Example: \include Cwise_plus.cpp
+ * Output: \verbinclude Cwise_plus.out
+ *
+ * \sa operator+=(), operator-()
+ */
+inline const CwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>
+operator+(const Scalar& scalar) const
+{
+ return CwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>(derived(), internal::scalar_add_op<Scalar>(scalar));
+}
+
+friend inline const CwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>
+operator+(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS<Derived>& other)
+{
+ return other + scalar;
+}
+
+/** \returns an expression of \c *this with each coeff decremented by the constant \a scalar
+ *
+ * Example: \include Cwise_minus.cpp
+ * Output: \verbinclude Cwise_minus.out
+ *
+ * \sa operator+(), operator-=()
+ */
+inline const CwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>
+operator-(const Scalar& scalar) const
+{
+ return *this + (-scalar);
+}
+
+friend inline const CwiseUnaryOp<internal::scalar_add_op<Scalar>, const CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived> >
+operator-(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS<Derived>& other)
+{
+ return (-other) + scalar;
+}
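
A quick usage sketch of the array-only binary operators declared above (illustrative, assuming the usual <Eigen/Dense> umbrella header):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXf a(4), b(4);
      a << 1, 2, 3, 4;
      b << 4, 3, 2, 1;
      std::cout << a.min(b) << "\n";   // 1 2 2 1, coefficient-wise min
      std::cout << (a < b) << "\n";    // 1 1 0 0, an array of booleans
      std::cout << (a + 1.5f) << "\n"; // 2.5 3.5 4.5 5.5, scalar addition
      return 0;
    }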
diff --git a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
new file mode 100644
index 00000000000..0dffaf4135c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -0,0 +1,202 @@
+
+
+/** \returns an expression of the coefficient-wise absolute value of \c *this
+ *
+ * Example: \include Cwise_abs.cpp
+ * Output: \verbinclude Cwise_abs.out
+ *
+ * \sa abs2()
+ */
+EIGEN_STRONG_INLINE const CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
+abs() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise squared absolute value of \c *this
+ *
+ * Example: \include Cwise_abs2.cpp
+ * Output: \verbinclude Cwise_abs2.out
+ *
+ * \sa abs(), square()
+ */
+EIGEN_STRONG_INLINE const CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived>
+abs2() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise exponential of *this.
+ *
+ * Example: \include Cwise_exp.cpp
+ * Output: \verbinclude Cwise_exp.out
+ *
+ * \sa pow(), log(), sin(), cos()
+ */
+inline const CwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived>
+exp() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise logarithm of *this.
+ *
+ * Example: \include Cwise_log.cpp
+ * Output: \verbinclude Cwise_log.out
+ *
+ * \sa exp()
+ */
+inline const CwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived>
+log() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise square root of *this.
+ *
+ * Example: \include Cwise_sqrt.cpp
+ * Output: \verbinclude Cwise_sqrt.out
+ *
+ * \sa pow(), square()
+ */
+inline const CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived>
+sqrt() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise cosine of *this.
+ *
+ * Example: \include Cwise_cos.cpp
+ * Output: \verbinclude Cwise_cos.out
+ *
+ * \sa sin(), acos()
+ */
+inline const CwiseUnaryOp<internal::scalar_cos_op<Scalar>, const Derived>
+cos() const
+{
+ return derived();
+}
+
+
+/** \returns an expression of the coefficient-wise sine of *this.
+ *
+ * Example: \include Cwise_sin.cpp
+ * Output: \verbinclude Cwise_sin.out
+ *
+ * \sa cos(), asin()
+ */
+inline const CwiseUnaryOp<internal::scalar_sin_op<Scalar>, const Derived>
+sin() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise arc cosine of *this.
+ *
+ * Example: \include Cwise_acos.cpp
+ * Output: \verbinclude Cwise_acos.out
+ *
+ * \sa cos(), asin()
+ */
+inline const CwiseUnaryOp<internal::scalar_acos_op<Scalar>, const Derived>
+acos() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise arc sine of *this.
+ *
+ * Example: \include Cwise_asin.cpp
+ * Output: \verbinclude Cwise_asin.out
+ *
+ * \sa sin(), acos()
+ */
+inline const CwiseUnaryOp<internal::scalar_asin_op<Scalar>, const Derived>
+asin() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise tan of *this.
+ *
+ * Example: \include Cwise_tan.cpp
+ * Output: \verbinclude Cwise_tan.out
+ *
+ * \sa cos(), sin()
+ */
+inline const CwiseUnaryOp<internal::scalar_tan_op<Scalar>, const Derived>
+tan() const
+{
+ return derived();
+}
+
+
+/** \returns an expression of the coefficient-wise power of *this to the given exponent.
+ *
+ * Example: \include Cwise_pow.cpp
+ * Output: \verbinclude Cwise_pow.out
+ *
+ * \sa exp(), log()
+ */
+inline const CwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
+pow(const Scalar& exponent) const
+{
+ return CwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
+ (derived(), internal::scalar_pow_op<Scalar>(exponent));
+}
+
+
+/** \returns an expression of the coefficient-wise inverse of *this.
+ *
+ * Example: \include Cwise_inverse.cpp
+ * Output: \verbinclude Cwise_inverse.out
+ *
+ * \sa operator/(), operator*()
+ */
+inline const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
+inverse() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise square of *this.
+ *
+ * Example: \include Cwise_square.cpp
+ * Output: \verbinclude Cwise_square.out
+ *
+ * \sa operator/(), operator*(), abs2()
+ */
+inline const CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived>
+square() const
+{
+ return derived();
+}
+
+/** \returns an expression of the coefficient-wise cube of *this.
+ *
+ * Example: \include Cwise_cube.cpp
+ * Output: \verbinclude Cwise_cube.out
+ *
+ * \sa square(), pow()
+ */
+inline const CwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived>
+cube() const
+{
+ return derived();
+}
+
+#define EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(METHOD_NAME,FUNCTOR) \
+ inline const CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
+ METHOD_NAME(const Scalar& s) const { \
+ return CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
+ (derived(), std::bind2nd(FUNCTOR<Scalar>(), s)); \
+ }
+
+EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator==, std::equal_to)
+EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator!=, std::not_equal_to)
+EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<, std::less)
+EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<=, std::less_equal)
+EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>, std::greater)
+EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>=, std::greater_equal)
+
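Usage sketch for the unary operators above (illustrative); each call builds a lazy expression that is only evaluated when assigned or printed:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXf a(3);
      a << -4.0f, 9.0f, 16.0f;
      std::cout << a.abs().sqrt() << "\n"; // 2 3 4
      std::cout << a.pow(2.0f) << "\n";    // 16 81 256
      std::cout << (a >= 9.0f) << "\n";    // 0 1 1, scalar comparison
      return 0;
    }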
diff --git a/extern/Eigen3/Eigen/src/plugins/BlockMethods.h b/extern/Eigen3/Eigen/src/plugins/BlockMethods.h
new file mode 100644
index 00000000000..4eba933388a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/plugins/BlockMethods.h
@@ -0,0 +1,595 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_BLOCKMETHODS_H
+#define EIGEN_BLOCKMETHODS_H
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+/** \internal expression type of a column */
+typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ColXpr;
+typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ConstColXpr;
+/** \internal expression type of a row */
+typedef Block<Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowXpr;
+typedef const Block<const Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowXpr;
+/** \internal expression type of a block of whole columns */
+typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr;
+typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ConstColsBlockXpr;
+/** \internal expression type of a block of whole rows */
+typedef Block<Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowsBlockXpr;
+typedef const Block<const Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowsBlockXpr;
+/** \internal expression type of a block of whole columns */
+template<int N> struct NColsBlockXpr { typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };
+template<int N> struct ConstNColsBlockXpr { typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };
+/** \internal expression type of a block of whole rows */
+template<int N> struct NRowsBlockXpr { typedef Block<Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
+template<int N> struct ConstNRowsBlockXpr { typedef const Block<const Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
+
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+/** \returns a dynamic-size expression of a block in *this.
+ *
+ * \param startRow the first row in the block
+ * \param startCol the first column in the block
+ * \param blockRows the number of rows in the block
+ * \param blockCols the number of columns in the block
+ *
+ * Example: \include MatrixBase_block_int_int_int_int.cpp
+ * Output: \verbinclude MatrixBase_block_int_int_int_int.out
+ *
+ * \note Even though the returned expression has dynamic size, in the case
+ * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
+ * which means that evaluating it does not cause a dynamic memory allocation.
+ *
+ * \sa class Block, block(Index,Index)
+ */
+inline Block<Derived> block(Index startRow, Index startCol, Index blockRows, Index blockCols)
+{
+ return Block<Derived>(derived(), startRow, startCol, blockRows, blockCols);
+}
+
+/** This is the const version of block(Index,Index,Index,Index). */
+inline const Block<const Derived> block(Index startRow, Index startCol, Index blockRows, Index blockCols) const
+{
+ return Block<const Derived>(derived(), startRow, startCol, blockRows, blockCols);
+}
+
+
+
+
+/** \returns a dynamic-size expression of a top-right corner of *this.
+ *
+ * \param cRows the number of rows in the corner
+ * \param cCols the number of columns in the corner
+ *
+ * Example: \include MatrixBase_topRightCorner_int_int.cpp
+ * Output: \verbinclude MatrixBase_topRightCorner_int_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline Block<Derived> topRightCorner(Index cRows, Index cCols)
+{
+ return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
+}
+
+/** This is the const version of topRightCorner(Index, Index).*/
+inline const Block<const Derived> topRightCorner(Index cRows, Index cCols) const
+{
+ return Block<const Derived>(derived(), 0, cols() - cCols, cRows, cCols);
+}
+
+/** \returns an expression of a fixed-size top-right corner of *this.
+ *
+ * The template parameters CRows and CCols are the number of rows and columns in the corner.
+ *
+ * Example: \include MatrixBase_template_int_int_topRightCorner.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> topRightCorner()
+{
+ return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
+}
+
+/** This is the const version of topRightCorner<int, int>().*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> topRightCorner() const
+{
+ return Block<const Derived, CRows, CCols>(derived(), 0, cols() - CCols);
+}
+
+
+
+
+/** \returns a dynamic-size expression of a top-left corner of *this.
+ *
+ * \param cRows the number of rows in the corner
+ * \param cCols the number of columns in the corner
+ *
+ * Example: \include MatrixBase_topLeftCorner_int_int.cpp
+ * Output: \verbinclude MatrixBase_topLeftCorner_int_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline Block<Derived> topLeftCorner(Index cRows, Index cCols)
+{
+ return Block<Derived>(derived(), 0, 0, cRows, cCols);
+}
+
+/** This is the const version of topLeftCorner(Index, Index).*/
+inline const Block<const Derived> topLeftCorner(Index cRows, Index cCols) const
+{
+ return Block<const Derived>(derived(), 0, 0, cRows, cCols);
+}
+
+/** \returns an expression of a fixed-size top-left corner of *this.
+ *
+ * The template parameters CRows and CCols are the number of rows and columns in the corner.
+ *
+ * Example: \include MatrixBase_template_int_int_topLeftCorner.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> topLeftCorner()
+{
+ return Block<Derived, CRows, CCols>(derived(), 0, 0);
+}
+
+/** This is the const version of topLeftCorner<int, int>().*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> topLeftCorner() const
+{
+ return Block<const Derived, CRows, CCols>(derived(), 0, 0);
+}
+
+
+
+/** \returns a dynamic-size expression of a bottom-right corner of *this.
+ *
+ * \param cRows the number of rows in the corner
+ * \param cCols the number of columns in the corner
+ *
+ * Example: \include MatrixBase_bottomRightCorner_int_int.cpp
+ * Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline Block<Derived> bottomRightCorner(Index cRows, Index cCols)
+{
+ return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+}
+
+/** This is the const version of bottomRightCorner(Index, Index).*/
+inline const Block<const Derived> bottomRightCorner(Index cRows, Index cCols) const
+{
+ return Block<const Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+}
+
+/** \returns an expression of a fixed-size bottom-right corner of *this.
+ *
+ * The template parameters CRows and CCols are the number of rows and columns in the corner.
+ *
+ * Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> bottomRightCorner()
+{
+ return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
+}
+
+/** This is the const version of bottomRightCorner<int, int>().*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> bottomRightCorner() const
+{
+ return Block<const Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
+}
+
+
+
+/** \returns a dynamic-size expression of a bottom-left corner of *this.
+ *
+ * \param cRows the number of rows in the corner
+ * \param cCols the number of columns in the corner
+ *
+ * Example: \include MatrixBase_bottomLeftCorner_int_int.cpp
+ * Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline Block<Derived> bottomLeftCorner(Index cRows, Index cCols)
+{
+ return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
+}
+
+/** This is the const version of bottomLeftCorner(Index, Index).*/
+inline const Block<const Derived> bottomLeftCorner(Index cRows, Index cCols) const
+{
+ return Block<const Derived>(derived(), rows() - cRows, 0, cRows, cCols);
+}
+
+/** \returns an expression of a fixed-size bottom-left corner of *this.
+ *
+ * The template parameters CRows and CCols are the number of rows and columns in the corner.
+ *
+ * Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp
+ * Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> bottomLeftCorner()
+{
+ return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
+}
+
+/** This is the const version of bottomLeftCorner<int, int>().*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> bottomLeftCorner() const
+{
+ return Block<const Derived, CRows, CCols>(derived(), rows() - CRows, 0);
+}
+
+
+
+/** \returns a block consisting of the top rows of *this.
+ *
+ * \param n the number of rows in the block
+ *
+ * Example: \include MatrixBase_topRows_int.cpp
+ * Output: \verbinclude MatrixBase_topRows_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline RowsBlockXpr topRows(Index n)
+{
+ return RowsBlockXpr(derived(), 0, 0, n, cols());
+}
+
+/** This is the const version of topRows(Index).*/
+inline ConstRowsBlockXpr topRows(Index n) const
+{
+ return ConstRowsBlockXpr(derived(), 0, 0, n, cols());
+}
+
+/** \returns a block consisting of the top rows of *this.
+ *
+ * \tparam N the number of rows in the block
+ *
+ * Example: \include MatrixBase_template_int_topRows.cpp
+ * Output: \verbinclude MatrixBase_template_int_topRows.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int N>
+inline typename NRowsBlockXpr<N>::Type topRows()
+{
+ return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, N, cols());
+}
+
+/** This is the const version of topRows<int>().*/
+template<int N>
+inline typename ConstNRowsBlockXpr<N>::Type topRows() const
+{
+ return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, N, cols());
+}
+
+
+
+/** \returns a block consisting of the bottom rows of *this.
+ *
+ * \param n the number of rows in the block
+ *
+ * Example: \include MatrixBase_bottomRows_int.cpp
+ * Output: \verbinclude MatrixBase_bottomRows_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline RowsBlockXpr bottomRows(Index n)
+{
+ return RowsBlockXpr(derived(), rows() - n, 0, n, cols());
+}
+
+/** This is the const version of bottomRows(Index).*/
+inline ConstRowsBlockXpr bottomRows(Index n) const
+{
+ return ConstRowsBlockXpr(derived(), rows() - n, 0, n, cols());
+}
+
+/** \returns a block consisting of the bottom rows of *this.
+ *
+ * \tparam N the number of rows in the block
+ *
+ * Example: \include MatrixBase_template_int_bottomRows.cpp
+ * Output: \verbinclude MatrixBase_template_int_bottomRows.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int N>
+inline typename NRowsBlockXpr<N>::Type bottomRows()
+{
+ return typename NRowsBlockXpr<N>::Type(derived(), rows() - N, 0, N, cols());
+}
+
+/** This is the const version of bottomRows<int>().*/
+template<int N>
+inline typename ConstNRowsBlockXpr<N>::Type bottomRows() const
+{
+ return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - N, 0, N, cols());
+}
+
+
+
+/** \returns a block consisting of a range of rows of *this.
+ *
+ * \param startRow the index of the first row in the block
+ * \param numRows the number of rows in the block
+ *
+ * Example: \include DenseBase_middleRows_int.cpp
+ * Output: \verbinclude DenseBase_middleRows_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline RowsBlockXpr middleRows(Index startRow, Index numRows)
+{
+ return RowsBlockXpr(derived(), startRow, 0, numRows, cols());
+}
+
+/** This is the const version of middleRows(Index,Index).*/
+inline ConstRowsBlockXpr middleRows(Index startRow, Index numRows) const
+{
+ return ConstRowsBlockXpr(derived(), startRow, 0, numRows, cols());
+}
+
+/** \returns a block consisting of a range of rows of *this.
+ *
+ * \tparam N the number of rows in the block
+ * \param startRow the index of the first row in the block
+ *
+ * Example: \include DenseBase_template_int_middleRows.cpp
+ * Output: \verbinclude DenseBase_template_int_middleRows.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int N>
+inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow)
+{
+ return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, N, cols());
+}
+
+/** This is the const version of middleRows<int>().*/
+template<int N>
+inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow) const
+{
+ return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, N, cols());
+}
+
+
+
+/** \returns a block consisting of the left columns of *this.
+ *
+ * \param n the number of columns in the block
+ *
+ * Example: \include MatrixBase_leftCols_int.cpp
+ * Output: \verbinclude MatrixBase_leftCols_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline ColsBlockXpr leftCols(Index n)
+{
+ return ColsBlockXpr(derived(), 0, 0, rows(), n);
+}
+
+/** This is the const version of leftCols(Index).*/
+inline ConstColsBlockXpr leftCols(Index n) const
+{
+ return ConstColsBlockXpr(derived(), 0, 0, rows(), n);
+}
+
+/** \returns a block consisting of the left columns of *this.
+ *
+ * \tparam N the number of columns in the block
+ *
+ * Example: \include MatrixBase_template_int_leftCols.cpp
+ * Output: \verbinclude MatrixBase_template_int_leftCols.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int N>
+inline typename NColsBlockXpr<N>::Type leftCols()
+{
+ return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), N);
+}
+
+/** This is the const version of leftCols<int>().*/
+template<int N>
+inline typename ConstNColsBlockXpr<N>::Type leftCols() const
+{
+ return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), N);
+}
+
+
+
+/** \returns a block consisting of the right columns of *this.
+ *
+ * \param n the number of columns in the block
+ *
+ * Example: \include MatrixBase_rightCols_int.cpp
+ * Output: \verbinclude MatrixBase_rightCols_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline ColsBlockXpr rightCols(Index n)
+{
+ return ColsBlockXpr(derived(), 0, cols() - n, rows(), n);
+}
+
+/** This is the const version of rightCols(Index).*/
+inline ConstColsBlockXpr rightCols(Index n) const
+{
+ return ConstColsBlockXpr(derived(), 0, cols() - n, rows(), n);
+}
+
+/** \returns a block consisting of the right columns of *this.
+ *
+ * \tparam N the number of columns in the block
+ *
+ * Example: \include MatrixBase_template_int_rightCols.cpp
+ * Output: \verbinclude MatrixBase_template_int_rightCols.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int N>
+inline typename NColsBlockXpr<N>::Type rightCols()
+{
+ return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - N, rows(), N);
+}
+
+/** This is the const version of rightCols<int>().*/
+template<int N>
+inline typename ConstNColsBlockXpr<N>::Type rightCols() const
+{
+ return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - N, rows(), N);
+}
+
+
+
+/** \returns a block consisting of a range of columns of *this.
+ *
+ * \param startCol the index of the first column in the block
+ * \param numCols the number of columns in the block
+ *
+ * Example: \include DenseBase_middleCols_int.cpp
+ * Output: \verbinclude DenseBase_middleCols_int.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+inline ColsBlockXpr middleCols(Index startCol, Index numCols)
+{
+ return ColsBlockXpr(derived(), 0, startCol, rows(), numCols);
+}
+
+/** This is the const version of middleCols(Index,Index).*/
+inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const
+{
+ return ConstColsBlockXpr(derived(), 0, startCol, rows(), numCols);
+}
+
+/** \returns a block consisting of a range of columns of *this.
+ *
+ * \tparam N the number of columns in the block
+ * \param startCol the index of the first column in the block
+ *
+ * Example: \include DenseBase_template_int_middleCols.cpp
+ * Output: \verbinclude DenseBase_template_int_middleCols.out
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int N>
+inline typename NColsBlockXpr<N>::Type middleCols(Index startCol)
+{
+ return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), N);
+}
+
+/** This is the const version of middleCols<int>().*/
+template<int N>
+inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol) const
+{
+ return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), N);
+}
+
+
+
+/** \returns a fixed-size expression of a block in *this.
+ *
+ * The template parameters \a BlockRows and \a BlockCols are the number of
+ * rows and columns in the block.
+ *
+ * \param startRow the first row in the block
+ * \param startCol the first column in the block
+ *
+ * Example: \include MatrixBase_block_int_int.cpp
+ * Output: \verbinclude MatrixBase_block_int_int.out
+ *
+ * \note since block is a templated member, the keyword template has to be used
+ * if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode
+ *
+ * \sa class Block, block(Index,Index,Index,Index)
+ */
+template<int BlockRows, int BlockCols>
+inline Block<Derived, BlockRows, BlockCols> block(Index startRow, Index startCol)
+{
+ return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
+}
+
+/** This is the const version of block<>(Index, Index). */
+template<int BlockRows, int BlockCols>
+inline const Block<const Derived, BlockRows, BlockCols> block(Index startRow, Index startCol) const
+{
+ return Block<const Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
+}
+
+/** \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0.
+ *
+ * Example: \include MatrixBase_col.cpp
+ * Output: \verbinclude MatrixBase_col.out
+ *
+ * \sa row(), class Block */
+inline ColXpr col(Index i)
+{
+ return ColXpr(derived(), i);
+}
+
+/** This is the const version of col(). */
+inline ConstColXpr col(Index i) const
+{
+ return ConstColXpr(derived(), i);
+}
+
+/** \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0.
+ *
+ * Example: \include MatrixBase_row.cpp
+ * Output: \verbinclude MatrixBase_row.out
+ *
+ * \sa col(), class Block */
+inline RowXpr row(Index i)
+{
+ return RowXpr(derived(), i);
+}
+
+/** This is the const version of row(). */
+inline ConstRowXpr row(Index i) const
+{
+ return ConstRowXpr(derived(), i);
+}
+
+#endif // EIGEN_BLOCKMETHODS_H
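
The API above in action (a short sketch): the dynamic-size and fixed-size variants address the same sub-matrix, and all block expressions are writable lvalues:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXf m = Eigen::MatrixXf::Random(5,5);
      std::cout << m.block(1, 1, 2, 3) << "\n"; // dynamic-size block
      std::cout << m.block<2,3>(1, 1) << "\n";  // fixed-size, same coefficients
      std::cout << m.topRows(2) << "\n";
      std::cout << m.rightCols<2>() << "\n";
      m.col(0).setZero(); // block expressions can be assigned to
      return 0;
    }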
diff --git a/extern/Eigen3/Eigen/src/plugins/CommonCwiseBinaryOps.h b/extern/Eigen3/Eigen/src/plugins/CommonCwiseBinaryOps.h
new file mode 100644
index 00000000000..8f7765e72bd
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/plugins/CommonCwiseBinaryOps.h
@@ -0,0 +1,61 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+// This file is a base class plugin containing common coefficient-wise functions.
+
+/** \returns an expression of the difference of \c *this and \a other
+ *
+ * \note If you want to subtract a given scalar from all coefficients, see Cwise::operator-().
+ *
+ * \sa class CwiseBinaryOp, operator-=()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(operator-,internal::scalar_difference_op)
+
+/** \returns an expression of the sum of \c *this and \a other
+ *
+ * \note If you want to add a given scalar to all coefficients, see Cwise::operator+().
+ *
+ * \sa class CwiseBinaryOp, operator+=()
+ */
+EIGEN_MAKE_CWISE_BINARY_OP(operator+,internal::scalar_sum_op)
+
+/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other
+ *
+ * The template parameter \a CustomBinaryOp is the type of the functor
+ * of the custom operator (see class CwiseBinaryOp for an example)
+ *
+ * Here is an example illustrating the use of custom functors:
+ * \include class_CwiseBinaryOp.cpp
+ * Output: \verbinclude class_CwiseBinaryOp.out
+ *
+ * \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct()
+ */
+template<typename CustomBinaryOp, typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>
+binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const
+{
+ return CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other.derived(), func);
+}
+
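A sketch of binaryExpr() with a custom functor; AbsDiffOp is a hypothetical functor defined here purely for illustration (the nested result_type follows the pattern of Eigen's documented custom-functor example):

    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    // Hypothetical functor: coefficient-wise absolute difference.
    template<typename Scalar> struct AbsDiffOp
    {
      typedef Scalar result_type;
      Scalar operator()(const Scalar& a, const Scalar& b) const
      { return std::abs(a - b); }
    };

    int main()
    {
      Eigen::MatrixXf a = Eigen::MatrixXf::Random(2,2);
      Eigen::MatrixXf b = Eigen::MatrixXf::Random(2,2);
      std::cout << a.binaryExpr(b, AbsDiffOp<float>()) << "\n";
      return 0;
    }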
diff --git a/extern/Eigen3/Eigen/src/plugins/CommonCwiseUnaryOps.h b/extern/Eigen3/Eigen/src/plugins/CommonCwiseUnaryOps.h
new file mode 100644
index 00000000000..941d5153c59
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/plugins/CommonCwiseUnaryOps.h
@@ -0,0 +1,187 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+// This file is a base class plugin containing common coefficient-wise functions.
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+/** \internal Represents a scalar multiple of an expression */
+typedef CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const Derived> ScalarMultipleReturnType;
+/** \internal Represents a quotient of an expression by a scalar*/
+typedef CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>, const Derived> ScalarQuotient1ReturnType;
+/** \internal the return type of conjugate() */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ const CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>,
+ const Derived&
+ >::type ConjugateReturnType;
+/** \internal the return type of real() const */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ const CwiseUnaryOp<internal::scalar_real_op<Scalar>, const Derived>,
+ const Derived&
+ >::type RealReturnType;
+/** \internal the return type of real() */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ CwiseUnaryView<internal::scalar_real_ref_op<Scalar>, Derived>,
+ Derived&
+ >::type NonConstRealReturnType;
+/** \internal the return type of imag() const */
+typedef CwiseUnaryOp<internal::scalar_imag_op<Scalar>, const Derived> ImagReturnType;
+/** \internal the return type of imag() */
+typedef CwiseUnaryView<internal::scalar_imag_ref_op<Scalar>, Derived> NonConstImagReturnType;
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+/** \returns an expression of the opposite of \c *this
+ */
+inline const CwiseUnaryOp<internal::scalar_opposite_op<typename internal::traits<Derived>::Scalar>, const Derived>
+operator-() const { return derived(); }
+
+
+/** \returns an expression of \c *this scaled by the scalar factor \a scalar */
+inline const ScalarMultipleReturnType
+operator*(const Scalar& scalar) const
+{
+ return CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const Derived>
+ (derived(), internal::scalar_multiple_op<Scalar>(scalar));
+}
+
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+const ScalarMultipleReturnType operator*(const RealScalar& scalar) const;
+#endif
+
+/** \returns an expression of \c *this divided by the scalar value \a scalar */
+inline const CwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>, const Derived>
+operator/(const Scalar& scalar) const
+{
+ return CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>, const Derived>
+ (derived(), internal::scalar_quotient1_op<Scalar>(scalar));
+}
+
+/** Overloaded for efficient real matrix times complex scalar value */
+inline const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,std::complex<Scalar> >, const Derived>
+operator*(const std::complex<Scalar>& scalar) const
+{
+ return CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,std::complex<Scalar> >, const Derived>
+ (*static_cast<const Derived*>(this), internal::scalar_multiple2_op<Scalar,std::complex<Scalar> >(scalar));
+}
+
+inline friend const ScalarMultipleReturnType
+operator*(const Scalar& scalar, const StorageBaseType& matrix)
+{ return matrix*scalar; }
+
+inline friend const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,std::complex<Scalar> >, const Derived>
+operator*(const std::complex<Scalar>& scalar, const StorageBaseType& matrix)
+{ return matrix*scalar; }
+
+/** \returns an expression of *this with the \a Scalar type casted to
+ * \a NewScalar.
+ *
+ * The template parameter \a NewScalar is the type we are casting the scalars to.
+ *
+ * \sa class CwiseUnaryOp
+ */
+template<typename NewType>
+typename internal::cast_return_type<Derived,const CwiseUnaryOp<internal::scalar_cast_op<typename internal::traits<Derived>::Scalar, NewType>, const Derived> >::type
+cast() const
+{
+ return derived();
+}
+
+/** \returns an expression of the complex conjugate of \c *this.
+ *
+ * \sa adjoint() */
+inline ConjugateReturnType
+conjugate() const
+{
+ return ConjugateReturnType(derived());
+}
+
+/** \returns a read-only expression of the real part of \c *this.
+ *
+ * \sa imag() */
+inline RealReturnType
+real() const { return derived(); }
+
+/** \returns a read-only expression of the imaginary part of \c *this.
+ *
+ * \sa real() */
+inline const ImagReturnType
+imag() const { return derived(); }
+
+/** \brief Apply a unary operator coefficient-wise
+ * \param[in] func Functor implementing the unary operator
+ * \tparam CustomUnaryOp Type of \a func
+ * \returns An expression of a custom coefficient-wise unary operator \a func of *this
+ *
+ * The function \c ptr_fun() from the C++ standard library can be used to make functors out of normal functions.
+ *
+ * Example:
+ * \include class_CwiseUnaryOp_ptrfun.cpp
+ * Output: \verbinclude class_CwiseUnaryOp_ptrfun.out
+ *
+ * Genuine functors allow for more possibilities; for instance, they may contain state.
+ *
+ * Example:
+ * \include class_CwiseUnaryOp.cpp
+ * Output: \verbinclude class_CwiseUnaryOp.out
+ *
+ * \sa class CwiseUnaryOp, class CwiseBinaryOp
+ */
+template<typename CustomUnaryOp>
+inline const CwiseUnaryOp<CustomUnaryOp, const Derived>
+unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const
+{
+ return CwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func);
+}
+
+/** \returns an expression of a custom coefficient-wise unary operator \a func of *this
+ *
+ * The template parameter \a CustomUnaryOp is the type of the functor
+ * of the custom unary operator.
+ *
+ * Example:
+ * \include class_CwiseUnaryOp.cpp
+ * Output: \verbinclude class_CwiseUnaryOp.out
+ *
+ * \sa class CwiseUnaryOp, class CwiseBinaryOp
+ */
+template<typename CustomViewOp>
+inline const CwiseUnaryView<CustomViewOp, const Derived>
+unaryViewExpr(const CustomViewOp& func = CustomViewOp()) const
+{
+ return CwiseUnaryView<CustomViewOp, const Derived>(derived(), func);
+}
+
+/** \returns a non const expression of the real part of \c *this.
+ *
+ * \sa imag() */
+inline NonConstRealReturnType
+real() { return derived(); }
+
+/** \returns a non const expression of the imaginary part of \c *this.
+ *
+ * \sa real() */
+inline NonConstImagReturnType
+imag() { return derived(); }
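[Editor's note: the plugin members above all build lazy CwiseUnaryOp expressions rather than evaluating immediately. A minimal usage sketch follows; it is illustrative only and not part of the patch. It assumes Eigen 3 is on the include path, and clampToZero is a made-up helper used to demonstrate the std::ptr_fun() case mentioned in the unaryExpr() documentation.]

    #include <Eigen/Dense>
    #include <functional>   // std::ptr_fun (C++98/03-era Eigen)
    #include <iostream>

    // Hypothetical plain function, turned into a functor via std::ptr_fun().
    static double clampToZero(double x) { return x < 0.0 ? 0.0 : x; }

    int main()
    {
        Eigen::Matrix2d m;
        m << 1.0, -2.0,
             3.0, -4.0;

        Eigen::Matrix2d a = 2.0 * m;            // friend operator*(scalar, matrix)
        Eigen::Matrix2d b = m / 4.0;            // operator/(scalar)
        Eigen::Matrix2f f = m.cast<float>();    // coefficient-wise scalar cast

        // unaryExpr() applies a custom functor to every coefficient.
        Eigen::Matrix2d c = m.unaryExpr(std::ptr_fun(clampToZero));

        std::cout << a << "\n\n" << b << "\n\n" << f << "\n\n" << c << "\n";
        return 0;
    }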
diff --git a/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h
new file mode 100644
index 00000000000..35183f91f80
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h
@@ -0,0 +1,120 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+// This file is a base class plugin containing matrix-specific coefficient-wise functions.
+
+/** \returns an expression of the Schur product (coefficient wise product) of *this and \a other
+ *
+ * Example: \include MatrixBase_cwiseProduct.cpp
+ * Output: \verbinclude MatrixBase_cwiseProduct.out
+ *
+ * \sa class CwiseBinaryOp, cwiseAbs2
+ */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)
+cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise == operator of *this and \a other
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * Example: \include MatrixBase_cwiseEqual.cpp
+ * Output: \verbinclude MatrixBase_cwiseEqual.out
+ *
+ * \sa cwiseNotEqual(), isApprox(), isMuchSmallerThan()
+ */
+template<typename OtherDerived>
+inline const CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>
+cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise != operator of *this and \a other
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * Example: \include MatrixBase_cwiseNotEqual.cpp
+ * Output: \verbinclude MatrixBase_cwiseNotEqual.out
+ *
+ * \sa cwiseEqual(), isApprox(), isMuchSmallerThan()
+ */
+template<typename OtherDerived>
+inline const CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>
+cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise min of *this and \a other
+ *
+ * Example: \include MatrixBase_cwiseMin.cpp
+ * Output: \verbinclude MatrixBase_cwiseMin.out
+ *
+ * \sa class CwiseBinaryOp, max()
+ */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const OtherDerived>
+cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise max of *this and \a other
+ *
+ * Example: \include MatrixBase_cwiseMax.cpp
+ * Output: \verbinclude MatrixBase_cwiseMax.out
+ *
+ * \sa class CwiseBinaryOp, min()
+ */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const OtherDerived>
+cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise quotient of *this and \a other
+ *
+ * Example: \include MatrixBase_cwiseQuotient.cpp
+ * Output: \verbinclude MatrixBase_cwiseQuotient.out
+ *
+ * \sa class CwiseBinaryOp, cwiseProduct(), cwiseInverse()
+ */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
+cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+ return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
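[Editor's note: a short sketch of the binary plugin members above, illustrative only and not part of the patch; it assumes Eigen 3 headers are available.]

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::Vector3d a(1.0, 4.0, 9.0);
        Eigen::Vector3d b(2.0, 2.0, 3.0);

        Eigen::Vector3d prod = a.cwiseProduct(b);   // (2, 8, 27)
        Eigen::Vector3d quot = a.cwiseQuotient(b);  // (0.5, 2, 3)
        Eigen::Vector3d lo   = a.cwiseMin(b);       // (1, 2, 3)
        Eigen::Vector3d hi   = a.cwiseMax(b);       // (2, 4, 9)

        // Exact comparison, as warned above -- prefer isApprox() for floats.
        bool anyEqual = a.cwiseEqual(b).any();

        std::cout << prod.transpose() << " | " << quot.transpose() << " | "
                  << lo.transpose()   << " | " << hi.transpose()   << " | "
                  << anyEqual << "\n";
        return 0;
    }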
diff --git a/extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h
new file mode 100644
index 00000000000..a3d9a0e1465
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h
@@ -0,0 +1,82 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+// This file is a base class plugin containing matrix-specific coefficient-wise functions.
+
+/** \returns an expression of the coefficient-wise absolute value of \c *this
+ *
+ * Example: \include MatrixBase_cwiseAbs.cpp
+ * Output: \verbinclude MatrixBase_cwiseAbs.out
+ *
+ * \sa cwiseAbs2()
+ */
+EIGEN_STRONG_INLINE const CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
+cwiseAbs() const { return derived(); }
+
+/** \returns an expression of the coefficient-wise squared absolute value of \c *this
+ *
+ * Example: \include MatrixBase_cwiseAbs2.cpp
+ * Output: \verbinclude MatrixBase_cwiseAbs2.out
+ *
+ * \sa cwiseAbs()
+ */
+EIGEN_STRONG_INLINE const CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived>
+cwiseAbs2() const { return derived(); }
+
+/** \returns an expression of the coefficient-wise square root of *this.
+ *
+ * Example: \include MatrixBase_cwiseSqrt.cpp
+ * Output: \verbinclude MatrixBase_cwiseSqrt.out
+ *
+ * \sa cwisePow(), cwiseSquare()
+ */
+inline const CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived>
+cwiseSqrt() const { return derived(); }
+
+/** \returns an expression of the coefficient-wise inverse of *this.
+ *
+ * Example: \include MatrixBase_cwiseInverse.cpp
+ * Output: \verbinclude MatrixBase_cwiseInverse.out
+ *
+ * \sa cwiseProduct()
+ */
+inline const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
+cwiseInverse() const { return derived(); }
+
+/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
+ */
+inline const CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >, const Derived>
+cwiseEqual(const Scalar& s) const
+{
+ return CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >,const Derived>
+ (derived(), std::bind1st(std::equal_to<Scalar>(), s));
+}
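[Editor's note: a matching sketch for the unary members, again illustrative only.]

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::Vector3d v(-1.0, 4.0, -9.0);

        Eigen::Vector3d va = v.cwiseAbs();              // (1, 4, 9)
        Eigen::Vector3d v2 = v.cwiseAbs2();             // (1, 16, 81)
        Eigen::Vector3d vs = v.cwiseAbs().cwiseSqrt();  // (1, 2, 3)
        Eigen::Vector3d vi = v.cwiseInverse();          // (-1, 0.25, -1/9)

        // Scalar form of cwiseEqual(); count() tallies the true coefficients.
        int hits = (int) v.cwiseEqual(4.0).count();     // 1

        std::cout << vs.transpose() << " | " << hits << "\n";
        return 0;
    }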
diff --git a/extern/Eigen2/eigen-update.sh b/extern/Eigen3/eigen-update.sh
index 797c710c196..7be67890173 100755
--- a/extern/Eigen2/eigen-update.sh
+++ b/extern/Eigen3/eigen-update.sh
@@ -1,7 +1,7 @@
#!/bin/sh
-echo "*** EIGEN2-HG Update utility"
-echo "*** This gets a new eigen2-hg tree and adapts it to blenders build structure"
+echo "*** EIGEN#-HG Update utility"
+echo "*** This gets a new eigen3-hg tree and adapts it to blenders build structure"
echo "*** Warning! This script will wipe all the header file"
if [ "x$1" = "x--i-really-know-what-im-doing" ] ; then
@@ -12,16 +12,16 @@ else
fi
# get the latest revision from repository.
-hg clone http://bitbucket.org/eigen/eigen2
-if [ -d eigen2 ]
+hg clone http://bitbucket.org/eigen/eigen
+if [ -d eigen ]
then
- cd eigen2
+ cd eigen
# put here the version you want to use
- hg up 2.0
+ hg up 3.0
rm -f `find Eigen/ -type f -name "CMakeLists.txt"`
cp -r Eigen ..
cd ..
- rm -rf eigen2
+ rm -rf eigen
else
echo "Did you install Mercurial?"
fi
diff --git a/extern/SConscript b/extern/SConscript
index 6bb060adeda..031471a8a01 100644
--- a/extern/SConscript
+++ b/extern/SConscript
@@ -3,6 +3,7 @@
Import('env')
SConscript(['glew/SConscript'])
+SConscript(['colamd/SConscript'])
if env['WITH_BF_GAMEENGINE']:
SConscript(['recastnavigation/SConscript'])
@@ -27,3 +28,6 @@ if env['WITH_BF_LZO']:
if env['WITH_BF_LZMA']:
SConscript(['lzma/SConscript'])
+
+if env['WITH_BF_LIBMV']:
+ SConscript(['libmv/SConscript'])
diff --git a/extern/colamd/CMakeLists.txt b/extern/colamd/CMakeLists.txt
new file mode 100644
index 00000000000..fe7a0441b29
--- /dev/null
+++ b/extern/colamd/CMakeLists.txt
@@ -0,0 +1,41 @@
+# $Id$
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# The Original Code is Copyright (C) 2011, Blender Foundation
+# All rights reserved.
+#
+# Contributor(s): Blender Foundation,
+# Sergey Sharybin
+#
+# ***** END GPL LICENSE BLOCK *****
+
+set(INC
+ ./Include
+)
+
+set(INC_SYS
+
+)
+
+set(SRC
+ Source/colamd.c
+ Source/colamd_global.c
+
+ Include/colamd.h
+)
+
+blender_add_lib(extern_colamd "${SRC}" "${INC}" "${INC_SYS}")
diff --git a/extern/colamd/Doc/ChangeLog b/extern/colamd/Doc/ChangeLog
new file mode 100644
index 00000000000..29308e9ad01
--- /dev/null
+++ b/extern/colamd/Doc/ChangeLog
@@ -0,0 +1,129 @@
+May 31, 2007: version 2.7.0
+
+ * ported to 64-bit MATLAB
+
+ * subdirectories added (Source/, Include/, Lib/, Doc/, MATLAB/, Demo/)
+
+Dec 12, 2006, version 2.5.2
+
+ * minor MATLAB cleanup. MATLAB functions renamed colamd2 and symamd2,
+ so that they do not conflict with the built-in versions. Note that
+ the MATLAB built-in functions colamd and symamd are identical to
+ the colamd and symamd functions here.
+
+Aug 31, 2006: Version 2.5.1
+
+ * minor change to colamd.m and symamd.m, to use etree instead
+ of sparsfun.
+
+Apr. 30, 2006: Version 2.5
+
+ * colamd_recommended modified, to do more careful integer overflow
+ checking. It now returns size_t, not int. colamd_l_recommended
+ also returns size_t. A zero is returned if an error occurs. A
+ positive return value denotes success. In v2.4 and earlier,
+ -1 was returned on error (an int or long).
+
+ * long replaced with UF_long integer, which is long except on WIN64.
+
+Nov 15, 2005:
+
+ * minor editing of comments; version number (2.4) unchanged.
+
+Changes from Version 2.3 to 2.4 (Aug 30, 2005)
+
+ * Makefile now relies on ../UFconfig/UFconfig.mk
+
+ * changed the dense row/col detection. The meaning of the knobs
+ has thus changed.
+
+ * added an option to turn off aggressive absorption. It was
+ always on in versions 2.3 and earlier.
+
+ * added a #define'd version number
+
+ * added a function pointer (colamd_printf) for COLAMD's printing.
+
+ * added a -DNPRINT option, to turn off printing at compile-time.
+
+ * added a check for integer overflow in colamd_recommended
+
+ * minor changes to allow for simpler 100% test coverage
+
+ * bug fix. If symamd v2.3 fails to allocate its copy of the input
+ matrix, then it erroneously frees a calloc'd workspace twice.
+ This bug has no effect on the MATLAB symamd mexFunction, since
+ mxCalloc terminates the mexFunction if it fails to allocate
+ memory. Similarly, UMFPACK is not affected because it does not
+ use symamd. The bug has no effect on the colamd ordering
+ routine in v2.3.
+
+Changes from Version 2.2 to 2.3 (Sept. 8, 2003)
+
+ * removed the call to the MATLAB spparms ('spumoni') function.
+ This can take a lot of time if you are ordering many small
+ matrices. Only affects the MATLAB interface (colamdmex.c,
+ symamdmex.c, colamdtestmex.c, and symamdtestmex.c). The
+ usage of the optional 2nd argument to the colamd and symamd
+ mexFunctions was changed accordingly.
+
+Changes from Version 2.1 to 2.2 (Sept. 23, 2002)
+
+ * extensive testing routines added (colamd_test.m, colamdtestmex.c,
+ and symamdtestmex.c), and the Makefile modified accordingly.
+
+ * a few typos in the comments corrected
+
+ * use of the MATLAB "flops" command removed from colamd_demo, and an
+ m-file routine luflops.m added.
+
+ * an explicit typecast from unsigned to int added, for COLAMD_C and
+ COLAMD_R in colamd.h.
+
+ * #include <stdio.h> added to colamd_example.c
+
+
+Changes from Version 2.0 to 2.1 (May 4, 2001)
+
+ * TRUE and FALSE are predefined on some systems, so they are defined
+ here only if not already defined.
+
+ * web site changed
+
+ * UNIX Makefile modified, to handle the case if "." is not in your path.
+
+
+Changes from Version 1.0 to 2.0 (January 31, 2000)
+
+ No bugs were found in version 1.1. These changes merely add new
+ functionality.
+
+ * added the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro.
+
+ * moved the output statistics, from A, to a separate output argument.
+ The arguments changed for the C-callable routines.
+
+ * added colamd_report and symamd_report.
+
+ * added a C-callable symamd routine. Formerly, symamd was only
+ available as a mexFunction from MATLAB.
+
+ * added error-checking to symamd. Formerly, it assumed its input
+ was error-free.
+
+ * added the optional stats and knobs arguments to the symamd mexFunction
+
+ * deleted colamd_help. A help message is still available from
+ "help colamd" and "help symamd" in MATLAB.
+
+ * deleted colamdtree.m and symamdtree.m. Now, colamd.m and symamd.m
+ also do the elimination tree post-ordering. The Version 1.1
+ colamd and symamd mexFunctions, which do not do the post-
+ ordering, are now visible as colamdmex and symamdmex from
+ MATLAB. Essentially, the post-ordering is now the default
+ behavior of colamd.m and symamd.m, to match the behavior of
+ colmmd and symmmd. The post-ordering is only available in the
+ MATLAB interface, not the C-callable interface.
+
+ * made a slight change to the dense row/column detection in symamd,
+ to match the stated specifications.
diff --git a/extern/colamd/Doc/lesser.txt b/extern/colamd/Doc/lesser.txt
new file mode 100644
index 00000000000..8add30ad590
--- /dev/null
+++ b/extern/colamd/Doc/lesser.txt
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/extern/colamd/Include/UFconfig.h b/extern/colamd/Include/UFconfig.h
new file mode 100644
index 00000000000..7b5e79e544f
--- /dev/null
+++ b/extern/colamd/Include/UFconfig.h
@@ -0,0 +1,118 @@
+/* ========================================================================== */
+/* === UFconfig.h =========================================================== */
+/* ========================================================================== */
+
+/* Configuration file for SuiteSparse: a Suite of Sparse matrix packages
+ * (AMD, COLAMD, CCOLAMD, CAMD, CHOLMOD, UMFPACK, CXSparse, and others).
+ *
+ * UFconfig.h provides the definition of the long integer. On most systems,
+ * a C program can be compiled in LP64 mode, in which long's and pointers are
+ * both 64-bits, and int's are 32-bits. Windows 64, however, uses the LLP64
+ * model, in which int's and long's are 32-bits, and long long's and pointers
+ * are 64-bits.
+ *
+ * SuiteSparse packages that include long integer versions are
+ * intended for the LP64 mode. However, as a workaround for Windows 64
+ * (and perhaps other systems), the long integer can be redefined.
+ *
+ * If _WIN64 is defined, then the __int64 type is used instead of long.
+ *
+ * The long integer can also be defined at compile time. For example, this
+ * could be added to UFconfig.mk:
+ *
+ * CFLAGS = -O -D'UF_long=long long' -D'UF_long_max=9223372036854775801' \
+ * -D'UF_long_id="%lld"'
+ *
+ * This file defines UF_long as either long (on all but _WIN64) or
+ * __int64 on Windows 64. The intent is that a UF_long is always a 64-bit
+ * integer in a 64-bit code. ptrdiff_t might be a better choice than long;
+ * it is always the same size as a pointer.
+ *
+ * This file also defines the SUITESPARSE_VERSION and related definitions.
+ *
+ * Copyright (c) 2007, University of Florida. No licensing restrictions
+ * apply to this file or to the UFconfig directory. Author: Timothy A. Davis.
+ */
+
+#ifndef _UFCONFIG_H
+#define _UFCONFIG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <limits.h>
+
+/* ========================================================================== */
+/* === UF_long ============================================================== */
+/* ========================================================================== */
+
+#ifndef UF_long
+
+#ifdef _WIN64
+
+#define UF_long __int64
+#define UF_long_max _I64_MAX
+#define UF_long_id "%I64d"
+
+#else
+
+#define UF_long long
+#define UF_long_max LONG_MAX
+#define UF_long_id "%ld"
+
+#endif
+#endif
+
+/* ========================================================================== */
+/* === SuiteSparse version ================================================== */
+/* ========================================================================== */
+
+/* SuiteSparse is not a package itself, but a collection of packages, some of
+ * which must be used together (UMFPACK requires AMD, CHOLMOD requires AMD,
+ * COLAMD, CAMD, and CCOLAMD, etc). A version number is provided here for the
+ * collection itself. The versions of packages within each version of
+ * SuiteSparse are meant to work together. Combining one package from one
+ * version of SuiteSparse, with another package from another version of
+ * SuiteSparse, may or may not work.
+ *
+ * SuiteSparse Version 3.4.0 contains the following packages:
+ *
+ * AMD version 2.2.0
+ * CAMD version 2.2.0
+ * COLAMD version 2.7.1
+ * CCOLAMD version 2.7.1
+ * CHOLMOD version 1.7.1
+ * CSparse version 2.2.3
+ * CXSparse version 2.2.3
+ * KLU version 1.1.0
+ * BTF version 1.1.0
+ * LDL version 2.0.1
+ * UFconfig version number is the same as SuiteSparse
+ * UMFPACK version 5.4.0
+ * RBio version 1.1.2
+ * UFcollection version 1.2.0
+ * LINFACTOR version 1.1.0
+ * MESHND version 1.1.1
+ * SSMULT version 2.0.0
+ * MATLAB_Tools no specific version number
+ * SuiteSparseQR version 1.1.2
+ *
+ * Other package dependencies:
+ * BLAS required by CHOLMOD and UMFPACK
+ * LAPACK required by CHOLMOD
+ * METIS 4.0.1 required by CHOLMOD (optional) and KLU (optional)
+ */
+
+#define SUITESPARSE_DATE "May 20, 2009"
+#define SUITESPARSE_VER_CODE(main,sub) ((main) * 1000 + (sub))
+#define SUITESPARSE_MAIN_VERSION 3
+#define SUITESPARSE_SUB_VERSION 4
+#define SUITESPARSE_SUBSUB_VERSION 0
+#define SUITESPARSE_VERSION \
+ SUITESPARSE_VER_CODE(SUITESPARSE_MAIN_VERSION,SUITESPARSE_SUB_VERSION)
+
+#ifdef __cplusplus
+}
+#endif
+#endif
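[Editor's note: a tiny sketch of the intended UF_long usage, illustrative only; it assumes UFconfig.h is on the include path.]

    #include <cstdio>
    #include "UFconfig.h"

    int main()
    {
        // UF_long is long everywhere except _WIN64, where it is __int64;
        // UF_long_id supplies the matching printf format string.
        UF_long big = (UF_long) UF_long_max;
        std::printf("largest UF_long: " UF_long_id "\n", big);
        return 0;
    }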
diff --git a/extern/colamd/Include/colamd.h b/extern/colamd/Include/colamd.h
new file mode 100644
index 00000000000..26372d8fa96
--- /dev/null
+++ b/extern/colamd/Include/colamd.h
@@ -0,0 +1,255 @@
+/* ========================================================================== */
+/* === colamd/symamd prototypes and definitions ============================= */
+/* ========================================================================== */
+
+/* COLAMD / SYMAMD include file
+
+ You must include this file (colamd.h) in any routine that uses colamd,
+ symamd, or the related macros and definitions.
+
+ Authors:
+
+ The authors of the code itself are Stefan I. Larimore and Timothy A.
+ Davis (davis at cise.ufl.edu), University of Florida. The algorithm was
+ developed in collaboration with John Gilbert, Xerox PARC, and Esmond
+ Ng, Oak Ridge National Laboratory.
+
+ Acknowledgements:
+
+ This work was supported by the National Science Foundation, under
+ grants DMS-9504974 and DMS-9803599.
+
+ Notice:
+
+ Copyright (c) 1998-2007, Timothy A. Davis, All Rights Reserved.
+
+ THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+ Permission is hereby granted to use, copy, modify, and/or distribute
+ this program, provided that the Copyright, this License, and the
+ Availability of the original version is retained on all copies and made
+ accessible to the end-user of any code or package that includes COLAMD
+ or any modified version of COLAMD.
+
+ Availability:
+
+ The colamd/symamd library is available at
+
+ http://www.cise.ufl.edu/research/sparse/colamd/
+
+ This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.h
+ file. It is required by the colamd.c, colamdmex.c, and symamdmex.c
+ files, and by any C code that calls the routines whose prototypes are
+ listed below, or that uses the colamd/symamd definitions listed below.
+
+*/
+
+#ifndef COLAMD_H
+#define COLAMD_H
+
+/* make it easy for C++ programs to include COLAMD */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ========================================================================== */
+/* === Include files ======================================================== */
+/* ========================================================================== */
+
+#include <stdlib.h>
+
+/* ========================================================================== */
+/* === COLAMD version ======================================================= */
+/* ========================================================================== */
+
+/* COLAMD Version 2.4 and later will include the following definitions.
+ * As an example, to test if the version you are using is 2.4 or later:
+ *
+ * #ifdef COLAMD_VERSION
+ * if (COLAMD_VERSION >= COLAMD_VERSION_CODE (2,4)) ...
+ * #endif
+ *
+ * This also works during compile-time:
+ *
+ * #if defined(COLAMD_VERSION) && (COLAMD_VERSION >= COLAMD_VERSION_CODE (2,4))
+ * printf ("This is version 2.4 or later\n") ;
+ * #else
+ * printf ("This is an early version\n") ;
+ * #endif
+ *
+ * Versions 2.3 and earlier of COLAMD do not include a #define'd version number.
+ */
+
+#define COLAMD_DATE "Nov 1, 2007"
+#define COLAMD_VERSION_CODE(main,sub) ((main) * 1000 + (sub))
+#define COLAMD_MAIN_VERSION 2
+#define COLAMD_SUB_VERSION 7
+#define COLAMD_SUBSUB_VERSION 1
+#define COLAMD_VERSION \
+ COLAMD_VERSION_CODE(COLAMD_MAIN_VERSION,COLAMD_SUB_VERSION)
+
+/* ========================================================================== */
+/* === Knob and statistics definitions ====================================== */
+/* ========================================================================== */
+
+/* size of the knobs [ ] array. Only knobs [0..1] are currently used. */
+#define COLAMD_KNOBS 20
+
+/* number of output statistics. Only stats [0..6] are currently used. */
+#define COLAMD_STATS 20
+
+/* knobs [0] and stats [0]: dense row knob and output statistic. */
+#define COLAMD_DENSE_ROW 0
+
+/* knobs [1] and stats [1]: dense column knob and output statistic. */
+#define COLAMD_DENSE_COL 1
+
+/* knobs [2]: aggressive absorption */
+#define COLAMD_AGGRESSIVE 2
+
+/* stats [2]: memory defragmentation count output statistic */
+#define COLAMD_DEFRAG_COUNT 2
+
+/* stats [3]: colamd status: zero OK, > 0 warning or notice, < 0 error */
+#define COLAMD_STATUS 3
+
+/* stats [4..6]: error info, or info on jumbled columns */
+#define COLAMD_INFO1 4
+#define COLAMD_INFO2 5
+#define COLAMD_INFO3 6
+
+/* error codes returned in stats [3]: */
+#define COLAMD_OK (0)
+#define COLAMD_OK_BUT_JUMBLED (1)
+#define COLAMD_ERROR_A_not_present (-1)
+#define COLAMD_ERROR_p_not_present (-2)
+#define COLAMD_ERROR_nrow_negative (-3)
+#define COLAMD_ERROR_ncol_negative (-4)
+#define COLAMD_ERROR_nnz_negative (-5)
+#define COLAMD_ERROR_p0_nonzero (-6)
+#define COLAMD_ERROR_A_too_small (-7)
+#define COLAMD_ERROR_col_length_negative (-8)
+#define COLAMD_ERROR_row_index_out_of_bounds (-9)
+#define COLAMD_ERROR_out_of_memory (-10)
+#define COLAMD_ERROR_internal_error (-999)
+
+
+/* ========================================================================== */
+/* === Prototypes of user-callable routines ================================= */
+/* ========================================================================== */
+
+/* define UF_long */
+#include "UFconfig.h"
+
+size_t colamd_recommended /* returns recommended value of Alen, */
+ /* or 0 if input arguments are erroneous */
+(
+ int nnz, /* nonzeros in A */
+ int n_row, /* number of rows in A */
+ int n_col /* number of columns in A */
+) ;
+
+size_t colamd_l_recommended /* returns recommended value of Alen, */
+ /* or 0 if input arguments are erroneous */
+(
+ UF_long nnz, /* nonzeros in A */
+ UF_long n_row, /* number of rows in A */
+ UF_long n_col /* number of columns in A */
+) ;
+
+void colamd_set_defaults /* sets default parameters */
+( /* knobs argument is modified on output */
+ double knobs [COLAMD_KNOBS] /* parameter settings for colamd */
+) ;
+
+void colamd_l_set_defaults /* sets default parameters */
+( /* knobs argument is modified on output */
+ double knobs [COLAMD_KNOBS] /* parameter settings for colamd */
+) ;
+
+int colamd /* returns (1) if successful, (0) otherwise*/
+( /* A and p arguments are modified on output */
+ int n_row, /* number of rows in A */
+ int n_col, /* number of columns in A */
+ int Alen, /* size of the array A */
+ int A [], /* row indices of A, of size Alen */
+ int p [], /* column pointers of A, of size n_col+1 */
+ double knobs [COLAMD_KNOBS],/* parameter settings for colamd */
+ int stats [COLAMD_STATS] /* colamd output statistics and error codes */
+) ;
+
+UF_long colamd_l /* returns (1) if successful, (0) otherwise*/
+( /* A and p arguments are modified on output */
+ UF_long n_row, /* number of rows in A */
+ UF_long n_col, /* number of columns in A */
+ UF_long Alen, /* size of the array A */
+ UF_long A [], /* row indices of A, of size Alen */
+ UF_long p [], /* column pointers of A, of size n_col+1 */
+ double knobs [COLAMD_KNOBS],/* parameter settings for colamd */
+ UF_long stats [COLAMD_STATS]/* colamd output statistics and error codes */
+) ;
+
+int symamd /* return (1) if OK, (0) otherwise */
+(
+ int n, /* number of rows and columns of A */
+ int A [], /* row indices of A */
+ int p [], /* column pointers of A */
+ int perm [], /* output permutation, size n_col+1 */
+ double knobs [COLAMD_KNOBS], /* parameters (uses defaults if NULL) */
+ int stats [COLAMD_STATS], /* output statistics and error codes */
+ void * (*allocate) (size_t, size_t),
+ /* pointer to calloc (ANSI C) or */
+ /* mxCalloc (for MATLAB mexFunction) */
+ void (*release) (void *)
+ /* pointer to free (ANSI C) or */
+ /* mxFree (for MATLAB mexFunction) */
+) ;
+
+UF_long symamd_l /* return (1) if OK, (0) otherwise */
+(
+ UF_long n, /* number of rows and columns of A */
+ UF_long A [], /* row indices of A */
+ UF_long p [], /* column pointers of A */
+ UF_long perm [], /* output permutation, size n_col+1 */
+ double knobs [COLAMD_KNOBS], /* parameters (uses defaults if NULL) */
+ UF_long stats [COLAMD_STATS], /* output statistics and error codes */
+ void * (*allocate) (size_t, size_t),
+ /* pointer to calloc (ANSI C) or */
+ /* mxCalloc (for MATLAB mexFunction) */
+ void (*release) (void *)
+ /* pointer to free (ANSI C) or */
+ /* mxFree (for MATLAB mexFunction) */
+) ;
+
+void colamd_report
+(
+ int stats [COLAMD_STATS]
+) ;
+
+void colamd_l_report
+(
+ UF_long stats [COLAMD_STATS]
+) ;
+
+void symamd_report
+(
+ int stats [COLAMD_STATS]
+) ;
+
+void symamd_l_report
+(
+ UF_long stats [COLAMD_STATS]
+) ;
+
+#ifndef EXTERN
+#define EXTERN extern
+#endif
+
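+/* Pointer to the printing routine used for all output (defined in */
+/* colamd_global.c); if NULL, printing is suppressed. */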
+EXTERN int (*colamd_printf) (const char *, ...) ;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* COLAMD_H */
diff --git a/extern/colamd/README.txt b/extern/colamd/README.txt
new file mode 100644
index 00000000000..5ed81c71d02
--- /dev/null
+++ b/extern/colamd/README.txt
@@ -0,0 +1,127 @@
+The COLAMD ordering method - Version 2.7
+-------------------------------------------------------------------------------
+
+The COLAMD column approximate minimum degree ordering algorithm computes
+a permutation vector P such that the LU factorization of A (:,P)
+tends to be sparser than that of A. The Cholesky factorization of
+(A (:,P))'*(A (:,P)) will also tend to be sparser than that of A'*A.
+SYMAMD is a symmetric minimum degree ordering method based on COLAMD,
+available as a MATLAB-callable function. It constructs a matrix M such
+that M'*M has the same pattern as A, and then uses COLAMD to compute a column
+ordering of M. Colamd and symamd tend to be faster and generate better
+orderings than their MATLAB counterparts, colmmd and symmmd.
+
+To compile and test the colamd m-files and mexFunctions, just unpack the
+COLAMD/ directory from the COLAMD.tar.gz file, and run MATLAB from
+within that directory. Next, type colamd_test to compile and test colamd
+and symamd. This will work on any computer with MATLAB (Unix, PC, or Mac).
+Alternatively, type "make" (in Unix) to compile and run a simple example C
+code, without using MATLAB.
+
+To compile and install the colamd m-files and mexFunctions, just cd to
+COLAMD/MATLAB and type colamd_install in the MATLAB command window.
+A short demo will run. Optionally, type colamd_test to run an extensive test.
+Type "make" in Unix in the COLAMD directory to compile the C-callable
+library and to run a short demo.
+
+If you have MATLAB 7.2 or earlier, you must first edit UFconfig/UFconfig.h to
+remove the "-largeArrayDims" option from the MEX command (or just use
+colamd_make.m inside MATLAB).
+
+Colamd is a built-in routine in MATLAB, available from The
+Mathworks, Inc. In most cases, the compiled COLAMD does not differ between
+Version 2.0 and the current version. Colamd Versions 2.2 and 2.3 differ only
+in their mexFunction interfaces to MATLAB. v2.4 fixes a bug in the symamd
+routine in v2.3. The bug (in v2.3 and earlier) has no effect on the MATLAB
+symamd mexFunction. v2.5 adds additional checks for integer overflow, so that
+the "int" version can be safely used with 64-bit pointers. Refer to the
+ChangeLog for more details.
+
+To use colamd and symamd within an application written in C, all you need are
+colamd.c, colamd_global.c, and colamd.h, which are the C-callable
+colamd/symamd codes. See colamd.c for more information on how to call
+colamd from a C program.
+
+Requires UFconfig, in the ../UFconfig directory relative to this directory.
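+
+A minimal build might look like the following sketch (the file layout and
+flags are assumptions; adapt them to your project):
+
+    cc -I./Include -I../UFconfig myprog.c Source/colamd.c \
+        Source/colamd_global.c -o myprog -lm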
+
+ Copyright (c) 1998-2007, Timothy A. Davis, All Rights Reserved.
+
+ See http://www.cise.ufl.edu/research/sparse/colamd (the colamd.c
+ file) for the License.
+
+
+Related papers:
+
+ T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, An approximate column
+ minimum degree ordering algorithm, ACM Transactions on Mathematical
+ Software, vol. 30, no. 3., pp. 353-376, 2004.
+
+ T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836: COLAMD,
+ an approximate column minimum degree ordering algorithm, ACM
+ Transactions on Mathematical Software, vol. 30, no. 3., pp. 377-380,
+ 2004.
+
+ "An approximate minimum degree column ordering algorithm",
+ S. I. Larimore, MS Thesis, Dept. of Computer and Information
+ Science and Engineering, University of Florida, Gainesville, FL,
+ 1998. CISE Tech Report TR-98-016. Available at
+ ftp://ftp.cise.ufl.edu/cis/tech-reports/tr98/tr98-016.ps
+ via anonymous ftp.
+
+ Approximate Deficiency for Ordering the Columns of a Matrix,
+ J. L. Kern, Senior Thesis, Dept. of Computer and Information
+ Science and Engineering, University of Florida, Gainesville, FL,
+ 1999. Available at http://www.cise.ufl.edu/~davis/Kern/kern.ps
+
+
+Authors: Stefan I. Larimore and Timothy A. Davis, University of Florida,
+in collaboration with John Gilbert, Xerox PARC (now at UC Santa Barbara),
+and Esmond Ng, Lawrence Berkeley National Laboratory (much of this work
+was done while he was at Oak Ridge National Laboratory).
+
+COLAMD files:
+
+ Demo simple demo
+ Doc additional documentation (see colamd.c for more)
+ Include include file
+ Lib compiled C-callable library
+ Makefile primary Unix Makefile
+ MATLAB MATLAB functions
+ README.txt this file
+ Source C source code
+
+ ./Demo:
+ colamd_example.c simple example
+ colamd_example.out output of colamd_example.c
+ colamd_l_example.c simple example, long integers
+ colamd_l_example.out output of colamd_l_example.c
+ Makefile Makefile for C demos
+
+ ./Doc:
+ ChangeLog change log
+ lesser.txt license
+
+ ./Include:
+ colamd.h include file
+
+ ./Lib:
+ Makefile Makefile for C-callable library
+
+ ./MATLAB:
+ colamd2.m MATLAB interface for colamd2
+ colamd_demo.m simple demo
+ colamd_install.m compile and install colamd2 and symamd2
+ colamd_make.m compile colamd2 and symamd2
+	colamdmex.c		MATLAB mexFunction for colamd2
+ colamd_test.m extensive test
+ colamdtestmex.c test function for colamd
+ Contents.m contents of the MATLAB directory
+ luflops.m test code
+ Makefile Makefile for MATLAB functions
+ symamd2.m MATLAB interface for symamd2
+ symamdmex.c MATLAB mexFunction for symamd2
+ symamdtestmex.c test function for symamd
+
+ ./Source:
+ colamd.c primary source code
+ colamd_global.c globally defined function pointers (malloc, free, ...)
diff --git a/extern/colamd/SConscript b/extern/colamd/SConscript
new file mode 100644
index 00000000000..7930e3ace2d
--- /dev/null
+++ b/extern/colamd/SConscript
@@ -0,0 +1,14 @@
+#!/usr/bin/python
+import sys
+import os
+
+Import('env')
+
+defs = ''
+cflags = []
+
+src = env.Glob('Source/*.c')
+
+incs = './Include'
+
+env.BlenderLib ( libname = 'extern_colamd', sources=src, includes=Split(incs), defines=Split(defs), libtype=['extern', 'player'], priority=[20,137], compileflags=cflags )
diff --git a/extern/colamd/Source/colamd.c b/extern/colamd/Source/colamd.c
new file mode 100644
index 00000000000..5fe20d62822
--- /dev/null
+++ b/extern/colamd/Source/colamd.c
@@ -0,0 +1,3611 @@
+/* ========================================================================== */
+/* === colamd/symamd - a sparse matrix column ordering algorithm ============ */
+/* ========================================================================== */
+
+/* COLAMD / SYMAMD
+
+ colamd: an approximate minimum degree column ordering algorithm,
+ for LU factorization of symmetric or unsymmetric matrices,
+ QR factorization, least squares, interior point methods for
+ linear programming problems, and other related problems.
+
+ symamd: an approximate minimum degree ordering algorithm for Cholesky
+ factorization of symmetric matrices.
+
+ Purpose:
+
+ Colamd computes a permutation Q such that the Cholesky factorization of
+ (AQ)'(AQ) has less fill-in and requires fewer floating point operations
+ than A'A. This also provides a good ordering for sparse partial
+ pivoting methods, P(AQ) = LU, where Q is computed prior to numerical
+ factorization, and P is computed during numerical factorization via
+ conventional partial pivoting with row interchanges. Colamd is the
+ column ordering method used in SuperLU, part of the ScaLAPACK library.
+ It is also available as built-in function in MATLAB Version 6,
+ available from MathWorks, Inc. (http://www.mathworks.com). This
+ routine can be used in place of colmmd in MATLAB.
+
+	Symamd computes a permutation P of a symmetric matrix A such that the
+	Cholesky factorization of PAP' has less fill-in and requires fewer
+	floating point operations than that of A. Symamd constructs a matrix M
+	such that M'M has the same nonzero pattern as A, and then orders the
+	columns of M using colamd. The column ordering of M is then returned as
+	the row and column ordering P of A.
+
+ Authors:
+
+ The authors of the code itself are Stefan I. Larimore and Timothy A.
+ Davis (davis at cise.ufl.edu), University of Florida. The algorithm was
+ developed in collaboration with John Gilbert, Xerox PARC, and Esmond
+ Ng, Oak Ridge National Laboratory.
+
+ Acknowledgements:
+
+ This work was supported by the National Science Foundation, under
+ grants DMS-9504974 and DMS-9803599.
+
+ Copyright and License:
+
+ Copyright (c) 1998-2007, Timothy A. Davis, All Rights Reserved.
+ COLAMD is also available under alternate licenses, contact T. Davis
+ for details.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+ Permission is hereby granted to use or copy this program under the
+ terms of the GNU LGPL, provided that the Copyright, this License,
+ and the Availability of the original version is retained on all copies.
+ User documentation of any code that uses this code or any modified
+ version of this code must cite the Copyright, this License, the
+ Availability note, and "Used by permission." Permission to modify
+ the code and to distribute modified code is granted, provided the
+ Copyright, this License, and the Availability note are retained,
+ and a notice that the code was modified is included.
+
+ Availability:
+
+ The colamd/symamd library is available at
+
+ http://www.cise.ufl.edu/research/sparse/colamd/
+
+ This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.c
+ file. It requires the colamd.h file. It is required by the colamdmex.c
+ and symamdmex.c files, for the MATLAB interface to colamd and symamd.
+ Appears as ACM Algorithm 836.
+
+ See the ChangeLog file for changes since Version 1.0.
+
+ References:
+
+ T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, An approximate column
+ minimum degree ordering algorithm, ACM Transactions on Mathematical
+ Software, vol. 30, no. 3., pp. 353-376, 2004.
+
+ T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836: COLAMD,
+ an approximate column minimum degree ordering algorithm, ACM
+ Transactions on Mathematical Software, vol. 30, no. 3., pp. 377-380,
+ 2004.
+
+*/
+
+/* ========================================================================== */
+/* === Description of user-callable routines ================================ */
+/* ========================================================================== */
+
+/* COLAMD includes both int and UF_long versions of all its routines. The
+ * description below is for the int version. For UF_long, all int arguments
+ * become UF_long. UF_long is normally defined as long, except for WIN64.
+
+ ----------------------------------------------------------------------------
+ colamd_recommended:
+ ----------------------------------------------------------------------------
+
+ C syntax:
+
+ #include "colamd.h"
+ size_t colamd_recommended (int nnz, int n_row, int n_col) ;
+ size_t colamd_l_recommended (UF_long nnz, UF_long n_row,
+ UF_long n_col) ;
+
+ Purpose:
+
+ Returns recommended value of Alen for use by colamd. Returns 0
+ if any input argument is negative. The use of this routine
+ is optional. Not needed for symamd, which dynamically allocates
+ its own memory.
+
+ Note that in v2.4 and earlier, these routines returned int or long.
+ They now return a value of type size_t.
+
+ Arguments (all input arguments):
+
+	int nnz ;	Number of nonzeros in the matrix A. This must
+			be the same value as p [n_col] in the call to
+			colamd; otherwise the recommended value of Alen
+			will be wrong.
+
+ int n_row ; Number of rows in the matrix A.
+
+ int n_col ; Number of columns in the matrix A.
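+
+	Example (a sketch): allocate the array A using the recommended
+	size, where nnz, n_row, and n_col describe your matrix:
+
+	    size_t Alen = colamd_recommended (nnz, n_row, n_col) ;
+	    int *A = (Alen > 0) ? (int *) malloc (Alen * sizeof (int)) : NULL ;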
+
+ ----------------------------------------------------------------------------
+ colamd_set_defaults:
+ ----------------------------------------------------------------------------
+
+ C syntax:
+
+ #include "colamd.h"
+ colamd_set_defaults (double knobs [COLAMD_KNOBS]) ;
+ colamd_l_set_defaults (double knobs [COLAMD_KNOBS]) ;
+
+ Purpose:
+
+ Sets the default parameters. The use of this routine is optional.
+
+ Arguments:
+
+ double knobs [COLAMD_KNOBS] ; Output only.
+
+ NOTE: the meaning of the dense row/col knobs has changed in v2.4
+
+ knobs [0] and knobs [1] control dense row and col detection:
+
+ Colamd: rows with more than
+ max (16, knobs [COLAMD_DENSE_ROW] * sqrt (n_col))
+ entries are removed prior to ordering. Columns with more than
+ max (16, knobs [COLAMD_DENSE_COL] * sqrt (MIN (n_row,n_col)))
+ entries are removed prior to
+ ordering, and placed last in the output column ordering.
+
+ Symamd: uses only knobs [COLAMD_DENSE_ROW], which is knobs [0].
+ Rows and columns with more than
+ max (16, knobs [COLAMD_DENSE_ROW] * sqrt (n))
+ entries are removed prior to ordering, and placed last in the
+ output ordering.
+
+ COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1,
+ respectively, in colamd.h. Default values of these two knobs
+ are both 10. Currently, only knobs [0] and knobs [1] are
+ used, but future versions may use more knobs. If so, they will
+ be properly set to their defaults by the future version of
+ colamd_set_defaults, so that the code that calls colamd will
+ not need to change, assuming that you either use
+ colamd_set_defaults, or pass a (double *) NULL pointer as the
+ knobs array to colamd or symamd.
+
+ knobs [2]: aggressive absorption
+
+ knobs [COLAMD_AGGRESSIVE] controls whether or not to do
+ aggressive absorption during the ordering. Default is TRUE.
+
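+
+	For example (a sketch), to keep the default dense row/column
+	thresholds but disable aggressive absorption:
+
+	    double knobs [COLAMD_KNOBS] ;
+	    colamd_set_defaults (knobs) ;
+	    knobs [COLAMD_AGGRESSIVE] = 0 ;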
+
+ ----------------------------------------------------------------------------
+ colamd:
+ ----------------------------------------------------------------------------
+
+ C syntax:
+
+ #include "colamd.h"
+ int colamd (int n_row, int n_col, int Alen, int *A, int *p,
+ double knobs [COLAMD_KNOBS], int stats [COLAMD_STATS]) ;
+ UF_long colamd_l (UF_long n_row, UF_long n_col, UF_long Alen,
+ UF_long *A, UF_long *p, double knobs [COLAMD_KNOBS],
+ UF_long stats [COLAMD_STATS]) ;
+
+ Purpose:
+
+ Computes a column ordering (Q) of A such that P(AQ)=LU or
+ (AQ)'AQ=LL' have less fill-in and require fewer floating point
+ operations than factorizing the unpermuted matrix A or A'A,
+ respectively.
+
+ Returns:
+
+ TRUE (1) if successful, FALSE (0) otherwise.
+
+ Arguments:
+
+ int n_row ; Input argument.
+
+ Number of rows in the matrix A.
+ Restriction: n_row >= 0.
+ Colamd returns FALSE if n_row is negative.
+
+ int n_col ; Input argument.
+
+ Number of columns in the matrix A.
+ Restriction: n_col >= 0.
+ Colamd returns FALSE if n_col is negative.
+
+ int Alen ; Input argument.
+
+ Restriction (see note):
+ Alen >= 2*nnz + 6*(n_col+1) + 4*(n_row+1) + n_col
+ Colamd returns FALSE if these conditions are not met.
+
+	    Note: this restriction makes a modest assumption regarding
+	    the sizes of the two typedef'd structures in colamd.h.
+ We do, however, guarantee that
+
+ Alen >= colamd_recommended (nnz, n_row, n_col)
+
+ will be sufficient. Note: the macro version does not check
+ for integer overflow, and thus is not recommended. Use
+ the colamd_recommended routine instead.
+
+ int A [Alen] ; Input argument, undefined on output.
+
+ A is an integer array of size Alen. Alen must be at least as
+ large as the bare minimum value given above, but this is very
+ low, and can result in excessive run time. For best
+ performance, we recommend that Alen be greater than or equal to
+ colamd_recommended (nnz, n_row, n_col), which adds
+ nnz/5 to the bare minimum value given above.
+
+ On input, the row indices of the entries in column c of the
+ matrix are held in A [(p [c]) ... (p [c+1]-1)]. The row indices
+ in a given column c need not be in ascending order, and
+	    duplicate row indices may be present. However, colamd will
+	    work a little faster if both of these conditions are met
+	    (Colamd puts the matrix into this format itself, if it finds
+	    that the conditions are not met).
+
+ The matrix is 0-based. That is, rows are in the range 0 to
+ n_row-1, and columns are in the range 0 to n_col-1. Colamd
+ returns FALSE if any row index is out of range.
+
+ The contents of A are modified during ordering, and are
+ undefined on output.
+
+ int p [n_col+1] ; Both input and output argument.
+
+ p is an integer array of size n_col+1. On input, it holds the
+ "pointers" for the column form of the matrix A. Column c of
+ the matrix A is held in A [(p [c]) ... (p [c+1]-1)]. The first
+ entry, p [0], must be zero, and p [c] <= p [c+1] must hold
+ for all c in the range 0 to n_col-1. The value p [n_col] is
+ thus the total number of entries in the pattern of the matrix A.
+ Colamd returns FALSE if these conditions are not met.
+
+ On output, if colamd returns TRUE, the array p holds the column
+ permutation (Q, for P(AQ)=LU or (AQ)'(AQ)=LL'), where p [0] is
+ the first column index in the new ordering, and p [n_col-1] is
+ the last. That is, p [k] = j means that column j of A is the
+ kth pivot column, in AQ, where k is in the range 0 to n_col-1
+ (p [0] = j means that column j of A is the first column in AQ).
+
+ If colamd returns FALSE, then no permutation is returned, and
+ p is undefined on output.
+
+ double knobs [COLAMD_KNOBS] ; Input argument.
+
+ See colamd_set_defaults for a description.
+
+ int stats [COLAMD_STATS] ; Output argument.
+
+ Statistics on the ordering, and error status.
+ See colamd.h for related definitions.
+ Colamd returns FALSE if stats is not present.
+
+ stats [0]: number of dense or empty rows ignored.
+
+ stats [1]: number of dense or empty columns ignored (and
+ ordered last in the output permutation p)
+ Note that a row can become "empty" if it
+ contains only "dense" and/or "empty" columns,
+ and similarly a column can become "empty" if it
+ only contains "dense" and/or "empty" rows.
+
+ stats [2]: number of garbage collections performed.
+ This can be excessively high if Alen is close
+ to the minimum required value.
+
+ stats [3]: status code. < 0 is an error code.
+		    >= 1 is a warning or notice.
+
+ 0 OK. Each column of the input matrix contained
+ row indices in increasing order, with no
+ duplicates.
+
+ 1 OK, but columns of input matrix were jumbled
+ (unsorted columns or duplicate entries). Colamd
+ had to do some extra work to sort the matrix
+ first and remove duplicate entries, but it
+ still was able to return a valid permutation
+ (return value of colamd was TRUE).
+
+ stats [4]: highest numbered column that
+ is unsorted or has duplicate
+ entries.
+ stats [5]: last seen duplicate or
+ unsorted row index.
+ stats [6]: number of duplicate or
+ unsorted row indices.
+
+ -1 A is a null pointer
+
+ -2 p is a null pointer
+
+ -3 n_row is negative
+
+ stats [4]: n_row
+
+ -4 n_col is negative
+
+ stats [4]: n_col
+
+ -5 number of nonzeros in matrix is negative
+
+ stats [4]: number of nonzeros, p [n_col]
+
+ -6 p [0] is nonzero
+
+ stats [4]: p [0]
+
+ -7 A is too small
+
+ stats [4]: required size
+ stats [5]: actual size (Alen)
+
+ -8 a column has a negative number of entries
+
+ stats [4]: column with < 0 entries
+ stats [5]: number of entries in col
+
+ -9 a row index is out of bounds
+
+ stats [4]: column with bad row index
+ stats [5]: bad row index
+			stats [6]: n_row, # of rows of matrix
+
+ -10 (unused; see symamd.c)
+
+ -999 (unused; see symamd.c)
+
+ Future versions may return more statistics in the stats array.
+
+ Example:
+
+ See http://www.cise.ufl.edu/research/sparse/colamd/example.c
+ for a complete example.
+
+ To order the columns of a 5-by-4 matrix with 11 nonzero entries in
+ the following nonzero pattern
+
+ x 0 x 0
+ x 0 x x
+ 0 x x 0
+ 0 0 x x
+ x x 0 0
+
+ with default knobs and no output statistics, do the following:
+
+ #include "colamd.h"
+ #define ALEN 100
+ int A [ALEN] = {0, 1, 4, 2, 4, 0, 1, 2, 3, 1, 3} ;
+ int p [ ] = {0, 3, 5, 9, 11} ;
+ int stats [COLAMD_STATS] ;
+ colamd (5, 4, ALEN, A, p, (double *) NULL, stats) ;
+
+ The permutation is returned in the array p, and A is destroyed.
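+
+	If colamd returns FALSE, stats [COLAMD_STATUS] holds a negative
+	error code; a sketch of checking the outcome:
+
+	    if (stats [COLAMD_STATUS] < 0)
+	    {
+		colamd_report (stats) ;
+	    }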
+
+ ----------------------------------------------------------------------------
+ symamd:
+ ----------------------------------------------------------------------------
+
+ C syntax:
+
+ #include "colamd.h"
+ int symamd (int n, int *A, int *p, int *perm,
+ double knobs [COLAMD_KNOBS], int stats [COLAMD_STATS],
+	    void * (*allocate) (size_t, size_t), void (*release) (void *)) ;
+ UF_long symamd_l (UF_long n, UF_long *A, UF_long *p, UF_long *perm,
+ double knobs [COLAMD_KNOBS], UF_long stats [COLAMD_STATS],
+	    void * (*allocate) (size_t, size_t), void (*release) (void *)) ;
+
+ Purpose:
+
+ The symamd routine computes an ordering P of a symmetric sparse
+ matrix A such that the Cholesky factorization PAP' = LL' remains
+ sparse. It is based on a column ordering of a matrix M constructed
+ so that the nonzero pattern of M'M is the same as A. The matrix A
+ is assumed to be symmetric; only the strictly lower triangular part
+ is accessed. You must pass your selected memory allocator (usually
+ calloc/free or mxCalloc/mxFree) to symamd, for it to allocate
+ memory for the temporary matrix M.
+
+ Returns:
+
+ TRUE (1) if successful, FALSE (0) otherwise.
+
+ Arguments:
+
+ int n ; Input argument.
+
+	    Number of rows and columns in the symmetric matrix A.
+ Restriction: n >= 0.
+ Symamd returns FALSE if n is negative.
+
+ int A [nnz] ; Input argument.
+
+ A is an integer array of size nnz, where nnz = p [n].
+
+ The row indices of the entries in column c of the matrix are
+ held in A [(p [c]) ... (p [c+1]-1)]. The row indices in a
+ given column c need not be in ascending order, and duplicate
+ row indices may be present. However, symamd will run faster
+ if the columns are in sorted order with no duplicate entries.
+
+ The matrix is 0-based. That is, rows are in the range 0 to
+ n-1, and columns are in the range 0 to n-1. Symamd
+ returns FALSE if any row index is out of range.
+
+ The contents of A are not modified.
+
+ int p [n+1] ; Input argument.
+
+ p is an integer array of size n+1. On input, it holds the
+ "pointers" for the column form of the matrix A. Column c of
+ the matrix A is held in A [(p [c]) ... (p [c+1]-1)]. The first
+ entry, p [0], must be zero, and p [c] <= p [c+1] must hold
+ for all c in the range 0 to n-1. The value p [n] is
+ thus the total number of entries in the pattern of the matrix A.
+ Symamd returns FALSE if these conditions are not met.
+
+ The contents of p are not modified.
+
+ int perm [n+1] ; Output argument.
+
+ On output, if symamd returns TRUE, the array perm holds the
+ permutation P, where perm [0] is the first index in the new
+ ordering, and perm [n-1] is the last. That is, perm [k] = j
+ means that row and column j of A is the kth column in PAP',
+ where k is in the range 0 to n-1 (perm [0] = j means
+ that row and column j of A are the first row and column in
+ PAP'). The array is used as a workspace during the ordering,
+ which is why it must be of length n+1, not just n.
+
+ double knobs [COLAMD_KNOBS] ; Input argument.
+
+ See colamd_set_defaults for a description.
+
+ int stats [COLAMD_STATS] ; Output argument.
+
+ Statistics on the ordering, and error status.
+ See colamd.h for related definitions.
+ Symamd returns FALSE if stats is not present.
+
+	    stats [0]: number of dense or empty rows and columns ignored
+ (and ordered last in the output permutation
+ perm). Note that a row/column can become
+ "empty" if it contains only "dense" and/or
+ "empty" columns/rows.
+
+ stats [1]: (same as stats [0])
+
+ stats [2]: number of garbage collections performed.
+
+ stats [3]: status code. < 0 is an error code.
+		    >= 1 is a warning or notice.
+
+ 0 OK. Each column of the input matrix contained
+ row indices in increasing order, with no
+ duplicates.
+
+ 1 OK, but columns of input matrix were jumbled
+ (unsorted columns or duplicate entries). Symamd
+ had to do some extra work to sort the matrix
+ first and remove duplicate entries, but it
+ still was able to return a valid permutation
+ (return value of symamd was TRUE).
+
+ stats [4]: highest numbered column that
+ is unsorted or has duplicate
+ entries.
+ stats [5]: last seen duplicate or
+ unsorted row index.
+ stats [6]: number of duplicate or
+ unsorted row indices.
+
+ -1 A is a null pointer
+
+ -2 p is a null pointer
+
+ -3 (unused, see colamd.c)
+
+ -4 n is negative
+
+ stats [4]: n
+
+ -5 number of nonzeros in matrix is negative
+
+ stats [4]: # of nonzeros (p [n]).
+
+ -6 p [0] is nonzero
+
+ stats [4]: p [0]
+
+ -7 (unused)
+
+ -8 a column has a negative number of entries
+
+ stats [4]: column with < 0 entries
+ stats [5]: number of entries in col
+
+ -9 a row index is out of bounds
+
+ stats [4]: column with bad row index
+ stats [5]: bad row index
+			stats [6]: n_row, # of rows of matrix
+
+ -10 out of memory (unable to allocate temporary
+ workspace for M or count arrays using the
+ "allocate" routine passed into symamd).
+
+ Future versions may return more statistics in the stats array.
+
+ void * (*allocate) (size_t, size_t)
+
+ A pointer to a function providing memory allocation. The
+ allocated memory must be returned initialized to zero. For a
+ C application, this argument should normally be a pointer to
+ calloc. For a MATLAB mexFunction, the routine mxCalloc is
+ passed instead.
+
+	void (*release) (void *)
+
+ A pointer to a function that frees memory allocated by the
+ memory allocation routine above. For a C application, this
+ argument should normally be a pointer to free. For a MATLAB
+ mexFunction, the routine mxFree is passed instead.
+
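+	Example (a sketch, using the ANSI C allocators, with n, A, and p
+	describing the matrix as above):
+
+	    int stats [COLAMD_STATS] ;
+	    int *perm = (int *) malloc ((n+1) * sizeof (int)) ;
+	    if (perm && !symamd (n, A, p, perm, (double *) NULL, stats,
+		calloc, free))
+	    {
+		symamd_report (stats) ;
+	    }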
+
+ ----------------------------------------------------------------------------
+ colamd_report:
+ ----------------------------------------------------------------------------
+
+ C syntax:
+
+ #include "colamd.h"
+ colamd_report (int stats [COLAMD_STATS]) ;
+ colamd_l_report (UF_long stats [COLAMD_STATS]) ;
+
+ Purpose:
+
+ Prints the error status and statistics recorded in the stats
+ array on the standard error output (for a standard C routine)
+ or on the MATLAB output (for a mexFunction).
+
+ Arguments:
+
+ int stats [COLAMD_STATS] ; Input only. Statistics from colamd.
+
+
+ ----------------------------------------------------------------------------
+ symamd_report:
+ ----------------------------------------------------------------------------
+
+ C syntax:
+
+ #include "colamd.h"
+ symamd_report (int stats [COLAMD_STATS]) ;
+ symamd_l_report (UF_long stats [COLAMD_STATS]) ;
+
+ Purpose:
+
+ Prints the error status and statistics recorded in the stats
+ array on the standard error output (for a standard C routine)
+ or on the MATLAB output (for a mexFunction).
+
+ Arguments:
+
+ int stats [COLAMD_STATS] ; Input only. Statistics from symamd.
+
+
+*/
+
+/* ========================================================================== */
+/* === Scaffolding code definitions ======================================== */
+/* ========================================================================== */
+
+/* Ensure that debugging is turned off: */
+#ifndef NDEBUG
+#define NDEBUG
+#endif
+
+/* turn on debugging by uncommenting the following line
+ #undef NDEBUG
+*/
+
+/*
+ Our "scaffolding code" philosophy: In our opinion, well-written library
+ code should keep its "debugging" code, and just normally have it turned off
+ by the compiler so as not to interfere with performance. This serves
+ several purposes:
+
+ (1) assertions act as comments to the reader, telling you what the code
+ expects at that point. All assertions will always be true (unless
+ there really is a bug, of course).
+
+ (2) leaving in the scaffolding code assists anyone who would like to modify
+ the code, or understand the algorithm (by reading the debugging output,
+ one can get a glimpse into what the code is doing).
+
+ (3) (gasp!) for actually finding bugs. This code has been heavily tested
+ and "should" be fully functional and bug-free ... but you never know...
+
+ The code will become outrageously slow when debugging is
+ enabled. To control the level of debugging output, set an environment
+ variable D to 0 (little), 1 (some), 2, 3, or 4 (lots). When debugging,
+ you should see the following message on the standard output:
+
+ colamd: debug version, D = 1 (THIS WILL BE SLOW!)
+
+ or a similar message for symamd. If you don't, then debugging has not
+ been enabled.
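+
+	For example, in a POSIX shell (a sketch):
+
+	    D=2 ./colamd_example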
+
+*/
+
+/* ========================================================================== */
+/* === Include files ======================================================== */
+/* ========================================================================== */
+
+#include "colamd.h"
+#include <limits.h>
+#include <math.h>
+
+#ifdef MATLAB_MEX_FILE
+#include "mex.h"
+#include "matrix.h"
+#endif /* MATLAB_MEX_FILE */
+
+#if !defined (NPRINT) || !defined (NDEBUG)
+#include <stdio.h>
+#endif
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+/* ========================================================================== */
+/* === int or UF_long ======================================================= */
+/* ========================================================================== */
+
+/* define UF_long */
+#include "UFconfig.h"
+
+#ifdef DLONG
+
+#define Int UF_long
+#define ID UF_long_id
+#define Int_MAX UF_long_max
+
+#define COLAMD_recommended colamd_l_recommended
+#define COLAMD_set_defaults colamd_l_set_defaults
+#define COLAMD_MAIN colamd_l
+#define SYMAMD_MAIN symamd_l
+#define COLAMD_report colamd_l_report
+#define SYMAMD_report symamd_l_report
+
+#else
+
+#define Int int
+#define ID "%d"
+#define Int_MAX INT_MAX
+
+#define COLAMD_recommended colamd_recommended
+#define COLAMD_set_defaults colamd_set_defaults
+#define COLAMD_MAIN colamd
+#define SYMAMD_MAIN symamd
+#define COLAMD_report colamd_report
+#define SYMAMD_report symamd_report
+
+#endif
+
+/* ========================================================================== */
+/* === Row and Column structures ============================================ */
+/* ========================================================================== */
+
+/* User code that makes use of the colamd/symamd routines need not directly */
+/* reference these structures. They are used only for colamd_recommended. */
+
+typedef struct Colamd_Col_struct
+{
+ Int start ; /* index for A of first row in this column, or DEAD */
+ /* if column is dead */
+ Int length ; /* number of rows in this column */
+ union
+ {
+ Int thickness ; /* number of original columns represented by this */
+ /* col, if the column is alive */
+ Int parent ; /* parent in parent tree super-column structure, if */
+ /* the column is dead */
+ } shared1 ;
+ union
+ {
+ Int score ; /* the score used to maintain heap, if col is alive */
+ Int order ; /* pivot ordering of this column, if col is dead */
+ } shared2 ;
+ union
+ {
+ Int headhash ; /* head of a hash bucket, if col is at the head of */
+ /* a degree list */
+ Int hash ; /* hash value, if col is not in a degree list */
+ Int prev ; /* previous column in degree list, if col is in a */
+ /* degree list (but not at the head of a degree list) */
+ } shared3 ;
+ union
+ {
+ Int degree_next ; /* next column, if col is in a degree list */
+ Int hash_next ; /* next column, if col is in a hash list */
+ } shared4 ;
+
+} Colamd_Col ;
+
+typedef struct Colamd_Row_struct
+{
+ Int start ; /* index for A of first col in this row */
+ Int length ; /* number of principal columns in this row */
+ union
+ {
+ Int degree ; /* number of principal & non-principal columns in row */
+ Int p ; /* used as a row pointer in init_rows_cols () */
+ } shared1 ;
+ union
+ {
+ Int mark ; /* for computing set differences and marking dead rows*/
+ Int first_column ;/* first column in row (used in garbage collection) */
+ } shared2 ;
+
+} Colamd_Row ;
+
+/* ========================================================================== */
+/* === Definitions ========================================================== */
+/* ========================================================================== */
+
+/* Routines are either PUBLIC (user-callable) or PRIVATE (not user-callable) */
+#define PUBLIC
+#define PRIVATE static
+
+#define DENSE_DEGREE(alpha,n) \
+ ((Int) MAX (16.0, (alpha) * sqrt ((double) (n))))
+
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+
+#define ONES_COMPLEMENT(r) (-(r)-1)
+
+/* -------------------------------------------------------------------------- */
+/* Change for version 2.1: define TRUE and FALSE only if not yet defined */
+/* -------------------------------------------------------------------------- */
+
+#ifndef TRUE
+#define TRUE (1)
+#endif
+
+#ifndef FALSE
+#define FALSE (0)
+#endif
+
+/* -------------------------------------------------------------------------- */
+
+#define EMPTY (-1)
+
+/* Row and column status */
+#define ALIVE (0)
+#define DEAD (-1)
+
+/* Column status */
+#define DEAD_PRINCIPAL (-1)
+#define DEAD_NON_PRINCIPAL (-2)
+
+/* Macros for row and column status update and checking. */
+#define ROW_IS_DEAD(r) ROW_IS_MARKED_DEAD (Row[r].shared2.mark)
+#define ROW_IS_MARKED_DEAD(row_mark) (row_mark < ALIVE)
+#define ROW_IS_ALIVE(r) (Row [r].shared2.mark >= ALIVE)
+#define COL_IS_DEAD(c) (Col [c].start < ALIVE)
+#define COL_IS_ALIVE(c) (Col [c].start >= ALIVE)
+#define COL_IS_DEAD_PRINCIPAL(c) (Col [c].start == DEAD_PRINCIPAL)
+#define KILL_ROW(r) { Row [r].shared2.mark = DEAD ; }
+#define KILL_PRINCIPAL_COL(c) { Col [c].start = DEAD_PRINCIPAL ; }
+#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; }
+
+/* ========================================================================== */
+/* === Colamd reporting mechanism =========================================== */
+/* ========================================================================== */
+
+#if defined (MATLAB_MEX_FILE) || defined (MATHWORKS)
+/* In MATLAB, matrices are 1-based to the user, but 0-based internally */
+#define INDEX(i) ((i)+1)
+#else
+/* In C, matrices are 0-based and indices are reported as such in *_report */
+#define INDEX(i) (i)
+#endif
+
+/* All output goes through the PRINTF macro. */
+#define PRINTF(params) { if (colamd_printf != NULL) (void) colamd_printf params ; }
+
+/* ========================================================================== */
+/* === Prototypes of PRIVATE routines ======================================= */
+/* ========================================================================== */
+
+PRIVATE Int init_rows_cols
+(
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int A [],
+ Int p [],
+ Int stats [COLAMD_STATS]
+) ;
+
+PRIVATE void init_scoring
+(
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int A [],
+ Int head [],
+ double knobs [COLAMD_KNOBS],
+ Int *p_n_row2,
+ Int *p_n_col2,
+ Int *p_max_deg
+) ;
+
+PRIVATE Int find_ordering
+(
+ Int n_row,
+ Int n_col,
+ Int Alen,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int A [],
+ Int head [],
+ Int n_col2,
+ Int max_deg,
+ Int pfree,
+ Int aggressive
+) ;
+
+PRIVATE void order_children
+(
+ Int n_col,
+ Colamd_Col Col [],
+ Int p []
+) ;
+
+PRIVATE void detect_super_cols
+(
+
+#ifndef NDEBUG
+ Int n_col,
+ Colamd_Row Row [],
+#endif /* NDEBUG */
+
+ Colamd_Col Col [],
+ Int A [],
+ Int head [],
+ Int row_start,
+ Int row_length
+) ;
+
+PRIVATE Int garbage_collection
+(
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int A [],
+ Int *pfree
+) ;
+
+PRIVATE Int clear_mark
+(
+ Int tag_mark,
+ Int max_mark,
+ Int n_row,
+ Colamd_Row Row []
+) ;
+
+PRIVATE void print_report
+(
+ char *method,
+ Int stats [COLAMD_STATS]
+) ;
+
+/* ========================================================================== */
+/* === Debugging prototypes and definitions ================================= */
+/* ========================================================================== */
+
+#ifndef NDEBUG
+
+#include <assert.h>
+
+/* colamd_debug is the *ONLY* global variable, and is only */
+/* present when debugging */
+
+PRIVATE Int colamd_debug = 0 ; /* debug print level */
+
+#define DEBUG0(params) { PRINTF (params) ; }
+#define DEBUG1(params) { if (colamd_debug >= 1) PRINTF (params) ; }
+#define DEBUG2(params) { if (colamd_debug >= 2) PRINTF (params) ; }
+#define DEBUG3(params) { if (colamd_debug >= 3) PRINTF (params) ; }
+#define DEBUG4(params) { if (colamd_debug >= 4) PRINTF (params) ; }
+
+#ifdef MATLAB_MEX_FILE
+#define ASSERT(expression) (mxAssert ((expression), ""))
+#else
+#define ASSERT(expression) (assert (expression))
+#endif /* MATLAB_MEX_FILE */
+
+PRIVATE void colamd_get_debug /* gets the debug print level from getenv */
+(
+ char *method
+) ;
+
+PRIVATE void debug_deg_lists
+(
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int head [],
+ Int min_score,
+ Int should,
+ Int max_deg
+) ;
+
+PRIVATE void debug_mark
+(
+ Int n_row,
+ Colamd_Row Row [],
+ Int tag_mark,
+ Int max_mark
+) ;
+
+PRIVATE void debug_matrix
+(
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int A []
+) ;
+
+PRIVATE void debug_structures
+(
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int A [],
+ Int n_col2
+) ;
+
+#else /* NDEBUG */
+
+/* === No debugging ========================================================= */
+
+#define DEBUG0(params) ;
+#define DEBUG1(params) ;
+#define DEBUG2(params) ;
+#define DEBUG3(params) ;
+#define DEBUG4(params) ;
+
+#define ASSERT(expression)
+
+#endif /* NDEBUG */
+
+/* ========================================================================== */
+/* === USER-CALLABLE ROUTINES: ============================================== */
+/* ========================================================================== */
+
+/* ========================================================================== */
+/* === colamd_recommended =================================================== */
+/* ========================================================================== */
+
+/*
+ The colamd_recommended routine returns the suggested size for Alen. This
+ value has been determined to provide good balance between the number of
+ garbage collections and the memory requirements for colamd. If any
+ argument is negative, or if integer overflow occurs, a 0 is returned as an
+ error condition. 2*nnz space is required for the row and column
+ indices of the matrix. COLAMD_C (n_col) + COLAMD_R (n_row) space is
+ required for the Col and Row arrays, respectively, which are internal to
+ colamd (roughly 6*n_col + 4*n_row). An additional n_col space is the
+ minimal amount of "elbow room", and nnz/5 more space is recommended for
+ run time efficiency.
+
+ Alen is approximately 2.2*nnz + 7*n_col + 4*n_row + 10.
+
+ This function is not needed when using symamd.
+*/
+
+/* add two values of type size_t, and check for integer overflow */
+static size_t t_add (size_t a, size_t b, int *ok)
+{
+ (*ok) = (*ok) && ((a + b) >= MAX (a,b)) ;
+ return ((*ok) ? (a + b) : 0) ;
+}
+
+/* compute a*k where k is a small integer, and check for integer overflow */
+static size_t t_mult (size_t a, size_t k, int *ok)
+{
+ size_t i, s = 0 ;
+ for (i = 0 ; i < k ; i++)
+ {
+ s = t_add (s, a, ok) ;
+ }
+ return (s) ;
+}
+
+/* size of the Col and Row structures */
+#define COLAMD_C(n_col,ok) \
+ ((t_mult (t_add (n_col, 1, ok), sizeof (Colamd_Col), ok) / sizeof (Int)))
+
+#define COLAMD_R(n_row,ok) \
+ ((t_mult (t_add (n_row, 1, ok), sizeof (Colamd_Row), ok) / sizeof (Int)))
+
+
+PUBLIC size_t COLAMD_recommended /* returns recommended value of Alen. */
+(
+ /* === Parameters ======================================================= */
+
+ Int nnz, /* number of nonzeros in A */
+ Int n_row, /* number of rows in A */
+ Int n_col /* number of columns in A */
+)
+{
+ size_t s, c, r ;
+ int ok = TRUE ;
+ if (nnz < 0 || n_row < 0 || n_col < 0)
+ {
+ return (0) ;
+ }
+ s = t_mult (nnz, 2, &ok) ; /* 2*nnz */
+ c = COLAMD_C (n_col, &ok) ; /* size of column structures */
+ r = COLAMD_R (n_row, &ok) ; /* size of row structures */
+ s = t_add (s, c, &ok) ;
+ s = t_add (s, r, &ok) ;
+ s = t_add (s, n_col, &ok) ; /* elbow room */
+ s = t_add (s, nnz/5, &ok) ; /* elbow room */
+ ok = ok && (s < Int_MAX) ;
+ return (ok ? s : 0) ;
+}
+
+
+/* ========================================================================== */
+/* === colamd_set_defaults ================================================== */
+/* ========================================================================== */
+
+/*
+ The colamd_set_defaults routine sets the default values of the user-
+ controllable parameters for colamd and symamd:
+
+ Colamd: rows with more than max (16, knobs [0] * sqrt (n_col))
+ entries are removed prior to ordering. Columns with more than
+ max (16, knobs [1] * sqrt (MIN (n_row,n_col))) entries are removed
+ prior to ordering, and placed last in the output column ordering.
+
+ Symamd: Rows and columns with more than max (16, knobs [0] * sqrt (n))
+ entries are removed prior to ordering, and placed last in the
+ output ordering.
+
+ knobs [0] dense row control
+
+ knobs [1] dense column control
+
+	knobs [2]	if nonzero, do aggressive absorption
+
+	knobs [3..19]	unused, but future versions might use them
+
+*/
+
+PUBLIC void COLAMD_set_defaults
+(
+ /* === Parameters ======================================================= */
+
+ double knobs [COLAMD_KNOBS] /* knob array */
+)
+{
+ /* === Local variables ================================================== */
+
+ Int i ;
+
+ if (!knobs)
+ {
+ return ; /* no knobs to initialize */
+ }
+ for (i = 0 ; i < COLAMD_KNOBS ; i++)
+ {
+ knobs [i] = 0 ;
+ }
+ knobs [COLAMD_DENSE_ROW] = 10 ;
+ knobs [COLAMD_DENSE_COL] = 10 ;
+    knobs [COLAMD_AGGRESSIVE] = TRUE ;	/* default: do aggressive absorption */
+}
+
+
+/* ========================================================================== */
+/* === symamd =============================================================== */
+/* ========================================================================== */
+
+PUBLIC Int SYMAMD_MAIN /* return TRUE if OK, FALSE otherwise */
+(
+ /* === Parameters ======================================================= */
+
+ Int n, /* number of rows and columns of A */
+ Int A [], /* row indices of A */
+ Int p [], /* column pointers of A */
+ Int perm [], /* output permutation, size n+1 */
+ double knobs [COLAMD_KNOBS], /* parameters (uses defaults if NULL) */
+ Int stats [COLAMD_STATS], /* output statistics and error codes */
+ void * (*allocate) (size_t, size_t),
+ /* pointer to calloc (ANSI C) or */
+ /* mxCalloc (for MATLAB mexFunction) */
+ void (*release) (void *)
+ /* pointer to free (ANSI C) or */
+ /* mxFree (for MATLAB mexFunction) */
+)
+{
+ /* === Local variables ================================================== */
+
+ Int *count ; /* length of each column of M, and col pointer*/
+ Int *mark ; /* mark array for finding duplicate entries */
+ Int *M ; /* row indices of matrix M */
+ size_t Mlen ; /* length of M */
+ Int n_row ; /* number of rows in M */
+ Int nnz ; /* number of entries in A */
+ Int i ; /* row index of A */
+ Int j ; /* column index of A */
+ Int k ; /* row index of M */
+ Int mnz ; /* number of nonzeros in M */
+ Int pp ; /* index into a column of A */
+ Int last_row ; /* last row seen in the current column */
+ Int length ; /* number of nonzeros in a column */
+
+ double cknobs [COLAMD_KNOBS] ; /* knobs for colamd */
+ double default_knobs [COLAMD_KNOBS] ; /* default knobs for colamd */
+
+#ifndef NDEBUG
+ colamd_get_debug ("symamd") ;
+#endif /* NDEBUG */
+
+ /* === Check the input arguments ======================================== */
+
+ if (!stats)
+ {
+ DEBUG0 (("symamd: stats not present\n")) ;
+ return (FALSE) ;
+ }
+ for (i = 0 ; i < COLAMD_STATS ; i++)
+ {
+ stats [i] = 0 ;
+ }
+ stats [COLAMD_STATUS] = COLAMD_OK ;
+ stats [COLAMD_INFO1] = -1 ;
+ stats [COLAMD_INFO2] = -1 ;
+
+ if (!A)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ;
+ DEBUG0 (("symamd: A not present\n")) ;
+ return (FALSE) ;
+ }
+
+ if (!p) /* p is not present */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ;
+ DEBUG0 (("symamd: p not present\n")) ;
+ return (FALSE) ;
+ }
+
+ if (n < 0) /* n must be >= 0 */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ;
+ stats [COLAMD_INFO1] = n ;
+ DEBUG0 (("symamd: n negative %d\n", n)) ;
+ return (FALSE) ;
+ }
+
+ nnz = p [n] ;
+ if (nnz < 0) /* nnz must be >= 0 */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ;
+ stats [COLAMD_INFO1] = nnz ;
+ DEBUG0 (("symamd: number of entries negative %d\n", nnz)) ;
+ return (FALSE) ;
+ }
+
+ if (p [0] != 0)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ;
+ stats [COLAMD_INFO1] = p [0] ;
+ DEBUG0 (("symamd: p[0] not zero %d\n", p [0])) ;
+ return (FALSE) ;
+ }
+
+ /* === If no knobs, set default knobs =================================== */
+
+ if (!knobs)
+ {
+ COLAMD_set_defaults (default_knobs) ;
+ knobs = default_knobs ;
+ }
+
+ /* === Allocate count and mark ========================================== */
+
+ count = (Int *) ((*allocate) (n+1, sizeof (Int))) ;
+ if (!count)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_out_of_memory ;
+ DEBUG0 (("symamd: allocate count (size %d) failed\n", n+1)) ;
+ return (FALSE) ;
+ }
+
+ mark = (Int *) ((*allocate) (n+1, sizeof (Int))) ;
+ if (!mark)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_out_of_memory ;
+ (*release) ((void *) count) ;
+ DEBUG0 (("symamd: allocate mark (size %d) failed\n", n+1)) ;
+ return (FALSE) ;
+ }
+
+ /* === Compute column counts of M, check if A is valid ================== */
+
+ stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/
+
+ for (i = 0 ; i < n ; i++)
+ {
+ mark [i] = -1 ;
+ }
+
+ for (j = 0 ; j < n ; j++)
+ {
+ last_row = -1 ;
+
+ length = p [j+1] - p [j] ;
+ if (length < 0)
+ {
+ /* column pointers must be non-decreasing */
+ stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
+ stats [COLAMD_INFO1] = j ;
+ stats [COLAMD_INFO2] = length ;
+ (*release) ((void *) count) ;
+ (*release) ((void *) mark) ;
+ DEBUG0 (("symamd: col %d negative length %d\n", j, length)) ;
+ return (FALSE) ;
+ }
+
+ for (pp = p [j] ; pp < p [j+1] ; pp++)
+ {
+ i = A [pp] ;
+ if (i < 0 || i >= n)
+ {
+ /* row index i, in column j, is out of bounds */
+ stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ;
+ stats [COLAMD_INFO1] = j ;
+ stats [COLAMD_INFO2] = i ;
+ stats [COLAMD_INFO3] = n ;
+ (*release) ((void *) count) ;
+ (*release) ((void *) mark) ;
+ DEBUG0 (("symamd: row %d col %d out of bounds\n", i, j)) ;
+ return (FALSE) ;
+ }
+
+ if (i <= last_row || mark [i] == j)
+ {
+ /* row index is unsorted or repeated (or both), thus col */
+ /* is jumbled. This is a notice, not an error condition. */
+ stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ;
+ stats [COLAMD_INFO1] = j ;
+ stats [COLAMD_INFO2] = i ;
+ (stats [COLAMD_INFO3]) ++ ;
+ DEBUG1 (("symamd: row %d col %d unsorted/duplicate\n", i, j)) ;
+ }
+
+ if (i > j && mark [i] != j)
+ {
+ /* row k of M will contain column indices i and j */
+ count [i]++ ;
+ count [j]++ ;
+ }
+
+ /* mark the row as having been seen in this column */
+ mark [i] = j ;
+
+ last_row = i ;
+ }
+ }
+
+ /* v2.4: removed free(mark) */
+
+ /* === Compute column pointers of M ===================================== */
+
+ /* use output permutation, perm, for column pointers of M */
+ perm [0] = 0 ;
+ for (j = 1 ; j <= n ; j++)
+ {
+ perm [j] = perm [j-1] + count [j-1] ;
+ }
+ for (j = 0 ; j < n ; j++)
+ {
+ count [j] = perm [j] ;
+ }
+
+ /* === Construct M ====================================================== */
+
+ mnz = perm [n] ;
+ n_row = mnz / 2 ;
+ Mlen = COLAMD_recommended (mnz, n_row, n) ;
+ M = (Int *) ((*allocate) (Mlen, sizeof (Int))) ;
+ DEBUG0 (("symamd: M is %d-by-%d with %d entries, Mlen = %g\n",
+ n_row, n, mnz, (double) Mlen)) ;
+
+ if (!M)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_out_of_memory ;
+ (*release) ((void *) count) ;
+ (*release) ((void *) mark) ;
+ DEBUG0 (("symamd: allocate M (size %g) failed\n", (double) Mlen)) ;
+ return (FALSE) ;
+ }
+
+ k = 0 ;
+
+ if (stats [COLAMD_STATUS] == COLAMD_OK)
+ {
+ /* Matrix is OK */
+ for (j = 0 ; j < n ; j++)
+ {
+ ASSERT (p [j+1] - p [j] >= 0) ;
+ for (pp = p [j] ; pp < p [j+1] ; pp++)
+ {
+ i = A [pp] ;
+ ASSERT (i >= 0 && i < n) ;
+ if (i > j)
+ {
+ /* row k of M contains column indices i and j */
+ M [count [i]++] = k ;
+ M [count [j]++] = k ;
+ k++ ;
+ }
+ }
+ }
+ }
+ else
+ {
+ /* Matrix is jumbled. Do not add duplicates to M. Unsorted cols OK. */
+ DEBUG0 (("symamd: Duplicates in A.\n")) ;
+ for (i = 0 ; i < n ; i++)
+ {
+ mark [i] = -1 ;
+ }
+ for (j = 0 ; j < n ; j++)
+ {
+ ASSERT (p [j+1] - p [j] >= 0) ;
+ for (pp = p [j] ; pp < p [j+1] ; pp++)
+ {
+ i = A [pp] ;
+ ASSERT (i >= 0 && i < n) ;
+ if (i > j && mark [i] != j)
+ {
+ /* row k of M contains column indices i and j */
+ M [count [i]++] = k ;
+ M [count [j]++] = k ;
+ k++ ;
+ mark [i] = j ;
+ }
+ }
+ }
+ /* v2.4: free(mark) moved below */
+ }
+
+ /* count and mark no longer needed */
+ (*release) ((void *) count) ;
+ (*release) ((void *) mark) ; /* v2.4: free (mark) moved here */
+ ASSERT (k == n_row) ;
+
+ /* === Adjust the knobs for M =========================================== */
+
+ for (i = 0 ; i < COLAMD_KNOBS ; i++)
+ {
+ cknobs [i] = knobs [i] ;
+ }
+
+ /* there are no dense rows in M */
+ cknobs [COLAMD_DENSE_ROW] = -1 ;
+ cknobs [COLAMD_DENSE_COL] = knobs [COLAMD_DENSE_ROW] ;
+
+ /* === Order the columns of M =========================================== */
+
+ /* v2.4: colamd cannot fail here, so the error check is removed */
+ (void) COLAMD_MAIN (n_row, n, (Int) Mlen, M, perm, cknobs, stats) ;
+
+ /* Note that the output permutation is now in perm */
+
+ /* === get the statistics for symamd from colamd ======================== */
+
+ /* a dense column in colamd means a dense row and col in symamd */
+ stats [COLAMD_DENSE_ROW] = stats [COLAMD_DENSE_COL] ;
+
+ /* === Free M =========================================================== */
+
+ (*release) ((void *) M) ;
+ DEBUG0 (("symamd: done.\n")) ;
+ return (TRUE) ;
+
+}
+
+/* ========================================================================== */
+/* === colamd =============================================================== */
+/* ========================================================================== */
+
+/*
+ The colamd routine computes a column ordering Q of a sparse matrix
+ A such that the LU factorization P(AQ) = LU remains sparse, where P is
+ selected via partial pivoting. The routine can also be viewed as
+ providing a permutation Q such that the Cholesky factorization
+ (AQ)'(AQ) = LL' remains sparse.
+*/
+
+PUBLIC Int COLAMD_MAIN /* returns TRUE if successful, FALSE otherwise*/
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row, /* number of rows in A */
+ Int n_col, /* number of columns in A */
+ Int Alen, /* length of A */
+ Int A [], /* row indices of A */
+ Int p [], /* pointers to columns in A */
+ double knobs [COLAMD_KNOBS],/* parameters (uses defaults if NULL) */
+ Int stats [COLAMD_STATS] /* output statistics and error codes */
+)
+{
+ /* === Local variables ================================================== */
+
+ Int i ; /* loop index */
+ Int nnz ; /* nonzeros in A */
+ size_t Row_size ; /* size of Row [], in integers */
+ size_t Col_size ; /* size of Col [], in integers */
+ size_t need ; /* minimum required length of A */
+ Colamd_Row *Row ; /* pointer into A of Row [0..n_row] array */
+ Colamd_Col *Col ; /* pointer into A of Col [0..n_col] array */
+ Int n_col2 ; /* number of non-dense, non-empty columns */
+ Int n_row2 ; /* number of non-dense, non-empty rows */
+ Int ngarbage ; /* number of garbage collections performed */
+ Int max_deg ; /* maximum row degree */
+ double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
+ Int aggressive ; /* do aggressive absorption */
+ int ok ;
+
+#ifndef NDEBUG
+ colamd_get_debug ("colamd") ;
+#endif /* NDEBUG */
+
+ /* === Check the input arguments ======================================== */
+
+ if (!stats)
+ {
+ DEBUG0 (("colamd: stats not present\n")) ;
+ return (FALSE) ;
+ }
+ for (i = 0 ; i < COLAMD_STATS ; i++)
+ {
+ stats [i] = 0 ;
+ }
+ stats [COLAMD_STATUS] = COLAMD_OK ;
+ stats [COLAMD_INFO1] = -1 ;
+ stats [COLAMD_INFO2] = -1 ;
+
+ if (!A) /* A is not present */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ;
+ DEBUG0 (("colamd: A not present\n")) ;
+ return (FALSE) ;
+ }
+
+ if (!p) /* p is not present */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ;
+ DEBUG0 (("colamd: p not present\n")) ;
+ return (FALSE) ;
+ }
+
+ if (n_row < 0) /* n_row must be >= 0 */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ;
+ stats [COLAMD_INFO1] = n_row ;
+ DEBUG0 (("colamd: nrow negative %d\n", n_row)) ;
+ return (FALSE) ;
+ }
+
+ if (n_col < 0) /* n_col must be >= 0 */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ;
+ stats [COLAMD_INFO1] = n_col ;
+ DEBUG0 (("colamd: ncol negative %d\n", n_col)) ;
+ return (FALSE) ;
+ }
+
+ nnz = p [n_col] ;
+ if (nnz < 0) /* nnz must be >= 0 */
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ;
+ stats [COLAMD_INFO1] = nnz ;
+ DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ;
+ return (FALSE) ;
+ }
+
+ if (p [0] != 0)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ;
+ stats [COLAMD_INFO1] = p [0] ;
+ DEBUG0 (("colamd: p[0] not zero %d\n", p [0])) ;
+ return (FALSE) ;
+ }
+
+ /* === If no knobs, set default knobs =================================== */
+
+ if (!knobs)
+ {
+ COLAMD_set_defaults (default_knobs) ;
+ knobs = default_knobs ;
+ }
+
+ aggressive = (knobs [COLAMD_AGGRESSIVE] != FALSE) ;
+
+ /* === Allocate the Row and Col arrays from array A ===================== */
+
+ ok = TRUE ;
+ Col_size = COLAMD_C (n_col, &ok) ; /* size of Col array of structs */
+ Row_size = COLAMD_R (n_row, &ok) ; /* size of Row array of structs */
+
+ /* need = 2*nnz + n_col + Col_size + Row_size ; */
+ need = t_mult (nnz, 2, &ok) ;
+ need = t_add (need, n_col, &ok) ;
+ need = t_add (need, Col_size, &ok) ;
+ need = t_add (need, Row_size, &ok) ;
+
+ if (!ok || need > (size_t) Alen || need > Int_MAX)
+ {
+ /* not enough space in array A to perform the ordering */
+ stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ;
+ stats [COLAMD_INFO1] = need ;
+ stats [COLAMD_INFO2] = Alen ;
+ DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen));
+ return (FALSE) ;
+ }
+
+ Alen -= Col_size + Row_size ;
+ Col = (Colamd_Col *) &A [Alen] ;
+ Row = (Colamd_Row *) &A [Alen + Col_size] ;
+
+ /* === Construct the row and column data structures ===================== */
+
+ if (!init_rows_cols (n_row, n_col, Row, Col, A, p, stats))
+ {
+ /* input matrix is invalid */
+ DEBUG0 (("colamd: Matrix invalid\n")) ;
+ return (FALSE) ;
+ }
+
+ /* === Initialize scores, kill dense rows/columns ======================= */
+
+ init_scoring (n_row, n_col, Row, Col, A, p, knobs,
+ &n_row2, &n_col2, &max_deg) ;
+
+ /* === Order the supercolumns =========================================== */
+
+ ngarbage = find_ordering (n_row, n_col, Alen, Row, Col, A, p,
+ n_col2, max_deg, 2*nnz, aggressive) ;
+
+ /* === Order the non-principal columns ================================== */
+
+ order_children (n_col, Col, p) ;
+
+ /* === Return statistics in stats ======================================= */
+
+ stats [COLAMD_DENSE_ROW] = n_row - n_row2 ;
+ stats [COLAMD_DENSE_COL] = n_col - n_col2 ;
+ stats [COLAMD_DEFRAG_COUNT] = ngarbage ;
+ DEBUG0 (("colamd: done.\n")) ;
+ return (TRUE) ;
+}
+
+
+/* ========================================================================== */
+/* === colamd_report ======================================================== */
+/* ========================================================================== */
+
+PUBLIC void COLAMD_report
+(
+ Int stats [COLAMD_STATS]
+)
+{
+ print_report ("colamd", stats) ;
+}
+
+
+/* ========================================================================== */
+/* === symamd_report ======================================================== */
+/* ========================================================================== */
+
+PUBLIC void SYMAMD_report
+(
+ Int stats [COLAMD_STATS]
+)
+{
+ print_report ("symamd", stats) ;
+}
+
+
+
+/* ========================================================================== */
+/* === NON-USER-CALLABLE ROUTINES: ========================================== */
+/* ========================================================================== */
+
+/* There are no user-callable routines beyond this point in the file */
+
+
+/* ========================================================================== */
+/* === init_rows_cols ======================================================= */
+/* ========================================================================== */
+
+/*
+ Takes the column form of the matrix in A and creates the row form of the
+ matrix. Also, row and column attributes are stored in the Col and Row
+ structs. If the columns are unsorted or contain duplicate row indices,
+ this routine will also sort and remove duplicate row indices from the
+ column form of the matrix. Returns FALSE if the matrix is invalid,
+ TRUE otherwise. Not user-callable.
+*/
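+
+/*
+    Worked example (added for exposition): take n_row = 3, n_col = 1,
+    p = {0, 3}, and column 0 given as the jumbled list {2, 0, 0}. The
+    scan below sees row 0 after row 2 (unsorted) and then row 0 again
+    (duplicate), so it sets stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED
+    and trims Col [0].length from 3 to 2. Rebuilding the column form
+    from the row form then leaves the sorted, duplicate-free pattern
+    {0, 2}.
+*/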
+
+PRIVATE Int init_rows_cols /* returns TRUE if OK, or FALSE otherwise */
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row, /* number of rows of A */
+ Int n_col, /* number of columns of A */
+ Colamd_Row Row [], /* of size n_row+1 */
+ Colamd_Col Col [], /* of size n_col+1 */
+ Int A [], /* row indices of A, of size Alen */
+ Int p [], /* pointers to columns in A, of size n_col+1 */
+ Int stats [COLAMD_STATS] /* colamd statistics */
+)
+{
+ /* === Local variables ================================================== */
+
+ Int col ; /* a column index */
+ Int row ; /* a row index */
+ Int *cp ; /* a column pointer */
+ Int *cp_end ; /* a pointer to the end of a column */
+ Int *rp ; /* a row pointer */
+ Int *rp_end ; /* a pointer to the end of a row */
+ Int last_row ; /* previous row */
+
+ /* === Initialize columns, and check column pointers ==================== */
+
+ for (col = 0 ; col < n_col ; col++)
+ {
+ Col [col].start = p [col] ;
+ Col [col].length = p [col+1] - p [col] ;
+
+ if (Col [col].length < 0)
+ {
+ /* column pointers must be non-decreasing */
+ stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
+ stats [COLAMD_INFO1] = col ;
+ stats [COLAMD_INFO2] = Col [col].length ;
+ DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ;
+ return (FALSE) ;
+ }
+
+ Col [col].shared1.thickness = 1 ;
+ Col [col].shared2.score = 0 ;
+ Col [col].shared3.prev = EMPTY ;
+ Col [col].shared4.degree_next = EMPTY ;
+ }
+
+ /* p [0..n_col] no longer needed, used as "head" in subsequent routines */
+
+ /* === Scan columns, compute row degrees, and check row indices ========= */
+
+ stats [COLAMD_INFO3] = 0 ; /* number of duplicate or unsorted row indices*/
+
+ for (row = 0 ; row < n_row ; row++)
+ {
+ Row [row].length = 0 ;
+ Row [row].shared2.mark = -1 ;
+ }
+
+ for (col = 0 ; col < n_col ; col++)
+ {
+ last_row = -1 ;
+
+ cp = &A [p [col]] ;
+ cp_end = &A [p [col+1]] ;
+
+ while (cp < cp_end)
+ {
+ row = *cp++ ;
+
+ /* make sure row indices are within range */
+ if (row < 0 || row >= n_row)
+ {
+ stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ;
+ stats [COLAMD_INFO1] = col ;
+ stats [COLAMD_INFO2] = row ;
+ stats [COLAMD_INFO3] = n_row ;
+ DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ;
+ return (FALSE) ;
+ }
+
+ if (row <= last_row || Row [row].shared2.mark == col)
+ {
+ /* row indices are unsorted or repeated (or both), thus col */
+ /* is jumbled. This is a notice, not an error condition. */
+ stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ;
+ stats [COLAMD_INFO1] = col ;
+ stats [COLAMD_INFO2] = row ;
+ (stats [COLAMD_INFO3]) ++ ;
+ DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col));
+ }
+
+ if (Row [row].shared2.mark != col)
+ {
+ Row [row].length++ ;
+ }
+ else
+ {
+ /* this is a repeated entry in the column, */
+ /* it will be removed */
+ Col [col].length-- ;
+ }
+
+ /* mark the row as having been seen in this column */
+ Row [row].shared2.mark = col ;
+
+ last_row = row ;
+ }
+ }
+
+ /* === Compute row pointers ============================================= */
+
+ /* row form of the matrix starts directly after the column */
+ /* form of matrix in A */
+ Row [0].start = p [n_col] ;
+ Row [0].shared1.p = Row [0].start ;
+ Row [0].shared2.mark = -1 ;
+ for (row = 1 ; row < n_row ; row++)
+ {
+ Row [row].start = Row [row-1].start + Row [row-1].length ;
+ Row [row].shared1.p = Row [row].start ;
+ Row [row].shared2.mark = -1 ;
+ }
+
+ /* === Create row form ================================================== */
+
+ if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+ {
+ /* if cols jumbled, watch for repeated row indices */
+ for (col = 0 ; col < n_col ; col++)
+ {
+ cp = &A [p [col]] ;
+ cp_end = &A [p [col+1]] ;
+ while (cp < cp_end)
+ {
+ row = *cp++ ;
+ if (Row [row].shared2.mark != col)
+ {
+ A [(Row [row].shared1.p)++] = col ;
+ Row [row].shared2.mark = col ;
+ }
+ }
+ }
+ }
+ else
+ {
+ /* if cols not jumbled, we don't need the mark (this is faster) */
+ for (col = 0 ; col < n_col ; col++)
+ {
+ cp = &A [p [col]] ;
+ cp_end = &A [p [col+1]] ;
+ while (cp < cp_end)
+ {
+ A [(Row [*cp++].shared1.p)++] = col ;
+ }
+ }
+ }
+
+ /* === Clear the row marks and set row degrees ========================== */
+
+ for (row = 0 ; row < n_row ; row++)
+ {
+ Row [row].shared2.mark = 0 ;
+ Row [row].shared1.degree = Row [row].length ;
+ }
+
+ /* === See if we need to re-create columns ============================== */
+
+ if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+ {
+ DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ;
+
+#ifndef NDEBUG
+ /* make sure column lengths are correct */
+ for (col = 0 ; col < n_col ; col++)
+ {
+ p [col] = Col [col].length ;
+ }
+ for (row = 0 ; row < n_row ; row++)
+ {
+ rp = &A [Row [row].start] ;
+ rp_end = rp + Row [row].length ;
+ while (rp < rp_end)
+ {
+ p [*rp++]-- ;
+ }
+ }
+ for (col = 0 ; col < n_col ; col++)
+ {
+ ASSERT (p [col] == 0) ;
+ }
+ /* now p is all zero (different than when debugging is turned off) */
+#endif /* NDEBUG */
+
+ /* === Compute col pointers ========================================= */
+
+ /* col form of the matrix starts at A [0]. */
+ /* Note, we may have a gap between the col form and the row */
+ /* form if there were duplicate entries, if so, it will be */
+ /* removed upon the first garbage collection */
+ Col [0].start = 0 ;
+ p [0] = Col [0].start ;
+ for (col = 1 ; col < n_col ; col++)
+ {
+ /* note that the lengths here are for pruned columns, i.e. */
+ /* no duplicate row indices will exist for these columns */
+ Col [col].start = Col [col-1].start + Col [col-1].length ;
+ p [col] = Col [col].start ;
+ }
+
+ /* === Re-create col form =========================================== */
+
+ for (row = 0 ; row < n_row ; row++)
+ {
+ rp = &A [Row [row].start] ;
+ rp_end = rp + Row [row].length ;
+ while (rp < rp_end)
+ {
+ A [(p [*rp++])++] = row ;
+ }
+ }
+ }
+
+ /* === Done. Matrix is not (or no longer) jumbled ====================== */
+
+ return (TRUE) ;
+}
+
+
+/* ========================================================================== */
+/* === init_scoring ========================================================= */
+/* ========================================================================== */
+
+/*
+ Kills dense or empty columns and rows, calculates an initial score for
+ each column, and places all columns in the degree lists. Not user-callable.
+*/
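+
+/*
+    Worked figures (added for exposition, and assuming DENSE_DEGREE
+    (alpha, n) expands to max (16, alpha * sqrt (n)) as in other colamd
+    2.7 sources): with the default knob value 10 and n_col = 10000, a
+    row is treated as dense once it has more than 10 * sqrt (10000) =
+    1000 entries. Dense rows are simply ignored by the ordering, while
+    dense columns are killed and placed last in the output permutation.
+*/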
+
+PRIVATE void init_scoring
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row, /* number of rows of A */
+ Int n_col, /* number of columns of A */
+ Colamd_Row Row [], /* of size n_row+1 */
+ Colamd_Col Col [], /* of size n_col+1 */
+ Int A [], /* column form and row form of A */
+ Int head [], /* of size n_col+1 */
+ double knobs [COLAMD_KNOBS],/* parameters */
+ Int *p_n_row2, /* number of non-dense, non-empty rows */
+ Int *p_n_col2, /* number of non-dense, non-empty columns */
+ Int *p_max_deg /* maximum row degree */
+)
+{
+ /* === Local variables ================================================== */
+
+ Int c ; /* a column index */
+ Int r, row ; /* a row index */
+ Int *cp ; /* a column pointer */
+ Int deg ; /* degree of a row or column */
+ Int *cp_end ; /* a pointer to the end of a column */
+ Int *new_cp ; /* new column pointer */
+ Int col_length ; /* length of pruned column */
+ Int score ; /* current column score */
+ Int n_col2 ; /* number of non-dense, non-empty columns */
+ Int n_row2 ; /* number of non-dense, non-empty rows */
+ Int dense_row_count ; /* remove rows with more entries than this */
+ Int dense_col_count ; /* remove cols with more entries than this */
+ Int min_score ; /* smallest column score */
+ Int max_deg ; /* maximum row degree */
+ Int next_col ; /* Used to add to degree list.*/
+
+#ifndef NDEBUG
+ Int debug_count ; /* debug only. */
+#endif /* NDEBUG */
+
+ /* === Extract knobs ==================================================== */
+
+ /* Note: if knobs contains a NaN, this is undefined: */
+ if (knobs [COLAMD_DENSE_ROW] < 0)
+ {
+ /* only remove completely dense rows */
+ dense_row_count = n_col-1 ;
+ }
+ else
+ {
+ dense_row_count = DENSE_DEGREE (knobs [COLAMD_DENSE_ROW], n_col) ;
+ }
+ if (knobs [COLAMD_DENSE_COL] < 0)
+ {
+ /* only remove completely dense columns */
+ dense_col_count = n_row-1 ;
+ }
+ else
+ {
+ dense_col_count =
+ DENSE_DEGREE (knobs [COLAMD_DENSE_COL], MIN (n_row, n_col)) ;
+ }
+
+ DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
+ max_deg = 0 ;
+ n_col2 = n_col ;
+ n_row2 = n_row ;
+
+ /* === Kill empty columns =============================================== */
+
+ /* Put the empty columns at the end in their natural order, so that LU */
+ /* factorization can proceed as far as possible. */
+ for (c = n_col-1 ; c >= 0 ; c--)
+ {
+ deg = Col [c].length ;
+ if (deg == 0)
+ {
+ /* this is an empty column, kill and order it last */
+ Col [c].shared2.order = --n_col2 ;
+ KILL_PRINCIPAL_COL (c) ;
+ }
+ }
+ DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ;
+
+ /* === Kill dense columns =============================================== */
+
+ /* Put the dense columns at the end, in their natural order */
+ for (c = n_col-1 ; c >= 0 ; c--)
+ {
+ /* skip any dead columns */
+ if (COL_IS_DEAD (c))
+ {
+ continue ;
+ }
+ deg = Col [c].length ;
+ if (deg > dense_col_count)
+ {
+ /* this is a dense column, kill and order it last */
+ Col [c].shared2.order = --n_col2 ;
+ /* decrement the row degrees */
+ cp = &A [Col [c].start] ;
+ cp_end = cp + Col [c].length ;
+ while (cp < cp_end)
+ {
+ Row [*cp++].shared1.degree-- ;
+ }
+ KILL_PRINCIPAL_COL (c) ;
+ }
+ }
+ DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ;
+
+ /* === Kill dense and empty rows ======================================== */
+
+ for (r = 0 ; r < n_row ; r++)
+ {
+ deg = Row [r].shared1.degree ;
+ ASSERT (deg >= 0 && deg <= n_col) ;
+ if (deg > dense_row_count || deg == 0)
+ {
+ /* kill a dense or empty row */
+ KILL_ROW (r) ;
+ --n_row2 ;
+ }
+ else
+ {
+ /* keep track of max degree of remaining rows */
+ max_deg = MAX (max_deg, deg) ;
+ }
+ }
+ DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ;
+
+ /* === Compute initial column scores ==================================== */
+
+ /* At this point the row degrees are accurate. They reflect the number */
+ /* of "live" (non-dense) columns in each row. No empty rows exist. */
+ /* Some "live" columns may contain only dead rows, however. These are */
+ /* pruned in the code below. */
+
+ /* now find the initial matlab score for each column */
+ for (c = n_col-1 ; c >= 0 ; c--)
+ {
+ /* skip dead column */
+ if (COL_IS_DEAD (c))
+ {
+ continue ;
+ }
+ score = 0 ;
+ cp = &A [Col [c].start] ;
+ new_cp = cp ;
+ cp_end = cp + Col [c].length ;
+ while (cp < cp_end)
+ {
+ /* get a row */
+ row = *cp++ ;
+ /* skip if dead */
+ if (ROW_IS_DEAD (row))
+ {
+ continue ;
+ }
+ /* compact the column */
+ *new_cp++ = row ;
+ /* add row's external degree */
+ score += Row [row].shared1.degree - 1 ;
+ /* guard against integer overflow */
+ score = MIN (score, n_col) ;
+ }
+ /* determine pruned column length */
+ col_length = (Int) (new_cp - &A [Col [c].start]) ;
+ if (col_length == 0)
+ {
+ /* a newly-made null column (all rows in this col are "dense" */
+ /* and have already been killed) */
+ DEBUG2 (("Newly null killed: %d\n", c)) ;
+ Col [c].shared2.order = --n_col2 ;
+ KILL_PRINCIPAL_COL (c) ;
+ }
+ else
+ {
+ /* set column length and set score */
+ ASSERT (score >= 0) ;
+ ASSERT (score <= n_col) ;
+ Col [c].length = col_length ;
+ Col [c].shared2.score = score ;
+ }
+ }
+ DEBUG1 (("colamd: Dense, null, and newly-null columns killed: %d\n",
+ n_col-n_col2)) ;
+
+ /* At this point, all empty rows and columns are dead. All live columns */
+ /* are "clean" (containing no dead rows) and simplicial (no supercolumns */
+ /* yet). Rows may contain dead columns, but all live rows contain at */
+ /* least one live column. */
+
+#ifndef NDEBUG
+ debug_structures (n_row, n_col, Row, Col, A, n_col2) ;
+#endif /* NDEBUG */
+
+ /* === Initialize degree lists ========================================== */
+
+#ifndef NDEBUG
+ debug_count = 0 ;
+#endif /* NDEBUG */
+
+ /* clear the hash buckets */
+ for (c = 0 ; c <= n_col ; c++)
+ {
+ head [c] = EMPTY ;
+ }
+ min_score = n_col ;
+ /* place in reverse order, so low column indices are at the front */
+ /* of the lists. This is to encourage natural tie-breaking */
+ for (c = n_col-1 ; c >= 0 ; c--)
+ {
+ /* only add principal columns to degree lists */
+ if (COL_IS_ALIVE (c))
+ {
+ DEBUG4 (("place %d score %d minscore %d ncol %d\n",
+ c, Col [c].shared2.score, min_score, n_col)) ;
+
+ /* === Add columns score to DList =============================== */
+
+ score = Col [c].shared2.score ;
+
+ ASSERT (min_score >= 0) ;
+ ASSERT (min_score <= n_col) ;
+ ASSERT (score >= 0) ;
+ ASSERT (score <= n_col) ;
+ ASSERT (head [score] >= EMPTY) ;
+
+ /* now add this column to dList at proper score location */
+ next_col = head [score] ;
+ Col [c].shared3.prev = EMPTY ;
+ Col [c].shared4.degree_next = next_col ;
+
+ /* if there already was a column with the same score, set its */
+ /* previous pointer to this new column */
+ if (next_col != EMPTY)
+ {
+ Col [next_col].shared3.prev = c ;
+ }
+ head [score] = c ;
+
+ /* see if this score is less than current min */
+ min_score = MIN (min_score, score) ;
+
+#ifndef NDEBUG
+ debug_count++ ;
+#endif /* NDEBUG */
+
+ }
+ }
+
+#ifndef NDEBUG
+ DEBUG1 (("colamd: Live cols %d out of %d, non-princ: %d\n",
+ debug_count, n_col, n_col-debug_count)) ;
+ ASSERT (debug_count == n_col2) ;
+ debug_deg_lists (n_row, n_col, Row, Col, head, min_score, n_col2, max_deg) ;
+#endif /* NDEBUG */
+
+ /* === Return number of remaining columns, and max row degree =========== */
+
+ *p_n_col2 = n_col2 ;
+ *p_n_row2 = n_row2 ;
+ *p_max_deg = max_deg ;
+}
+
+
+/* ========================================================================== */
+/* === find_ordering ======================================================== */
+/* ========================================================================== */
+
+/*
+ Order the principal columns of the supercolumn form of the matrix
+ (no supercolumns on input). Uses an approximate column minimum
+ degree ordering method. Not user-callable.
+*/
+
+PRIVATE Int find_ordering /* return the number of garbage collections */
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row, /* number of rows of A */
+ Int n_col, /* number of columns of A */
+ Int Alen, /* size of A, 2*nnz + n_col or larger */
+ Colamd_Row Row [], /* of size n_row+1 */
+ Colamd_Col Col [], /* of size n_col+1 */
+ Int A [], /* column form and row form of A */
+ Int head [], /* of size n_col+1 */
+ Int n_col2, /* Remaining columns to order */
+ Int max_deg, /* Maximum row degree */
+ Int pfree, /* index of first free slot (2*nnz on entry) */
+ Int aggressive
+)
+{
+ /* === Local variables ================================================== */
+
+ Int k ; /* current pivot ordering step */
+ Int pivot_col ; /* current pivot column */
+ Int *cp ; /* a column pointer */
+ Int *rp ; /* a row pointer */
+ Int pivot_row ; /* current pivot row */
+ Int *new_cp ; /* modified column pointer */
+ Int *new_rp ; /* modified row pointer */
+ Int pivot_row_start ; /* pointer to start of pivot row */
+ Int pivot_row_degree ; /* number of columns in pivot row */
+ Int pivot_row_length ; /* number of supercolumns in pivot row */
+ Int pivot_col_score ; /* score of pivot column */
+ Int needed_memory ; /* free space needed for pivot row */
+ Int *cp_end ; /* pointer to the end of a column */
+ Int *rp_end ; /* pointer to the end of a row */
+ Int row ; /* a row index */
+ Int col ; /* a column index */
+ Int max_score ; /* maximum possible score */
+ Int cur_score ; /* score of current column */
+ unsigned Int hash ; /* hash value for supernode detection */
+ Int head_column ; /* head of hash bucket */
+ Int first_col ; /* first column in hash bucket */
+ Int tag_mark ; /* marker value for mark array */
+ Int row_mark ; /* Row [row].shared2.mark */
+ Int set_difference ; /* set difference size of row with pivot row */
+ Int min_score ; /* smallest column score */
+ Int col_thickness ; /* "thickness" (no. of columns in a supercol) */
+ Int max_mark ; /* maximum value of tag_mark */
+ Int pivot_col_thickness ; /* number of columns represented by pivot col */
+ Int prev_col ; /* Used by Dlist operations. */
+ Int next_col ; /* Used by Dlist operations. */
+ Int ngarbage ; /* number of garbage collections performed */
+
+#ifndef NDEBUG
+ Int debug_d ; /* debug loop counter */
+ Int debug_step = 0 ; /* debug loop counter */
+#endif /* NDEBUG */
+
+ /* === Initialization and clear mark ==================================== */
+
+ max_mark = INT_MAX - n_col ; /* INT_MAX defined in <limits.h> */
+ tag_mark = clear_mark (0, max_mark, n_row, Row) ;
+ min_score = 0 ;
+ ngarbage = 0 ;
+ DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ;
+
+ /* === Order the columns ================================================ */
+
+ for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */)
+ {
+
+#ifndef NDEBUG
+ if (debug_step % 100 == 0)
+ {
+ DEBUG2 (("\n... Step k: %d out of n_col2: %d\n", k, n_col2)) ;
+ }
+ else
+ {
+ DEBUG3 (("\n----------Step k: %d out of n_col2: %d\n", k, n_col2)) ;
+ }
+ debug_step++ ;
+ debug_deg_lists (n_row, n_col, Row, Col, head,
+ min_score, n_col2-k, max_deg) ;
+ debug_matrix (n_row, n_col, Row, Col, A) ;
+#endif /* NDEBUG */
+
+ /* === Select pivot column, and order it ============================ */
+
+ /* make sure degree list isn't empty */
+ ASSERT (min_score >= 0) ;
+ ASSERT (min_score <= n_col) ;
+ ASSERT (head [min_score] >= EMPTY) ;
+
+#ifndef NDEBUG
+ for (debug_d = 0 ; debug_d < min_score ; debug_d++)
+ {
+ ASSERT (head [debug_d] == EMPTY) ;
+ }
+#endif /* NDEBUG */
+
+ /* get pivot column from head of minimum degree list */
+ while (head [min_score] == EMPTY && min_score < n_col)
+ {
+ min_score++ ;
+ }
+ pivot_col = head [min_score] ;
+ ASSERT (pivot_col >= 0 && pivot_col <= n_col) ;
+ next_col = Col [pivot_col].shared4.degree_next ;
+ head [min_score] = next_col ;
+ if (next_col != EMPTY)
+ {
+ Col [next_col].shared3.prev = EMPTY ;
+ }
+
+ ASSERT (COL_IS_ALIVE (pivot_col)) ;
+
+ /* remember score for defrag check */
+ pivot_col_score = Col [pivot_col].shared2.score ;
+
+ /* the pivot column is the kth column in the pivot order */
+ Col [pivot_col].shared2.order = k ;
+
+ /* increment order count by column thickness */
+ pivot_col_thickness = Col [pivot_col].shared1.thickness ;
+ k += pivot_col_thickness ;
+ ASSERT (pivot_col_thickness > 0) ;
+ DEBUG3 (("Pivot col: %d thick %d\n", pivot_col, pivot_col_thickness)) ;
+
+ /* === Garbage_collection, if necessary ============================= */
+
+ needed_memory = MIN (pivot_col_score, n_col - k) ;
+ if (pfree + needed_memory >= Alen)
+ {
+ pfree = garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
+ ngarbage++ ;
+ /* after garbage collection we will have enough */
+ ASSERT (pfree + needed_memory < Alen) ;
+ /* garbage collection has wiped out the Row[].shared2.mark array */
+ tag_mark = clear_mark (0, max_mark, n_row, Row) ;
+
+#ifndef NDEBUG
+ debug_matrix (n_row, n_col, Row, Col, A) ;
+#endif /* NDEBUG */
+ }
+
+ /* === Compute pivot row pattern ==================================== */
+
+ /* get starting location for this new merged row */
+ pivot_row_start = pfree ;
+
+ /* initialize new row counts to zero */
+ pivot_row_degree = 0 ;
+
+ /* tag pivot column as having been visited so it isn't included */
+ /* in merged pivot row */
+ Col [pivot_col].shared1.thickness = -pivot_col_thickness ;
+
+ /* pivot row is the union of all rows in the pivot column pattern */
+ cp = &A [Col [pivot_col].start] ;
+ cp_end = cp + Col [pivot_col].length ;
+ while (cp < cp_end)
+ {
+ /* get a row */
+ row = *cp++ ;
+ DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ;
+ /* skip if row is dead */
+ if (ROW_IS_ALIVE (row))
+ {
+ rp = &A [Row [row].start] ;
+ rp_end = rp + Row [row].length ;
+ while (rp < rp_end)
+ {
+ /* get a column */
+ col = *rp++ ;
+ /* add the column, if alive and untagged */
+ col_thickness = Col [col].shared1.thickness ;
+ if (col_thickness > 0 && COL_IS_ALIVE (col))
+ {
+ /* tag column in pivot row */
+ Col [col].shared1.thickness = -col_thickness ;
+ ASSERT (pfree < Alen) ;
+ /* place column in pivot row */
+ A [pfree++] = col ;
+ pivot_row_degree += col_thickness ;
+ }
+ }
+ }
+ }
+
+ /* clear tag on pivot column */
+ Col [pivot_col].shared1.thickness = pivot_col_thickness ;
+ max_deg = MAX (max_deg, pivot_row_degree) ;
+
+#ifndef NDEBUG
+ DEBUG3 (("check2\n")) ;
+ debug_mark (n_row, Row, tag_mark, max_mark) ;
+#endif /* NDEBUG */
+
+ /* === Kill all rows used to construct pivot row ==================== */
+
+ /* also kill pivot row, temporarily */
+ cp = &A [Col [pivot_col].start] ;
+ cp_end = cp + Col [pivot_col].length ;
+ while (cp < cp_end)
+ {
+ /* may be killing an already dead row */
+ row = *cp++ ;
+ DEBUG3 (("Kill row in pivot col: %d\n", row)) ;
+ KILL_ROW (row) ;
+ }
+
+ /* === Select a row index to use as the new pivot row =============== */
+
+ pivot_row_length = pfree - pivot_row_start ;
+ if (pivot_row_length > 0)
+ {
+ /* pick the "pivot" row arbitrarily (first row in col) */
+ pivot_row = A [Col [pivot_col].start] ;
+ DEBUG3 (("Pivotal row is %d\n", pivot_row)) ;
+ }
+ else
+ {
+ /* there is no pivot row, since it is of zero length */
+ pivot_row = EMPTY ;
+ ASSERT (pivot_row_length == 0) ;
+ }
+ ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ;
+
+ /* === Approximate degree computation =============================== */
+
+ /* Here begins the computation of the approximate degree. The column */
+ /* score is the sum of the pivot row "length", plus the size of the */
+ /* set differences of each row in the column minus the pattern of the */
+ /* pivot row itself. The column ("thickness") itself is also */
+ /* excluded from the column score (we thus use an approximate */
+ /* external degree). */
+
+ /* The time taken by the following code (compute set differences, and */
+ /* add them up) is proportional to the size of the data structure */
+ /* being scanned - that is, the sum of the sizes of each column in */
+ /* the pivot row. Thus, the amortized time to compute a column score */
+ /* is proportional to the size of that column (where size, in this */
+ /* context, is the column "length", or the number of row indices */
+ /* in that column). The number of row indices in a column is */
+ /* monotonically non-decreasing, from the length of the original */
+ /* column on input to colamd. */
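+
+ /* Worked equation (added for exposition): writing T (j) for the */
+ /* thickness of column j and k for the current ordering step, the */
+ /* score finalized later in this iteration amounts to */
+ /* */
+ /* score (j) = |pivot row| - T (j) */
+ /* + sum over live rows i in column j, i != pivot row, */
+ /* of |row i \ pivot row| */
+ /* */
+ /* clamped to at most n_col - k - T (j). The set-difference sizes */
+ /* |row i \ pivot row| are the (row_mark - tag_mark) values that the */
+ /* two scans below compute and accumulate. */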
+
+ /* === Compute set differences ====================================== */
+
+ DEBUG3 (("** Computing set differences phase. **\n")) ;
+
+ /* pivot row is currently dead - it will be revived later. */
+
+ DEBUG3 (("Pivot row: ")) ;
+ /* for each column in pivot row */
+ rp = &A [pivot_row_start] ;
+ rp_end = rp + pivot_row_length ;
+ while (rp < rp_end)
+ {
+ col = *rp++ ;
+ ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+ DEBUG3 (("Col: %d\n", col)) ;
+
+ /* clear tags used to construct pivot row pattern */
+ col_thickness = -Col [col].shared1.thickness ;
+ ASSERT (col_thickness > 0) ;
+ Col [col].shared1.thickness = col_thickness ;
+
+ /* === Remove column from degree list =========================== */
+
+ cur_score = Col [col].shared2.score ;
+ prev_col = Col [col].shared3.prev ;
+ next_col = Col [col].shared4.degree_next ;
+ ASSERT (cur_score >= 0) ;
+ ASSERT (cur_score <= n_col) ;
+ ASSERT (cur_score >= EMPTY) ;
+ if (prev_col == EMPTY)
+ {
+ head [cur_score] = next_col ;
+ }
+ else
+ {
+ Col [prev_col].shared4.degree_next = next_col ;
+ }
+ if (next_col != EMPTY)
+ {
+ Col [next_col].shared3.prev = prev_col ;
+ }
+
+ /* === Scan the column ========================================== */
+
+ cp = &A [Col [col].start] ;
+ cp_end = cp + Col [col].length ;
+ while (cp < cp_end)
+ {
+ /* get a row */
+ row = *cp++ ;
+ row_mark = Row [row].shared2.mark ;
+ /* skip if dead */
+ if (ROW_IS_MARKED_DEAD (row_mark))
+ {
+ continue ;
+ }
+ ASSERT (row != pivot_row) ;
+ set_difference = row_mark - tag_mark ;
+ /* check if the row has been seen yet */
+ if (set_difference < 0)
+ {
+ ASSERT (Row [row].shared1.degree <= max_deg) ;
+ set_difference = Row [row].shared1.degree ;
+ }
+ /* subtract column thickness from this row's set difference */
+ set_difference -= col_thickness ;
+ ASSERT (set_difference >= 0) ;
+ /* absorb this row if the set difference becomes zero */
+ if (set_difference == 0 && aggressive)
+ {
+ DEBUG3 (("aggressive absorption. Row: %d\n", row)) ;
+ KILL_ROW (row) ;
+ }
+ else
+ {
+ /* save the new mark */
+ Row [row].shared2.mark = set_difference + tag_mark ;
+ }
+ }
+ }
+
+#ifndef NDEBUG
+ debug_deg_lists (n_row, n_col, Row, Col, head,
+ min_score, n_col2-k-pivot_row_degree, max_deg) ;
+#endif /* NDEBUG */
+
+ /* === Add up set differences for each column ======================= */
+
+ DEBUG3 (("** Adding set differences phase. **\n")) ;
+
+ /* for each column in pivot row */
+ rp = &A [pivot_row_start] ;
+ rp_end = rp + pivot_row_length ;
+ while (rp < rp_end)
+ {
+ /* get a column */
+ col = *rp++ ;
+ ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+ hash = 0 ;
+ cur_score = 0 ;
+ cp = &A [Col [col].start] ;
+ /* compact the column */
+ new_cp = cp ;
+ cp_end = cp + Col [col].length ;
+
+ DEBUG4 (("Adding set diffs for Col: %d.\n", col)) ;
+
+ while (cp < cp_end)
+ {
+ /* get a row */
+ row = *cp++ ;
+ ASSERT(row >= 0 && row < n_row) ;
+ row_mark = Row [row].shared2.mark ;
+ /* skip if dead */
+ if (ROW_IS_MARKED_DEAD (row_mark))
+ {
+ DEBUG4 ((" Row %d, dead\n", row)) ;
+ continue ;
+ }
+ DEBUG4 ((" Row %d, set diff %d\n", row, row_mark-tag_mark));
+ ASSERT (row_mark >= tag_mark) ;
+ /* compact the column */
+ *new_cp++ = row ;
+ /* compute hash function */
+ hash += row ;
+ /* add set difference */
+ cur_score += row_mark - tag_mark ;
+ /* integer overflow... */
+ cur_score = MIN (cur_score, n_col) ;
+ }
+
+ /* recompute the column's length */
+ Col [col].length = (Int) (new_cp - &A [Col [col].start]) ;
+
+ /* === Further mass elimination ================================= */
+
+ if (Col [col].length == 0)
+ {
+ DEBUG4 (("further mass elimination. Col: %d\n", col)) ;
+ /* nothing left but the pivot row in this column */
+ KILL_PRINCIPAL_COL (col) ;
+ pivot_row_degree -= Col [col].shared1.thickness ;
+ ASSERT (pivot_row_degree >= 0) ;
+ /* order it */
+ Col [col].shared2.order = k ;
+ /* increment order count by column thickness */
+ k += Col [col].shared1.thickness ;
+ }
+ else
+ {
+ /* === Prepare for supercolumn detection ==================== */
+
+ DEBUG4 (("Preparing supercol detection for Col: %d.\n", col)) ;
+
+ /* save score so far */
+ Col [col].shared2.score = cur_score ;
+
+ /* add column to hash table, for supercolumn detection */
+ hash %= n_col + 1 ;
+
+ DEBUG4 ((" Hash = %d, n_col = %d.\n", hash, n_col)) ;
+ ASSERT (((Int) hash) <= n_col) ;
+
+ head_column = head [hash] ;
+ if (head_column > EMPTY)
+ {
+ /* degree list "hash" is non-empty, use prev (shared3) of */
+ /* first column in degree list as head of hash bucket */
+ first_col = Col [head_column].shared3.headhash ;
+ Col [head_column].shared3.headhash = col ;
+ }
+ else
+ {
+ /* degree list "hash" is empty, use head as hash bucket */
+ first_col = - (head_column + 2) ;
+ head [hash] = - (col + 2) ;
+ }
+ Col [col].shared4.hash_next = first_col ;
+
+ /* save hash function in Col [col].shared3.hash */
+ Col [col].shared3.hash = (Int) hash ;
+ ASSERT (COL_IS_ALIVE (col)) ;
+ }
+ }
+
+ /* The approximate external column degree is now computed. */
+
+ /* === Supercolumn detection ======================================== */
+
+ DEBUG3 (("** Supercolumn detection phase. **\n")) ;
+
+ detect_super_cols (
+
+#ifndef NDEBUG
+ n_col, Row,
+#endif /* NDEBUG */
+
+ Col, A, head, pivot_row_start, pivot_row_length) ;
+
+ /* === Kill the pivotal column ====================================== */
+
+ KILL_PRINCIPAL_COL (pivot_col) ;
+
+ /* === Clear mark =================================================== */
+
+ tag_mark = clear_mark (tag_mark+max_deg+1, max_mark, n_row, Row) ;
+
+#ifndef NDEBUG
+ DEBUG3 (("check3\n")) ;
+ debug_mark (n_row, Row, tag_mark, max_mark) ;
+#endif /* NDEBUG */
+
+ /* === Finalize the new pivot row, and column scores ================ */
+
+ DEBUG3 (("** Finalize scores phase. **\n")) ;
+
+ /* for each column in pivot row */
+ rp = &A [pivot_row_start] ;
+ /* compact the pivot row */
+ new_rp = rp ;
+ rp_end = rp + pivot_row_length ;
+ while (rp < rp_end)
+ {
+ col = *rp++ ;
+ /* skip dead columns */
+ if (COL_IS_DEAD (col))
+ {
+ continue ;
+ }
+ *new_rp++ = col ;
+ /* add new pivot row to column */
+ A [Col [col].start + (Col [col].length++)] = pivot_row ;
+
+ /* retrieve score so far and add on pivot row's degree. */
+ /* (we wait until here for this in case the pivot */
+ /* row's degree was reduced due to mass elimination). */
+ cur_score = Col [col].shared2.score + pivot_row_degree ;
+
+ /* calculate the max possible score as the number of */
+ /* external columns minus the 'k' value minus the */
+ /* columns thickness */
+ max_score = n_col - k - Col [col].shared1.thickness ;
+
+ /* make the score the external degree of the union-of-rows */
+ cur_score -= Col [col].shared1.thickness ;
+
+ /* make sure score is less or equal than the max score */
+ cur_score = MIN (cur_score, max_score) ;
+ ASSERT (cur_score >= 0) ;
+
+ /* store updated score */
+ Col [col].shared2.score = cur_score ;
+
+ /* === Place column back in degree list ========================= */
+
+ ASSERT (min_score >= 0) ;
+ ASSERT (min_score <= n_col) ;
+ ASSERT (cur_score >= 0) ;
+ ASSERT (cur_score <= n_col) ;
+ ASSERT (head [cur_score] >= EMPTY) ;
+ next_col = head [cur_score] ;
+ Col [col].shared4.degree_next = next_col ;
+ Col [col].shared3.prev = EMPTY ;
+ if (next_col != EMPTY)
+ {
+ Col [next_col].shared3.prev = col ;
+ }
+ head [cur_score] = col ;
+
+ /* see if this score is less than current min */
+ min_score = MIN (min_score, cur_score) ;
+
+ }
+
+#ifndef NDEBUG
+ debug_deg_lists (n_row, n_col, Row, Col, head,
+ min_score, n_col2-k, max_deg) ;
+#endif /* NDEBUG */
+
+ /* === Resurrect the new pivot row ================================== */
+
+ if (pivot_row_degree > 0)
+ {
+ /* update pivot row length to reflect any cols that were killed */
+ /* during super-col detection and mass elimination */
+ Row [pivot_row].start = pivot_row_start ;
+ Row [pivot_row].length = (Int) (new_rp - &A[pivot_row_start]) ;
+ ASSERT (Row [pivot_row].length > 0) ;
+ Row [pivot_row].shared1.degree = pivot_row_degree ;
+ Row [pivot_row].shared2.mark = 0 ;
+ /* pivot row is no longer dead */
+
+ DEBUG1 (("Resurrect Pivot_row %d deg: %d\n",
+ pivot_row, pivot_row_degree)) ;
+ }
+ }
+
+ /* === All principal columns have now been ordered ====================== */
+
+ return (ngarbage) ;
+}
+
+
+/* ========================================================================== */
+/* === order_children ======================================================= */
+/* ========================================================================== */
+
+/*
+ The find_ordering routine has ordered all of the principal columns (the
+ representatives of the supercolumns). The non-principal columns have not
+ yet been ordered. This routine orders those columns by walking up the
+ parent tree (a column is a child of the column which absorbed it). The
+ final permutation vector is then placed in p [0 ... n_col-1], with p [0]
+ being the first column, and p [n_col-1] being the last. Although not
+ immediately obvious, the time taken by this routine is O (n_col),
+ that is, linear in the number of columns. Not user-callable.
+*/
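+
+/*
+    Worked example (added for exposition): suppose supercolumn detection
+    absorbed columns 3 and 5 into principal column 7 (thickness 3), and
+    find_ordering assigned that supercolumn the pivot order k = 4. The
+    loop below hands out the group's slots 4..6: column 3 receives
+    order 4, column 5 receives order 5, and the principal column 7 is
+    re-ordered last with order 6, so the final permutation contains
+    p [4] = 3, p [5] = 5, and p [6] = 7.
+*/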
+
+PRIVATE void order_children
+(
+ /* === Parameters ======================================================= */
+
+ Int n_col, /* number of columns of A */
+ Colamd_Col Col [], /* of size n_col+1 */
+ Int p [] /* p [0 ... n_col-1] is the column permutation*/
+)
+{
+ /* === Local variables ================================================== */
+
+ Int i ; /* loop counter for all columns */
+ Int c ; /* column index */
+ Int parent ; /* index of column's parent */
+ Int order ; /* column's order */
+
+ /* === Order each non-principal column ================================== */
+
+ for (i = 0 ; i < n_col ; i++)
+ {
+ /* find an un-ordered non-principal column */
+ ASSERT (COL_IS_DEAD (i)) ;
+ if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == EMPTY)
+ {
+ parent = i ;
+ /* once found, find its principal parent */
+ do
+ {
+ parent = Col [parent].shared1.parent ;
+ } while (!COL_IS_DEAD_PRINCIPAL (parent)) ;
+
+ /* now, order all un-ordered non-principal columns along path */
+ /* to this parent. collapse tree at the same time */
+ c = i ;
+ /* get order of parent */
+ order = Col [parent].shared2.order ;
+
+ do
+ {
+ ASSERT (Col [c].shared2.order == EMPTY) ;
+
+ /* order this column */
+ Col [c].shared2.order = order++ ;
+ /* collapse tree */
+ Col [c].shared1.parent = parent ;
+
+ /* get immediate parent of this column */
+ c = Col [c].shared1.parent ;
+
+ /* continue until we hit an ordered column. There are */
+ /* guaranteed to be no more unordered columns */
+ /* above an ordered column */
+ } while (Col [c].shared2.order == EMPTY) ;
+
+ /* re-order the super_col parent to largest order for this group */
+ Col [parent].shared2.order = order ;
+ }
+ }
+
+ /* === Generate the permutation ========================================= */
+
+ for (c = 0 ; c < n_col ; c++)
+ {
+ p [Col [c].shared2.order] = c ;
+ }
+}
+
+
+/* ========================================================================== */
+/* === detect_super_cols ==================================================== */
+/* ========================================================================== */
+
+/*
+ Detects supercolumns by finding matches between columns in the hash buckets.
+ Check amongst columns in the set A [row_start ... row_start + row_length-1].
+ The columns under consideration are currently *not* in the degree lists,
+ and have already been placed in the hash buckets.
+
+ The hash bucket for columns whose hash function is equal to h is stored
+ as follows:
+
+ if head [h] is >= 0, then head [h] contains a degree list, so:
+
+ head [h] is the first column in degree bucket h.
+ Col [head [h]].headhash gives the first column in hash bucket h.
+
+ otherwise, the degree list is empty, and:
+
+ -(head [h] + 2) is the first column in hash bucket h.
+
+ For a column c in a hash bucket, Col [c].shared3.prev is NOT a "previous
+ column" pointer. Col [c].shared3.hash is used instead as the hash number
+ for that column. The value of Col [c].shared4.hash_next is the next column
+ in the same hash bucket.
+
+ Assuming no, or "few" hash collisions, the time taken by this routine is
+ linear in the sum of the sizes (lengths) of each column whose score has
+ just been computed in the approximate degree computation.
+ Not user-callable.
+*/
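+
+/*
+    Encoding sketch (added for exposition): non-negative head [h] values
+    are reserved for degree lists, so an empty-degree-list hash bucket
+    stores its first column c as head [h] = -(c+2), which is always
+    <= -2 and therefore distinct from both a degree list (>= 0) and
+    EMPTY (-1):
+
+	encode: head [h] = -(5 + 2) = -7
+	decode: first_col = -(head [h] + 2) = -(-7 + 2) = 5
+
+    Conveniently, EMPTY (-1) decodes back to EMPTY, so an untouched
+    bucket reads as empty without a special case.
+*/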
+
+PRIVATE void detect_super_cols
+(
+ /* === Parameters ======================================================= */
+
+#ifndef NDEBUG
+ /* these two parameters are only needed when debugging is enabled: */
+ Int n_col, /* number of columns of A */
+ Colamd_Row Row [], /* of size n_row+1 */
+#endif /* NDEBUG */
+
+ Colamd_Col Col [], /* of size n_col+1 */
+ Int A [], /* row indices of A */
+ Int head [], /* head of degree lists and hash buckets */
+ Int row_start, /* pointer to set of columns to check */
+ Int row_length /* number of columns to check */
+)
+{
+ /* === Local variables ================================================== */
+
+ Int hash ; /* hash value for a column */
+ Int *rp ; /* pointer to a row */
+ Int c ; /* a column index */
+ Int super_c ; /* column index of the column to absorb into */
+ Int *cp1 ; /* column pointer for column super_c */
+ Int *cp2 ; /* column pointer for column c */
+ Int length ; /* length of column super_c */
+ Int prev_c ; /* column preceding c in hash bucket */
+ Int i ; /* loop counter */
+ Int *rp_end ; /* pointer to the end of the row */
+ Int col ; /* a column index in the row to check */
+ Int head_column ; /* first column in hash bucket or degree list */
+ Int first_col ; /* first column in hash bucket */
+
+ /* === Consider each column in the row ================================== */
+
+ rp = &A [row_start] ;
+ rp_end = rp + row_length ;
+ while (rp < rp_end)
+ {
+ col = *rp++ ;
+ if (COL_IS_DEAD (col))
+ {
+ continue ;
+ }
+
+ /* get hash number for this column */
+ hash = Col [col].shared3.hash ;
+ ASSERT (hash <= n_col) ;
+
+ /* === Get the first column in this hash bucket ===================== */
+
+ head_column = head [hash] ;
+ if (head_column > EMPTY)
+ {
+ first_col = Col [head_column].shared3.headhash ;
+ }
+ else
+ {
+ first_col = - (head_column + 2) ;
+ }
+
+ /* === Consider each column in the hash bucket ====================== */
+
+ for (super_c = first_col ; super_c != EMPTY ;
+ super_c = Col [super_c].shared4.hash_next)
+ {
+ ASSERT (COL_IS_ALIVE (super_c)) ;
+ ASSERT (Col [super_c].shared3.hash == hash) ;
+ length = Col [super_c].length ;
+
+ /* prev_c is the column preceding column c in the hash bucket */
+ prev_c = super_c ;
+
+ /* === Compare super_c with all columns after it ================ */
+
+ for (c = Col [super_c].shared4.hash_next ;
+ c != EMPTY ; c = Col [c].shared4.hash_next)
+ {
+ ASSERT (c != super_c) ;
+ ASSERT (COL_IS_ALIVE (c)) ;
+ ASSERT (Col [c].shared3.hash == hash) ;
+
+ /* not identical if lengths or scores are different */
+ if (Col [c].length != length ||
+ Col [c].shared2.score != Col [super_c].shared2.score)
+ {
+ prev_c = c ;
+ continue ;
+ }
+
+ /* compare the two columns */
+ cp1 = &A [Col [super_c].start] ;
+ cp2 = &A [Col [c].start] ;
+
+ for (i = 0 ; i < length ; i++)
+ {
+ /* the columns are "clean" (no dead rows) */
+ ASSERT (ROW_IS_ALIVE (*cp1)) ;
+ ASSERT (ROW_IS_ALIVE (*cp2)) ;
+ /* row indices will be in the same order for both supercols, */
+ /* no gather-scatter necessary */
+ if (*cp1++ != *cp2++)
+ {
+ break ;
+ }
+ }
+
+ /* the two columns are different if the for-loop "broke" */
+ if (i != length)
+ {
+ prev_c = c ;
+ continue ;
+ }
+
+ /* === Got it! two columns are identical =================== */
+
+ ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ;
+
+ Col [super_c].shared1.thickness += Col [c].shared1.thickness ;
+ Col [c].shared1.parent = super_c ;
+ KILL_NON_PRINCIPAL_COL (c) ;
+ /* order c later, in order_children() */
+ Col [c].shared2.order = EMPTY ;
+ /* remove c from hash bucket */
+ Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ;
+ }
+ }
+
+ /* === Empty this hash bucket ======================================= */
+
+ if (head_column > EMPTY)
+ {
+ /* corresponding degree list "hash" is not empty */
+ Col [head_column].shared3.headhash = EMPTY ;
+ }
+ else
+ {
+ /* corresponding degree list "hash" is empty */
+ head [hash] = EMPTY ;
+ }
+ }
+}
+
+
+/* ========================================================================== */
+/* === garbage_collection =================================================== */
+/* ========================================================================== */
+
+/*
+ Defragments and compacts columns and rows in the workspace A. Used when
+ all available memory has been used while performing row merging. Returns
+ the index of the first free position in A, after garbage collection. The
+ time taken by this routine is linear in the size of the array A, which is
+ itself linear in the number of nonzeros in the input matrix.
+ Not user-callable.
+*/
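+
+/*
+    Flagging sketch (added for exposition, assuming ONES_COMPLEMENT (r)
+    is -(r)-1 as defined earlier in this file): each live row's start is
+    tagged in place with a negative value, so the row-compaction scan
+    can find row boundaries amid the non-negative column indices:
+
+	tag: ONES_COMPLEMENT (3) = -4
+	untag: ONES_COMPLEMENT (-4) = 3
+
+    The tag is self-inverse, and the overwritten first column index is
+    parked in Row [r].shared2.first_column and restored during the scan.
+*/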
+
+PRIVATE Int garbage_collection /* returns the new value of pfree */
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row, /* number of rows */
+ Int n_col, /* number of columns */
+ Colamd_Row Row [], /* row info */
+ Colamd_Col Col [], /* column info */
+ Int A [], /* A [0 ... Alen-1] holds the matrix */
+ Int *pfree /* &A [0] ... pfree is in use */
+)
+{
+ /* === Local variables ================================================== */
+
+ Int *psrc ; /* source pointer */
+ Int *pdest ; /* destination pointer */
+ Int j ; /* counter */
+ Int r ; /* a row index */
+ Int c ; /* a column index */
+ Int length ; /* length of a row or column */
+
+#ifndef NDEBUG
+ Int debug_rows ;
+ DEBUG2 (("Defrag..\n")) ;
+ for (psrc = &A[0] ; psrc < pfree ; psrc++) ASSERT (*psrc >= 0) ;
+ debug_rows = 0 ;
+#endif /* NDEBUG */
+
+ /* === Defragment the columns =========================================== */
+
+ pdest = &A[0] ;
+ for (c = 0 ; c < n_col ; c++)
+ {
+ if (COL_IS_ALIVE (c))
+ {
+ psrc = &A [Col [c].start] ;
+
+ /* move and compact the column */
+ ASSERT (pdest <= psrc) ;
+ Col [c].start = (Int) (pdest - &A [0]) ;
+ length = Col [c].length ;
+ for (j = 0 ; j < length ; j++)
+ {
+ r = *psrc++ ;
+ if (ROW_IS_ALIVE (r))
+ {
+ *pdest++ = r ;
+ }
+ }
+ Col [c].length = (Int) (pdest - &A [Col [c].start]) ;
+ }
+ }
+
+ /* === Prepare to defragment the rows =================================== */
+
+ for (r = 0 ; r < n_row ; r++)
+ {
+ if (ROW_IS_DEAD (r) || (Row [r].length == 0))
+ {
+ /* This row is already dead, or is of zero length. Cannot compact
+ * a row of zero length, so kill it. NOTE: in the current version,
+ * there are no zero-length live rows. Kill the row (for the first
+ * time, or again) just to be safe. */
+ KILL_ROW (r) ;
+ }
+ else
+ {
+ /* save first column index in Row [r].shared2.first_column */
+ psrc = &A [Row [r].start] ;
+ Row [r].shared2.first_column = *psrc ;
+ ASSERT (ROW_IS_ALIVE (r)) ;
+ /* flag the start of the row with the one's complement of row */
+ *psrc = ONES_COMPLEMENT (r) ;
+#ifndef NDEBUG
+ debug_rows++ ;
+#endif /* NDEBUG */
+ }
+ }
+
+ /* === Defragment the rows ============================================== */
+
+ psrc = pdest ;
+ while (psrc < pfree)
+ {
+ /* find a negative number ... the start of a row */
+ if (*psrc++ < 0)
+ {
+ psrc-- ;
+ /* get the row index */
+ r = ONES_COMPLEMENT (*psrc) ;
+ ASSERT (r >= 0 && r < n_row) ;
+ /* restore first column index */
+ *psrc = Row [r].shared2.first_column ;
+ ASSERT (ROW_IS_ALIVE (r)) ;
+ ASSERT (Row [r].length > 0) ;
+ /* move and compact the row */
+ ASSERT (pdest <= psrc) ;
+ Row [r].start = (Int) (pdest - &A [0]) ;
+ length = Row [r].length ;
+ for (j = 0 ; j < length ; j++)
+ {
+ c = *psrc++ ;
+ if (COL_IS_ALIVE (c))
+ {
+ *pdest++ = c ;
+ }
+ }
+ Row [r].length = (Int) (pdest - &A [Row [r].start]) ;
+ ASSERT (Row [r].length > 0) ;
+#ifndef NDEBUG
+ debug_rows-- ;
+#endif /* NDEBUG */
+ }
+ }
+ /* ensure we found all the rows */
+ ASSERT (debug_rows == 0) ;
+
+ /* === Return the new value of pfree ==================================== */
+
+ return ((Int) (pdest - &A [0])) ;
+}
+
+
+/* ========================================================================== */
+/* === clear_mark =========================================================== */
+/* ========================================================================== */
+
+/*
+ Clears the Row [].shared2.mark array and returns the new tag_mark. The
+ sweep is performed only when the requested tag_mark is out of range;
+ otherwise the requested value is returned as-is, since advancing
+ tag_mark past all stored marks already invalidates them. Not
+ user-callable.
+*/
+
+PRIVATE Int clear_mark /* return the new value for tag_mark */
+(
+ /* === Parameters ======================================================= */
+
+ Int tag_mark, /* new value of tag_mark */
+ Int max_mark, /* max allowed value of tag_mark */
+
+ Int n_row, /* number of rows in A */
+ Colamd_Row Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
+)
+{
+ /* === Local variables ================================================== */
+
+ Int r ;
+
+ if (tag_mark <= 0 || tag_mark >= max_mark)
+ {
+ for (r = 0 ; r < n_row ; r++)
+ {
+ if (ROW_IS_ALIVE (r))
+ {
+ Row [r].shared2.mark = 0 ;
+ }
+ }
+ tag_mark = 1 ;
+ }
+
+ return (tag_mark) ;
+}
+
+
+/* ========================================================================== */
+/* === print_report ========================================================= */
+/* ========================================================================== */
+
+PRIVATE void print_report
+(
+ char *method,
+ Int stats [COLAMD_STATS]
+)
+{
+
+ Int i1, i2, i3 ;
+
+ PRINTF (("\n%s version %d.%d, %s: ", method,
+ COLAMD_MAIN_VERSION, COLAMD_SUB_VERSION, COLAMD_DATE)) ;
+
+ if (!stats)
+ {
+ PRINTF (("No statistics available.\n")) ;
+ return ;
+ }
+
+ i1 = stats [COLAMD_INFO1] ;
+ i2 = stats [COLAMD_INFO2] ;
+ i3 = stats [COLAMD_INFO3] ;
+
+ if (stats [COLAMD_STATUS] >= 0)
+ {
+ PRINTF (("OK. ")) ;
+ }
+ else
+ {
+ PRINTF (("ERROR. ")) ;
+ }
+
+ switch (stats [COLAMD_STATUS])
+ {
+
+ case COLAMD_OK_BUT_JUMBLED:
+
+ PRINTF(("Matrix has unsorted or duplicate row indices.\n")) ;
+
+ PRINTF(("%s: number of duplicate or out-of-order row indices: %d\n",
+ method, i3)) ;
+
+ PRINTF(("%s: last seen duplicate or out-of-order row index: %d\n",
+ method, INDEX (i2))) ;
+
+ PRINTF(("%s: last seen in column: %d",
+ method, INDEX (i1))) ;
+
+ /* no break - fall through to next case instead */
+
+ case COLAMD_OK:
+
+ PRINTF(("\n")) ;
+
+ PRINTF(("%s: number of dense or empty rows ignored: %d\n",
+ method, stats [COLAMD_DENSE_ROW])) ;
+
+ PRINTF(("%s: number of dense or empty columns ignored: %d\n",
+ method, stats [COLAMD_DENSE_COL])) ;
+
+ PRINTF(("%s: number of garbage collections performed: %d\n",
+ method, stats [COLAMD_DEFRAG_COUNT])) ;
+ break ;
+
+ case COLAMD_ERROR_A_not_present:
+
+ PRINTF(("Array A (row indices of matrix) not present.\n")) ;
+ break ;
+
+ case COLAMD_ERROR_p_not_present:
+
+ PRINTF(("Array p (column pointers for matrix) not present.\n")) ;
+ break ;
+
+ case COLAMD_ERROR_nrow_negative:
+
+ PRINTF(("Invalid number of rows (%d).\n", i1)) ;
+ break ;
+
+ case COLAMD_ERROR_ncol_negative:
+
+ PRINTF(("Invalid number of columns (%d).\n", i1)) ;
+ break ;
+
+ case COLAMD_ERROR_nnz_negative:
+
+ PRINTF(("Invalid number of nonzero entries (%d).\n", i1)) ;
+ break ;
+
+ case COLAMD_ERROR_p0_nonzero:
+
+ PRINTF(("Invalid column pointer, p [0] = %d, must be zero.\n", i1));
+ break ;
+
+ case COLAMD_ERROR_A_too_small:
+
+ PRINTF(("Array A too small.\n")) ;
+ PRINTF((" Need Alen >= %d, but given only Alen = %d.\n",
+ i1, i2)) ;
+ break ;
+
+ case COLAMD_ERROR_col_length_negative:
+
+ PRINTF
+ (("Column %d has a negative number of nonzero entries (%d).\n",
+ INDEX (i1), i2)) ;
+ break ;
+
+ case COLAMD_ERROR_row_index_out_of_bounds:
+
+ PRINTF
+ (("Row index (row %d) out of bounds (%d to %d) in column %d.\n",
+ INDEX (i2), INDEX (0), INDEX (i3-1), INDEX (i1))) ;
+ break ;
+
+ case COLAMD_ERROR_out_of_memory:
+
+ PRINTF(("Out of memory.\n")) ;
+ break ;
+
+ /* v2.4: internal-error case deleted */
+ }
+}
+
+
+
+
+/* ========================================================================== */
+/* === colamd debugging routines ============================================ */
+/* ========================================================================== */
+
+/* When debugging is disabled, the remainder of this file is ignored. */
+
+#ifndef NDEBUG
+
+
+/* ========================================================================== */
+/* === debug_structures ===================================================== */
+/* ========================================================================== */
+
+/*
+ At this point, all empty rows and columns are dead. All live columns
+ are "clean" (containing no dead rows) and simplicial (no supercolumns
+ yet). Rows may contain dead columns, but all live rows contain at
+ least one live column.
+*/
+
+PRIVATE void debug_structures
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int A [],
+ Int n_col2
+)
+{
+ /* === Local variables ================================================== */
+
+ Int i ;
+ Int c ;
+ Int *cp ;
+ Int *cp_end ;
+ Int len ;
+ Int score ;
+ Int r ;
+ Int *rp ;
+ Int *rp_end ;
+ Int deg ;
+
+ /* === Check A, Row, and Col ============================================ */
+
+ for (c = 0 ; c < n_col ; c++)
+ {
+ if (COL_IS_ALIVE (c))
+ {
+ len = Col [c].length ;
+ score = Col [c].shared2.score ;
+ DEBUG4 (("initial live col %5d %5d %5d\n", c, len, score)) ;
+ ASSERT (len > 0) ;
+ ASSERT (score >= 0) ;
+ ASSERT (Col [c].shared1.thickness == 1) ;
+ cp = &A [Col [c].start] ;
+ cp_end = cp + len ;
+ while (cp < cp_end)
+ {
+ r = *cp++ ;
+ ASSERT (ROW_IS_ALIVE (r)) ;
+ }
+ }
+ else
+ {
+ i = Col [c].shared2.order ;
+ ASSERT (i >= n_col2 && i < n_col) ;
+ }
+ }
+
+ for (r = 0 ; r < n_row ; r++)
+ {
+ if (ROW_IS_ALIVE (r))
+ {
+ i = 0 ;
+ len = Row [r].length ;
+ deg = Row [r].shared1.degree ;
+ ASSERT (len > 0) ;
+ ASSERT (deg > 0) ;
+ rp = &A [Row [r].start] ;
+ rp_end = rp + len ;
+ while (rp < rp_end)
+ {
+ c = *rp++ ;
+ if (COL_IS_ALIVE (c))
+ {
+ i++ ;
+ }
+ }
+ ASSERT (i > 0) ;
+ }
+ }
+}
+
+
+/* ========================================================================== */
+/* === debug_deg_lists ====================================================== */
+/* ========================================================================== */
+
+/*
+ Prints the contents of the degree lists. Counts the number of columns
+ in the degree list and compares it to the total it should have. Also
+ checks the row degrees.
+*/
+
+PRIVATE void debug_deg_lists
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int head [],
+ Int min_score,
+ Int should,
+ Int max_deg
+)
+{
+ /* === Local variables ================================================== */
+
+ Int deg ;
+ Int col ;
+ Int have ;
+ Int row ;
+
+ /* === Check the degree lists =========================================== */
+
+ if (n_col > 10000 && colamd_debug <= 0)
+ {
+ return ;
+ }
+ have = 0 ;
+ DEBUG4 (("Degree lists: %d\n", min_score)) ;
+ for (deg = 0 ; deg <= n_col ; deg++)
+ {
+ col = head [deg] ;
+ if (col == EMPTY)
+ {
+ continue ;
+ }
+ DEBUG4 (("%d:", deg)) ;
+ while (col != EMPTY)
+ {
+ DEBUG4 ((" %d", col)) ;
+ have += Col [col].shared1.thickness ;
+ ASSERT (COL_IS_ALIVE (col)) ;
+ col = Col [col].shared4.degree_next ;
+ }
+ DEBUG4 (("\n")) ;
+ }
+ DEBUG4 (("should %d have %d\n", should, have)) ;
+ ASSERT (should == have) ;
+
+ /* === Check the row degrees ============================================ */
+
+ if (n_row > 10000 && colamd_debug <= 0)
+ {
+ return ;
+ }
+ for (row = 0 ; row < n_row ; row++)
+ {
+ if (ROW_IS_ALIVE (row))
+ {
+ ASSERT (Row [row].shared1.degree <= max_deg) ;
+ }
+ }
+}
+
+
+/* ========================================================================== */
+/* === debug_mark =========================================================== */
+/* ========================================================================== */
+
+/*
+ Ensures that the tag_mark is less than the maximum, and also that
+ each entry in the mark array is less than the tag mark.
+*/
+
+PRIVATE void debug_mark
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row,
+ Colamd_Row Row [],
+ Int tag_mark,
+ Int max_mark
+)
+{
+ /* === Local variables ================================================== */
+
+ Int r ;
+
+ /* === Check the Row marks ============================================== */
+
+ ASSERT (tag_mark > 0 && tag_mark <= max_mark) ;
+ if (n_row > 10000 && colamd_debug <= 0)
+ {
+ return ;
+ }
+ for (r = 0 ; r < n_row ; r++)
+ {
+ ASSERT (Row [r].shared2.mark < tag_mark) ;
+ }
+}
+
+
+/* ========================================================================== */
+/* === debug_matrix ========================================================= */
+/* ========================================================================== */
+
+/*
+ Prints out the contents of the columns and the rows.
+*/
+
+PRIVATE void debug_matrix
+(
+ /* === Parameters ======================================================= */
+
+ Int n_row,
+ Int n_col,
+ Colamd_Row Row [],
+ Colamd_Col Col [],
+ Int A []
+)
+{
+ /* === Local variables ================================================== */
+
+ Int r ;
+ Int c ;
+ Int *rp ;
+ Int *rp_end ;
+ Int *cp ;
+ Int *cp_end ;
+
+ /* === Dump the rows and columns of the matrix ========================== */
+
+ if (colamd_debug < 3)
+ {
+ return ;
+ }
+ DEBUG3 (("DUMP MATRIX:\n")) ;
+ for (r = 0 ; r < n_row ; r++)
+ {
+ DEBUG3 (("Row %d alive? %d\n", r, ROW_IS_ALIVE (r))) ;
+ if (ROW_IS_DEAD (r))
+ {
+ continue ;
+ }
+ DEBUG3 (("start %d length %d degree %d\n",
+ Row [r].start, Row [r].length, Row [r].shared1.degree)) ;
+ rp = &A [Row [r].start] ;
+ rp_end = rp + Row [r].length ;
+ while (rp < rp_end)
+ {
+ c = *rp++ ;
+ DEBUG4 ((" %d col %d\n", COL_IS_ALIVE (c), c)) ;
+ }
+ }
+
+ for (c = 0 ; c < n_col ; c++)
+ {
+ DEBUG3 (("Col %d alive? %d\n", c, COL_IS_ALIVE (c))) ;
+ if (COL_IS_DEAD (c))
+ {
+ continue ;
+ }
+ DEBUG3 (("start %d length %d shared1 %d shared2 %d\n",
+ Col [c].start, Col [c].length,
+ Col [c].shared1.thickness, Col [c].shared2.score)) ;
+ cp = &A [Col [c].start] ;
+ cp_end = cp + Col [c].length ;
+ while (cp < cp_end)
+ {
+ r = *cp++ ;
+ DEBUG4 ((" %d row %d\n", ROW_IS_ALIVE (r), r)) ;
+ }
+ }
+}
+
+PRIVATE void colamd_get_debug
+(
+ char *method
+)
+{
+ FILE *f ;
+ colamd_debug = 0 ; /* no debug printing */
+ f = fopen ("debug", "r") ;
+ if (f == (FILE *) NULL)
+ {
+ colamd_debug = 0 ;
+ }
+ else
+ {
+ fscanf (f, "%d", &colamd_debug) ;
+ fclose (f) ;
+ }
+ DEBUG0 (("%s: debug version, D = %d (THIS WILL BE SLOW!)\n",
+ method, colamd_debug)) ;
+}
+
+#endif /* NDEBUG */
diff --git a/extern/colamd/Source/colamd_global.c b/extern/colamd/Source/colamd_global.c
new file mode 100644
index 00000000000..4d1ae22300c
--- /dev/null
+++ b/extern/colamd/Source/colamd_global.c
@@ -0,0 +1,24 @@
+/* ========================================================================== */
+/* === colamd_global.c ====================================================== */
+/* ========================================================================== */
+
+/* ----------------------------------------------------------------------------
+ * COLAMD, Copyright (C) 2007, Timothy A. Davis.
+ * See License.txt for the Version 2.1 of the GNU Lesser General Public License
+ * http://www.cise.ufl.edu/research/sparse
+ * -------------------------------------------------------------------------- */
+
+/* Global variables for COLAMD */
+
+#ifndef NPRINT
+#ifdef MATLAB_MEX_FILE
+#include "mex.h"
+int (*colamd_printf) (const char *, ...) = mexPrintf ;
+#else
+#include <stdio.h>
+int (*colamd_printf) (const char *, ...) = printf ;
+#endif
+#else
+int (*colamd_printf) (const char *, ...) = ((void *) 0) ;
+#endif
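+
+/* Caller-side sketch (added for exposition; my_logger is a hypothetical
+ * printf-compatible function supplied by the application). Because
+ * colamd_printf has external linkage, an application can redirect the
+ * diagnostic output of colamd_report before calling it:
+ *
+ *	extern int (*colamd_printf) (const char *, ...) ;
+ *	int my_logger (const char *fmt, ...) ;
+ *	colamd_printf = my_logger ;
+ */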
+
diff --git a/extern/libmv/CMakeLists.txt b/extern/libmv/CMakeLists.txt
new file mode 100644
index 00000000000..b2360889f7f
--- /dev/null
+++ b/extern/libmv/CMakeLists.txt
@@ -0,0 +1,211 @@
+# $Id$
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# The Original Code is Copyright (C) 2011, Blender Foundation
+# All rights reserved.
+#
+# Contributor(s): Blender Foundation,
+# Sergey Sharybin
+#
+# ***** END GPL LICENSE BLOCK *****
+
+set(INC
+ .
+ ../Eigen3
+ ./third_party/ssba
+ ./third_party/ldl/Include
+ ../colamd/Include
+)
+
+set(INC_SYS
+
+)
+
+set(SRC
+ libmv-capi.cpp
+ libmv/numeric/numeric.cc
+ libmv/numeric/poly.cc
+ libmv/numeric/tinyvector.cc
+ libmv/simple_pipeline/reconstruction.cc
+ libmv/simple_pipeline/resect.cc
+ libmv/simple_pipeline/intersect.cc
+ libmv/simple_pipeline/initialize_reconstruction.cc
+ libmv/simple_pipeline/camera_intrinsics.cc
+ libmv/simple_pipeline/pipeline.cc
+ libmv/simple_pipeline/detect.cc
+ libmv/simple_pipeline/tracks.cc
+ libmv/simple_pipeline/bundle.cc
+ libmv/image/convolve.cc
+ libmv/image/array_nd.cc
+ libmv/tracking/pyramid_region_tracker.cc
+ libmv/tracking/sad.cc
+ libmv/tracking/trklt_region_tracker.cc
+ libmv/tracking/klt_region_tracker.cc
+ libmv/tracking/retrack_region_tracker.cc
+ libmv/multiview/projection.cc
+ libmv/multiview/conditioning.cc
+ libmv/multiview/fundamental.cc
+ libmv/multiview/euclidean_resection.cc
+ libmv/multiview/triangulation.cc
+
+ third_party/ssba/Geometry/v3d_metricbundle.cpp
+ third_party/ssba/Math/v3d_optimization.cpp
+ third_party/gflags/gflags.cc
+ third_party/gflags/gflags_reporting.cc
+ third_party/gflags/gflags_completions.cc
+ third_party/fast/fast_9.c
+ third_party/fast/fast_10.c
+ third_party/fast/fast_11.c
+ third_party/fast/fast_12.c
+ third_party/fast/fast.c
+ third_party/fast/nonmax.c
+ third_party/ldl/Source/ldl.c
+
+ libmv-capi.h
+ libmv/logging/logging.h
+ libmv/numeric/dogleg.h
+ libmv/numeric/levenberg_marquardt.h
+ libmv/numeric/poly.h
+ libmv/numeric/function_derivative.h
+ libmv/numeric/numeric.h
+ libmv/simple_pipeline/resect.h
+ libmv/simple_pipeline/reconstruction.h
+ libmv/simple_pipeline/camera_intrinsics.h
+ libmv/simple_pipeline/tracks.h
+ libmv/simple_pipeline/detect.h
+ libmv/simple_pipeline/pipeline.h
+ libmv/simple_pipeline/intersect.h
+ libmv/simple_pipeline/bundle.h
+ libmv/simple_pipeline/initialize_reconstruction.h
+ libmv/image/convolve.h
+ libmv/image/tuple.h
+ libmv/image/array_nd.h
+ libmv/image/sample.h
+ libmv/image/image.h
+ libmv/tracking/region_tracker.h
+ libmv/tracking/retrack_region_tracker.h
+ libmv/tracking/sad.h
+ libmv/tracking/pyramid_region_tracker.h
+ libmv/tracking/trklt_region_tracker.h
+ libmv/tracking/klt_region_tracker.h
+ libmv/base/id_generator.h
+ libmv/base/vector.h
+ libmv/base/scoped_ptr.h
+ libmv/base/vector_utils.h
+ libmv/multiview/nviewtriangulation.h
+ libmv/multiview/resection.h
+ libmv/multiview/euclidean_resection.h
+ libmv/multiview/triangulation.h
+ libmv/multiview/projection.h
+ libmv/multiview/fundamental.h
+ libmv/multiview/conditioning.h
+
+ third_party/ssba/Geometry/v3d_metricbundle.h
+ third_party/ssba/Geometry/v3d_cameramatrix.h
+ third_party/ssba/Geometry/v3d_distortion.h
+ third_party/ssba/Math/v3d_linear_utils.h
+ third_party/ssba/Math/v3d_optimization.h
+ third_party/ssba/Math/v3d_mathutilities.h
+ third_party/ssba/Math/v3d_linear.h
+ third_party/gflags/gflags_completions.h
+ third_party/gflags/mutex.h
+ third_party/gflags/config.h
+ third_party/gflags/gflags.h
+ third_party/fast/fast.h
+ third_party/ldl/Include/ldl.h
+ third_party/msinttypes/stdint.h
+ third_party/msinttypes/inttypes.h
+)
+
+IF(WIN32)
+ list(APPEND SRC
+ third_party/glog/src/logging.cc
+ third_party/glog/src/raw_logging.cc
+ third_party/glog/src/utilities.cc
+ third_party/glog/src/vlog_is_on.cc
+ third_party/glog/src/windows/port.cc
+
+ third_party/glog/src/utilities.h
+ third_party/glog/src/stacktrace_generic-inl.h
+ third_party/glog/src/stacktrace.h
+ third_party/glog/src/stacktrace_x86_64-inl.h
+ third_party/glog/src/base/googleinit.h
+ third_party/glog/src/base/mutex.h
+ third_party/glog/src/base/commandlineflags.h
+ third_party/glog/src/stacktrace_powerpc-inl.h
+ third_party/glog/src/stacktrace_x86-inl.h
+ third_party/glog/src/config.h
+ third_party/glog/src/stacktrace_libunwind-inl.h
+ third_party/glog/src/windows/glog/raw_logging.h
+ third_party/glog/src/windows/glog/vlog_is_on.h
+ third_party/glog/src/windows/glog/logging.h
+ third_party/glog/src/windows/glog/log_severity.h
+ third_party/glog/src/windows/port.h
+ third_party/glog/src/windows/config.h
+ )
+
+ list(APPEND INC
+ ./third_party/glog/src/windows
+ ./third_party/msinttypes
+ )
+
+ IF(MSVC)
+ set(MSVC_OFLAGS O1 O2 Ox)
+		foreach(FLAG ${MSVC_OFLAGS})
+			string(REPLACE "${FLAG}" "Od" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}")
+			string(REPLACE "${FLAG}" "Od" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO}")
+ endforeach()
+ ENDIF(MSVC)
+ELSE(WIN32)
+ list(APPEND SRC
+ third_party/glog/src/utilities.cc
+ third_party/glog/src/symbolize.cc
+ third_party/glog/src/vlog_is_on.cc
+ third_party/glog/src/signalhandler.cc
+ third_party/glog/src/logging.cc
+ third_party/glog/src/demangle.cc
+ third_party/glog/src/raw_logging.cc
+
+ third_party/glog/src/utilities.h
+ third_party/glog/src/stacktrace_generic-inl.h
+ third_party/glog/src/config_mac.h
+ third_party/glog/src/stacktrace.h
+ third_party/glog/src/stacktrace_x86_64-inl.h
+ third_party/glog/src/symbolize.h
+ third_party/glog/src/base/googleinit.h
+ third_party/glog/src/base/mutex.h
+ third_party/glog/src/base/commandlineflags.h
+ third_party/glog/src/stacktrace_powerpc-inl.h
+ third_party/glog/src/stacktrace_x86-inl.h
+ third_party/glog/src/config.h
+ third_party/glog/src/demangle.h
+ third_party/glog/src/stacktrace_libunwind-inl.h
+ third_party/glog/src/glog/raw_logging.h
+ third_party/glog/src/glog/vlog_is_on.h
+ third_party/glog/src/glog/logging.h
+ third_party/glog/src/glog/log_severity.h
+ third_party/glog/src/config_linux.h
+ )
+
+ list(APPEND INC
+ ./third_party/glog/src
+ )
+ENDIF(WIN32)
+
+add_definitions(-DV3DLIB_ENABLE_SUITESPARSE -DGOOGLE_GLOG_DLL_DECL=)
+
+blender_add_lib(extern_libmv "${SRC}" "${INC}" "${INC_SYS}")
diff --git a/extern/libmv/ChangeLog b/extern/libmv/ChangeLog
new file mode 100644
index 00000000000..7e10abfead6
--- /dev/null
+++ b/extern/libmv/ChangeLog
@@ -0,0 +1,312 @@
+commit 531c79bf95fddaaa70707d1abcd4fdafda16bbf0
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Sat Aug 20 00:00:42 2011 +0200
+
+ Display warped pattern in marker preview.
+
+commit bb5c27e671b6f8eb56ddf490f0795d59bede591b
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 18:37:48 2011 +0200
+
+ Fix CMake build.
+
+commit 2ac7281ff6b9545b425dd84fb03bf9c5c98b4de2
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 17:34:45 2011 +0200
+
+ Avoid symbol shadowing.
+
+commit 2a7c3de4acc60e0433b4952f69e30528dbafe0d2
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 17:22:47 2011 +0200
+
+ Better dragging behavior when hitting borders.
+
+commit a14eb3953c9521b2e08ff9ddd45b33ff1f8aeafb
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 17:12:12 2011 +0200
+
+ Update marker preview to new affine tracking.
+
+commit 5299ea67043459eda147950e589c2d327a8fbced
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 16:05:54 2011 +0200
+
+ sqrt takes double precision.
+
+commit 9f9221ce151d788c49b48f6f293ab2e2f8813978
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 16:04:37 2011 +0200
+
+ MSVC compatibility: heap allocate pattern, explicit float cast.
+
+commit 702658d2f8616964a6eeb3743fd85e97ac7ff09d
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 14:59:24 2011 +0200
+
+ Expose regularization parameters (areaPenalty and conditionPenalty) in API.
+
+commit 3e84ae5fbac10451d4935418f6281a90cedace11
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 14:19:27 2011 +0200
+
+ Add LaplaceFilter.
+ Add regularization in affine SAD Tracker (keep constant area and good condition number).
+ UI: Better track display (+enable line antialiasing).
+
+commit 6d26d9a8ccc4ce009fbf253898fea8864dd5001a
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 10:25:26 2011 +0200
+
+ Add optimization for integer pixel search.
+    Allows more aggressive settings for affine coordinate descent.
+
+commit 70ceae81c0ab561b07e640ecb9933f0a902b57cd
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 19 00:02:12 2011 +0200
+
+ Document coordinate descent method in affine SAD matcher.
+ Add heuristic to prevent high distortions.
+
+commit 75520f4bc4ccbb272a1b4149d3b8d05a90f7f896
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Thu Aug 18 23:14:17 2011 +0200
+
+ Fix affine iteration.
+
+commit 4e8e0aa6018e3eb2fbebdad7f1cbd6c909d26e79
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Thu Aug 18 23:03:26 2011 +0200
+
+ Handle rotations.
+
+commit 3ce41cf3c1b5c136a61d8f4c63ccae3cafbdb8da
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Thu Aug 18 22:24:47 2011 +0200
+
+ Slow brute-force affine diamond search implementation.
+
+commit 1c4acd03e030c1c50dc6fc36c419c72ea69a0713
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Thu Aug 18 20:51:43 2011 +0200
+
+ Fix detect.cc.
+
+commit ec18cc5ea9ae2e641075a847e82d0aacb8415ad8
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Thu Aug 18 17:45:37 2011 +0200
+
+ Compute and return Pearson product-moment correlation coefficient between reference and matched pattern.
+
+commit 21d4245c63a01bfc736192d55baf10983e7c9ec7
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Thu Aug 18 16:18:44 2011 +0200
+
+ UI and API support for affine tracking.
+
+commit a4876d8c40dcde615b44009c38c49e9a1b1d4698
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Wed Aug 17 20:26:01 2011 +0200
+
+ Hack to make sad.cc compile with MSVC on system without support for the SSE instruction set.
+
+commit 0de723dfce5bbe44dbd19be8cd6dd6e9b03b7924
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Wed Aug 17 20:10:46 2011 +0200
+
+ Fix slow path (for computers without SSE2).
+ Heap allocate scores in detect.cc
+
+commit 65a9d496f81e8b37eae39a4063957b8be9a4e6f0
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Wed Aug 17 19:25:17 2011 +0200
+
+ Fix compilation on OSX.
+
+commit d22720e618456329388d2c107422c3b371657cba
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Wed Aug 17 14:14:45 2011 +0200
+
+ Improve Detect and SAD Tracker API and documentation.
+
+commit 5d6cd4ad365b061901bad40695b51d568487a0cf
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Wed Aug 17 11:57:29 2011 +0200
+
+ MSVC support fixes.
+
+commit 50f0323173c6deebd6aaf9c126f0b51b2a79c3c1
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 16 23:21:37 2011 +0200
+
+ Detector can detect features similar to a given pattern.
+
+commit 5734cc27bbf84c2b6edcfcc1ea736798e12d5820
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 16 22:53:54 2011 +0200
+
+ Ensure SAD Tracker is C compatible.
+ Update Detect API documentation.
+
+commit 701c42842574064fea992f8822e3899cb9066108
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 16 21:56:42 2011 +0200
+
+ Remove FAST detector.
+ Add Moravec detector.
+    This detector is more suited to tracking since it tries to choose patterns which are unlikely to drift by computing SAD with neighbouring patches.
+ It could be improved to better avoid edges.
+
+commit 9bdf93e13fc880c78b6f34397da673388c16040e
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 16 21:55:08 2011 +0200
+
+ Fix Qt Tracker GL to work with AMD drivers.
+
+commit 81613ee0cc94b315f333c9632b18b95d426aad05
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 16 21:54:12 2011 +0200
+
+ Make CameraIntrinsics (and thus Qt tracker) compilable without linking libmv.
+
+commit a1d9a8fa8b01ef7cf2a79b3b891633fc333fc9cf
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 16 21:24:51 2011 +0200
+
+ Fix SAD tracker. Pattern was transposed by affine pattern sampler.
+
+commit c3b794da2e7fd23f2fbdf90dbd71de0e6b3bc811
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 16 21:19:02 2011 +0200
+
+ Fix SAD tracker. Pattern was transposed by affine pattern sampler.
+
+commit a9b61bf3356f27174cdd983f562f99c3a6a2cc35
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Sun Aug 14 09:56:51 2011 +0200
+
+ Clarify CameraIntrinsics documentation.
+    Edit CameraIntrinsics test to fail.
+
+commit 10bdad9ad2cea2603896263cde5a5339169a9af0
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 12 21:05:32 2011 +0200
+
+ Fix out of bound access in warp bilinear sampling.
+
+commit dd9a418db021a28af2c1198d5e5b9e68fe048a03
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 12 19:14:36 2011 +0200
+
+ Fix compilation with -funsigned-char.
+
+commit bd1a268ede39b67f2ba4b360f6fc693419e7cd7f
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Aug 12 18:39:27 2011 +0200
+
+ CameraIntrinsics fixes.
+
+commit ae513b39fb779632f96ceff7c1e014fb8e68702a
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 9 19:38:58 2011 +0200
+
+ Remove stray QDebug include.
+
+commit 1e58f55078ce6009a885be30ae0316aec6ed8239
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 9 14:16:31 2011 +0200
+
+ Make API future-proof (for an eventual affine or planar tracker).
+
+commit c2af303e7bf0dddcb02937323ac5846b1801e6cc
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 9 11:13:29 2011 +0200
+
+ Remove reconstruction breaking debug code.
+
+commit 8792a633e5c5f1c1f12e164b9e8897ca0790ac59
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 9 10:49:18 2011 +0200
+
+ Remove getchar()s.
+
+commit 63a9bdee0cbd1197e0315d01c27bfc2361bd5656
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 9 10:35:07 2011 +0200
+
+ Adapt patch to new PipelineRoutines code generation strategy.
+
+commit 096ff1a4070f7212c50fb0a4b2feec7ca9d97158
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 9 09:54:12 2011 +0200
+
+ Merge max_image and max_track fix from tomato.
+
+commit d8450cd3c37278a397482cd36b1e2419f154cfb9
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Tue Aug 9 09:38:49 2011 +0200
+
+ Synchronize tree with Tomato: Merge patch for better resection, keep deprecated KLT tracker.
+
+commit e9b2dca920cf9575c15150a4988634b00e343a41
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Mon Aug 8 17:07:08 2011 +0200
+
+ Fixes, Documentation.
+
+commit 4fc1c57a2d92442808ac4a3676e6d9a25a51e310
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Sun Aug 7 14:35:08 2011 +0200
+
+ Improve tracker resilience by penalizing large motion vectors.
+
+commit cc8e7e8e08cd91f75c080a0091461ca9fe969664
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Sun Aug 7 09:28:09 2011 +0200
+
+ Leverage SSE2 SAD instruction for 16x speed improvement in integer pixel search resulting in ~1ms per marker for 16x16 pattern on 128x128 region.
+
+commit f362ab4999a768370fca57552464b459eb9fbddc
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Sun Aug 7 09:06:04 2011 +0200
+
+ Improve SAD Tracker subpixel precision (avoid drift even when adapting at each frame).
+
+commit fce7a214c561b5f5f0e17115c31fb48814bde2db
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Sat Aug 6 21:57:06 2011 +0200
+
+ Track using simple Sum of Absolute Differences matching.
+ This method is simpler, more robust, faster and accurate.
+
+commit 620a7a35d9a2818bf6e9dbf5d11debda4be6bc26
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Fri Jul 29 12:35:57 2011 +0200
+
+ Add Intersect unit test.
+
+commit a2bf58fa57be11215eb17ff7f7de58f97d480ec3
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Thu Jul 28 11:08:06 2011 +0200
+
+ Remove tests depending on dead code.
+ Fix CameraIntrinsics test.
+ Add Intersect and Resect tests.
+
+commit 19bddee10b4879c8cd2238ccdf5b8f7620cf8384
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Wed Jul 27 12:07:21 2011 +0200
+
+ Image Distortion: Fixes and more testing.
+
+commit 0454d97da328fb0eda8c6c50511ac31864a6d3d6
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Wed Jul 27 10:32:37 2011 +0200
+
+ Test float image distortion.
+
+commit 8db01595a8721f766d85931a8d92b780461d8741
+Author: Matthias Fauconneau <matthias.fauconneau@gmail.com>
+Date: Wed Jul 27 10:27:07 2011 +0200
+
+ Image Distortion: Bilinear sampling, Optimization, Instantiate all variants (Distort/Undistort, float/ubyte, 1-4 channels).
diff --git a/extern/libmv/SConscript b/extern/libmv/SConscript
new file mode 100644
index 00000000000..52e2da56a0d
--- /dev/null
+++ b/extern/libmv/SConscript
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+import sys
+import os
+
+Import('env')
+
+defs = []
+
+cflags_libmv = Split(env['CFLAGS'])
+ccflags_libmv = Split(env['CCFLAGS'])
+cxxflags_libmv = Split(env['CXXFLAGS'])
+
+defs.append('V3DLIB_ENABLE_SUITESPARSE')
+defs.append('GOOGLE_GLOG_DLL_DECL=')
+
+src = env.Glob("*.cpp")
+src += env.Glob('libmv/image/*.cc')
+src += env.Glob('libmv/multiview/*.cc')
+src += env.Glob('libmv/numeric/*.cc')
+src += env.Glob('libmv/simple_pipeline/*.cc')
+src += env.Glob('libmv/tracking/*.cc')
+src += env.Glob('third_party/fast/*.c')
+src += env.Glob('third_party/gflags/*.cc')
+src += env.Glob('third_party/ldl/Source/*.c')
+src += env.Glob('third_party/ssba/Geometry/*.cpp')
+src += env.Glob('third_party/ssba/Math/*.cpp')
+
+incs = '. ../Eigen3'
+
+if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc'):
+ incs += ' ./third_party/glog/src/windows ./third_party/glog/src/windows/glog ./third_party/msinttypes'
+
+ src += ['./third_party/glog/src/logging.cc', './third_party/glog/src/raw_logging.cc', './third_party/glog/src/utilities.cc', './third_party/glog/src/vlog_is_on.cc']
+ src += ['./third_party/glog/src/windows/port.cc']
+
+ if env['OURPLATFORM'] in ('win32-vc', 'win64-vc'):
+ cflags_libmv.append('/Od')
+ ccflags_libmv.append('/Od')
+ cxxflags_libmv.append('/Od')
+
+ if not env['BF_DEBUG']:
+ defs.append('NDEBUG')
+ else:
+ if not env['BF_DEBUG']:
+ cflags_libmv = Split(env['REL_CFLAGS'])
+ ccflags_libmv = Split(env['REL_CCFLAGS'])
+ cxxflags_libmv = Split(env['REL_CXXFLAGS'])
+else:
+ src += env.Glob("third_party/glog/src/*.cc")
+ incs += ' ./third_party/glog/src'
+ if not env['BF_DEBUG']:
+ cflags_libmv = Split(env['REL_CFLAGS'])
+ ccflags_libmv = Split(env['REL_CCFLAGS'])
+ cxxflags_libmv = Split(env['REL_CXXFLAGS'])
+
+incs += ' ./third_party/ssba ./third_party/ldl/Include ../colamd/Include'
+
+env.BlenderLib ( libname = 'extern_libmv', sources=src, includes=Split(incs), defines=defs, libtype=['extern', 'player'], priority=[20,137], compileflags=cflags_libmv, cc_compileflags=ccflags_libmv, cxx_compileflags=cxxflags_libmv )
diff --git a/extern/libmv/bundle.sh b/extern/libmv/bundle.sh
new file mode 100755
index 00000000000..ff84c5ddc52
--- /dev/null
+++ b/extern/libmv/bundle.sh
@@ -0,0 +1,247 @@
+#!/bin/sh
+
+#BRANCH="keir"
+BRANCH="Matthias-Fauconneau"
+
+if [ -d ./.svn ]; then
+ echo "This script is supposed to work only when using git-svn"
+ exit 1
+fi
+
+repo="git://github.com/${BRANCH}/libmv.git"
+tmp=`mktemp -d`
+
+git clone $repo $tmp/libmv
+
+#git --git-dir $tmp/libmv/.git --work-tree $tmp/libmv log --since="1 month ago" > ChangeLog
+git --git-dir $tmp/libmv/.git --work-tree $tmp/libmv log -n 50 > ChangeLog
+
+for p in `cat ./patches/series`; do
+ echo "Applying patch $p..."
+ cat ./patches/$p | patch -d $tmp/libmv -p1
+done
+
+rm -rf libmv
+rm -rf third_party
+
+cat "files.txt" | while f=`line`; do
+ mkdir -p `dirname $f`
+ cp $tmp/libmv/src/$f $f
+done
+
+rm -rf $tmp
+
+chmod 664 ./third_party/glog/src/windows/*.cc ./third_party/glog/src/windows/*.h ./third_party/glog/src/windows/glog/*.h
+
+sources=`find ./libmv -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t/'`
+headers=`find ./libmv -type f -iname '*.h' | sed -r 's/^\.\//\t/'`
+
+third_sources=`find ./third_party -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | grep -v glog | sed -r 's/^\.\//\t/'`
+third_headers=`find ./third_party -type f -iname '*.h' | grep -v glog | sed -r 's/^\.\//\t/'`
+
+third_glog_sources=`find ./third_party -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | grep glog | grep -v windows | sed -r 's/^\.\//\t\t/'`
+third_glog_headers=`find ./third_party -type f -iname '*.h' | grep glog | grep -v windows | sed -r 's/^\.\//\t\t/'`
+
+src_dir=`find ./libmv -type f -iname '*.cc' -exec dirname {} \; -or -iname '*.cpp' -exec dirname {} \; -or -iname '*.c' -exec dirname {} \; | sed -r 's/^\.\//\t/' | sort | uniq`
+src_third_dir=`find ./third_party -type f -iname '*.cc' -exec dirname {} \; -or -iname '*.cpp' -exec dirname {} \; -or -iname '*.c' -exec dirname {} \; | sed -r 's/^\.\//\t/' | sort | uniq`
+src=""
+win_src=""
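+# Generate one SCons glob statement per source directory. glog sources are
+# skipped here (they are added per-platform in the generated SConscript), and
+# Windows-specific directories are collected separately into win_src.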
+for x in $src_dir $src_third_dir; do
+ t=""
+
+ if test `echo "$x" | grep -c glog ` -eq 1; then
+ continue;
+ fi
+
+ if stat $x/*.cpp > /dev/null 2>&1; then
+ t="src += env.Glob('`echo $x'/*.cpp'`')"
+ fi
+
+ if stat $x/*.c > /dev/null 2>&1; then
+ if [ -z "$t" ]; then
+ t="src += env.Glob('`echo $x'/*.c'`')"
+ else
+ t="$t + env.Glob('`echo $x'/*.c'`')"
+ fi
+ fi
+
+ if stat $x/*.cc > /dev/null 2>&1; then
+ if [ -z "$t" ]; then
+ t="src += env.Glob('`echo $x'/*.cc'`')"
+ else
+ t="$t + env.Glob('`echo $x'/*.cc'`')"
+ fi
+ fi
+
+ if test `echo $x | grep -c windows ` -eq 0; then
+ if [ -z "$src" ]; then
+ src=$t
+ else
+ src=`echo "$src\n$t"`
+ fi
+ else
+ if [ -z "$win_src" ]; then
+ win_src=`echo " $t"`
+ else
+ win_src=`echo "$win_src\n $t"`
+ fi
+ fi
+done
+
+cat > CMakeLists.txt << EOF
+# \$Id\$
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# The Original Code is Copyright (C) 2011, Blender Foundation
+# All rights reserved.
+#
+# Contributor(s): Blender Foundation,
+# Sergey Sharybin
+#
+# ***** END GPL LICENSE BLOCK *****
+
+set(INC
+ .
+ ../Eigen3
+ ./third_party/ssba
+ ./third_party/ldl/Include
+ ../colamd/Include
+)
+
+set(INC_SYS
+
+)
+
+set(SRC
+ libmv-capi.cpp
+${sources}
+
+${third_sources}
+
+ libmv-capi.h
+${headers}
+
+${third_headers}
+)
+
+IF(WIN32)
+ list(APPEND SRC
+ third_party/glog/src/logging.cc
+ third_party/glog/src/raw_logging.cc
+ third_party/glog/src/utilities.cc
+ third_party/glog/src/vlog_is_on.cc
+ third_party/glog/src/windows/port.cc
+
+ third_party/glog/src/utilities.h
+ third_party/glog/src/stacktrace_generic-inl.h
+ third_party/glog/src/stacktrace.h
+ third_party/glog/src/stacktrace_x86_64-inl.h
+ third_party/glog/src/base/googleinit.h
+ third_party/glog/src/base/mutex.h
+ third_party/glog/src/base/commandlineflags.h
+ third_party/glog/src/stacktrace_powerpc-inl.h
+ third_party/glog/src/stacktrace_x86-inl.h
+ third_party/glog/src/config.h
+ third_party/glog/src/stacktrace_libunwind-inl.h
+ third_party/glog/src/windows/glog/raw_logging.h
+ third_party/glog/src/windows/glog/vlog_is_on.h
+ third_party/glog/src/windows/glog/logging.h
+ third_party/glog/src/windows/glog/log_severity.h
+ third_party/glog/src/windows/port.h
+ third_party/glog/src/windows/config.h
+ )
+
+ list(APPEND INC
+ ./third_party/glog/src/windows
+ ./third_party/msinttypes
+ )
+
+ IF(MSVC)
+ set(MSVC_OFLAGS O1 O2 Ox)
+	foreach(FLAG \${MSVC_OFLAGS})
+		string(REPLACE "\${FLAG}" "Od" CMAKE_CXX_FLAGS_RELEASE "\${CMAKE_CXX_FLAGS_RELEASE}")
+		string(REPLACE "\${FLAG}" "Od" CMAKE_C_FLAGS_RELWITHDEBINFO "\${CMAKE_C_FLAGS_RELWITHDEBINFO}")
+ endforeach()
+ ENDIF(MSVC)
+ELSE(WIN32)
+ list(APPEND SRC
+${third_glog_sources}
+
+${third_glog_headers}
+ )
+
+ list(APPEND INC
+ ./third_party/glog/src
+ )
+ENDIF(WIN32)
+
+add_definitions(-DV3DLIB_ENABLE_SUITESPARSE -DGOOGLE_GLOG_DLL_DECL=)
+
+blender_add_lib(extern_libmv "\${SRC}" "\${INC}" "\${INC_SYS}")
+EOF
+
+cat > SConscript << EOF
+#!/usr/bin/python
+import sys
+import os
+
+Import('env')
+
+defs = []
+
+cflags_libmv = Split(env['CFLAGS'])
+ccflags_libmv = Split(env['CCFLAGS'])
+cxxflags_libmv = Split(env['CXXFLAGS'])
+
+defs.append('V3DLIB_ENABLE_SUITESPARSE')
+defs.append('GOOGLE_GLOG_DLL_DECL=')
+
+src = env.Glob("*.cpp")
+$src
+
+incs = '. ../Eigen3'
+
+if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc'):
+ incs += ' ./third_party/glog/src/windows ./third_party/glog/src/windows/glog ./third_party/msinttypes'
+${win_src}
+ src += ['./third_party/glog/src/logging.cc', './third_party/glog/src/raw_logging.cc', './third_party/glog/src/utilities.cc', './third_party/glog/src/vlog_is_on.cc']
+ src += ['./third_party/glog/src/windows/port.cc']
+
+ if env['OURPLATFORM'] in ('win32-vc', 'win64-vc'):
+ cflags_libmv.append('/Od')
+ ccflags_libmv.append('/Od')
+ cxxflags_libmv.append('/Od')
+
+ if not env['BF_DEBUG']:
+ defs.append('NDEBUG')
+ else:
+ if not env['BF_DEBUG']:
+ cflags_libmv = Split(env['REL_CFLAGS'])
+ ccflags_libmv = Split(env['REL_CCFLAGS'])
+ cxxflags_libmv = Split(env['REL_CXXFLAGS'])
+else:
+ src += env.Glob("third_party/glog/src/*.cc")
+ incs += ' ./third_party/glog/src'
+ if not env['BF_DEBUG']:
+ cflags_libmv = Split(env['REL_CFLAGS'])
+ ccflags_libmv = Split(env['REL_CCFLAGS'])
+ cxxflags_libmv = Split(env['REL_CXXFLAGS'])
+
+incs += ' ./third_party/ssba ./third_party/ldl/Include ../colamd/Include'
+
+env.BlenderLib ( libname = 'extern_libmv', sources=src, includes=Split(incs), defines=defs, libtype=['extern', 'player'], priority=[20,137], compileflags=cflags_libmv, cc_compileflags=ccflags_libmv, cxx_compileflags=cxxflags_libmv )
+EOF
diff --git a/extern/libmv/files.txt b/extern/libmv/files.txt
new file mode 100644
index 00000000000..fe6be5d0b20
--- /dev/null
+++ b/extern/libmv/files.txt
@@ -0,0 +1,141 @@
+libmv/logging/logging.h
+libmv/numeric/dogleg.h
+libmv/numeric/levenberg_marquardt.h
+libmv/numeric/poly.h
+libmv/numeric/numeric.cc
+libmv/numeric/function_derivative.h
+libmv/numeric/poly.cc
+libmv/numeric/tinyvector.cc
+libmv/numeric/numeric.h
+libmv/simple_pipeline/reconstruction.cc
+libmv/simple_pipeline/resect.h
+libmv/simple_pipeline/resect.cc
+libmv/simple_pipeline/reconstruction.h
+libmv/simple_pipeline/camera_intrinsics.h
+libmv/simple_pipeline/intersect.cc
+libmv/simple_pipeline/initialize_reconstruction.cc
+libmv/simple_pipeline/camera_intrinsics.cc
+libmv/simple_pipeline/pipeline.cc
+libmv/simple_pipeline/tracks.h
+libmv/simple_pipeline/detect.h
+libmv/simple_pipeline/detect.cc
+libmv/simple_pipeline/pipeline.h
+libmv/simple_pipeline/tracks.cc
+libmv/simple_pipeline/bundle.cc
+libmv/simple_pipeline/intersect.h
+libmv/simple_pipeline/bundle.h
+libmv/simple_pipeline/initialize_reconstruction.h
+libmv/image/convolve.h
+libmv/image/tuple.h
+libmv/image/array_nd.h
+libmv/image/convolve.cc
+libmv/image/array_nd.cc
+libmv/image/sample.h
+libmv/image/image.h
+libmv/tracking/pyramid_region_tracker.cc
+libmv/tracking/region_tracker.h
+libmv/tracking/sad.cc
+libmv/tracking/trklt_region_tracker.cc
+libmv/tracking/klt_region_tracker.cc
+libmv/tracking/retrack_region_tracker.h
+libmv/tracking/sad.h
+libmv/tracking/pyramid_region_tracker.h
+libmv/tracking/trklt_region_tracker.h
+libmv/tracking/retrack_region_tracker.cc
+libmv/tracking/klt_region_tracker.h
+libmv/base/id_generator.h
+libmv/base/vector.h
+libmv/base/scoped_ptr.h
+libmv/base/vector_utils.h
+libmv/multiview/projection.cc
+libmv/multiview/conditioning.cc
+libmv/multiview/nviewtriangulation.h
+libmv/multiview/resection.h
+libmv/multiview/fundamental.cc
+libmv/multiview/euclidean_resection.cc
+libmv/multiview/euclidean_resection.h
+libmv/multiview/triangulation.h
+libmv/multiview/projection.h
+libmv/multiview/triangulation.cc
+libmv/multiview/fundamental.h
+libmv/multiview/conditioning.h
+third_party/ssba/README.TXT
+third_party/ssba/COPYING.TXT
+third_party/ssba/Geometry/v3d_metricbundle.h
+third_party/ssba/Geometry/v3d_metricbundle.cpp
+third_party/ssba/Geometry/v3d_cameramatrix.h
+third_party/ssba/Geometry/v3d_distortion.h
+third_party/ssba/README.libmv
+third_party/ssba/Math/v3d_linear_utils.h
+third_party/ssba/Math/v3d_optimization.h
+third_party/ssba/Math/v3d_mathutilities.h
+third_party/ssba/Math/v3d_linear.h
+third_party/ssba/Math/v3d_optimization.cpp
+third_party/gflags/gflags_completions.h
+third_party/gflags/mutex.h
+third_party/gflags/gflags.cc
+third_party/gflags/gflags_reporting.cc
+third_party/gflags/README.libmv
+third_party/gflags/config.h
+third_party/gflags/gflags_completions.cc
+third_party/gflags/gflags.h
+third_party/fast/fast_9.c
+third_party/fast/fast_10.c
+third_party/fast/fast_11.c
+third_party/fast/fast.h
+third_party/fast/LICENSE
+third_party/fast/fast_12.c
+third_party/fast/fast.c
+third_party/fast/README
+third_party/fast/README.libmv
+third_party/fast/nonmax.c
+third_party/ldl/Include/ldl.h
+third_party/ldl/CMakeLists.txt
+third_party/ldl/README.libmv
+third_party/ldl/Doc/ChangeLog
+third_party/ldl/Doc/lesser.txt
+third_party/ldl/README.txt
+third_party/ldl/Source/ldl.c
+third_party/glog/ChangeLog
+third_party/glog/COPYING
+third_party/glog/src/utilities.cc
+third_party/glog/src/utilities.h
+third_party/glog/src/symbolize.cc
+third_party/glog/src/stacktrace_generic-inl.h
+third_party/glog/src/config_mac.h
+third_party/glog/src/vlog_is_on.cc
+third_party/glog/src/signalhandler.cc
+third_party/glog/src/stacktrace.h
+third_party/glog/src/stacktrace_x86_64-inl.h
+third_party/glog/src/symbolize.h
+third_party/glog/src/base/googleinit.h
+third_party/glog/src/base/mutex.h
+third_party/glog/src/base/commandlineflags.h
+third_party/glog/src/windows/preprocess.sh
+third_party/glog/src/windows/port.h
+third_party/glog/src/windows/config.h
+third_party/glog/src/windows/glog/raw_logging.h
+third_party/glog/src/windows/glog/vlog_is_on.h
+third_party/glog/src/windows/glog/logging.h
+third_party/glog/src/windows/glog/log_severity.h
+third_party/glog/src/windows/port.cc
+third_party/glog/src/logging.cc
+third_party/glog/src/stacktrace_powerpc-inl.h
+third_party/glog/src/stacktrace_x86-inl.h
+third_party/glog/src/demangle.cc
+third_party/glog/src/config.h
+third_party/glog/src/demangle.h
+third_party/glog/src/stacktrace_libunwind-inl.h
+third_party/glog/src/glog/raw_logging.h
+third_party/glog/src/glog/vlog_is_on.h
+third_party/glog/src/glog/logging.h
+third_party/glog/src/glog/log_severity.h
+third_party/glog/src/raw_logging.cc
+third_party/glog/src/config_linux.h
+third_party/glog/NEWS
+third_party/glog/README
+third_party/glog/README.libmv
+third_party/glog/AUTHORS
+third_party/msinttypes/stdint.h
+third_party/msinttypes/inttypes.h
+third_party/msinttypes/README.libmv
diff --git a/extern/libmv/libmv-capi.cpp b/extern/libmv/libmv-capi.cpp
new file mode 100644
index 00000000000..c3d3c02b043
--- /dev/null
+++ b/extern/libmv/libmv-capi.cpp
@@ -0,0 +1,707 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/* define this to generate PNG images with the content of the search areas
+   for which tracking failed */
+#undef DUMP_FAILURE
+
+#include "libmv-capi.h"
+
+#include "glog/logging.h"
+#include "Math/v3d_optimization.h"
+
+#include "libmv/tracking/klt_region_tracker.h"
+#include "libmv/tracking/trklt_region_tracker.h"
+#include "libmv/tracking/pyramid_region_tracker.h"
+#include "libmv/tracking/retrack_region_tracker.h"
+
+#include "libmv/tracking/sad.h"
+
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/initialize_reconstruction.h"
+#include "libmv/simple_pipeline/bundle.h"
+#include "libmv/simple_pipeline/detect.h"
+#include "libmv/simple_pipeline/pipeline.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+#include <stdlib.h>
+
+#ifdef DUMP_FAILURE
+# include <png.h>
+#endif
+
+#ifdef _MSC_VER
+# define snprintf _snprintf
+#endif
+
+#define DEFAULT_WINDOW_HALFSIZE 5
+
+typedef struct libmv_RegionTracker {
+ libmv::TrkltRegionTracker *trklt_region_tracker;
+ libmv::PyramidRegionTracker *pyramid_region_tracker;
+ libmv::RegionTracker *region_tracker;
+} libmv_RegionTracker;
+
+typedef struct libmv_Reconstruction {
+ libmv::EuclideanReconstruction reconstruction;
+
+ /* used for per-track average error calculation after reconstruction */
+ libmv::Tracks tracks;
+ libmv::CameraIntrinsics intrinsics;
+
+ double error;
+} libmv_Reconstruction;
+
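+/* Detected features together with the margin that was cropped away before
+   detection, so that feature coordinates can be offset back into the full
+   image (see libmv_getFeature). */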
+typedef struct libmv_Features {
+ int count, margin;
+ libmv::Feature *features;
+} libmv_Features;
+
+/* ************ Logging ************ */
+
+void libmv_initLogging(const char *argv0)
+{
+ google::InitGoogleLogging(argv0);
+ google::SetCommandLineOption("logtostderr", "1");
+ google::SetCommandLineOption("v", "0");
+ google::SetCommandLineOption("stderrthreshold", "7");
+ google::SetCommandLineOption("minloglevel", "7");
+ V3D::optimizerVerbosenessLevel = 0;
+}
+
+void libmv_startDebugLogging(void)
+{
+ google::SetCommandLineOption("logtostderr", "1");
+ google::SetCommandLineOption("v", "0");
+ google::SetCommandLineOption("stderrthreshold", "1");
+ google::SetCommandLineOption("minloglevel", "0");
+ V3D::optimizerVerbosenessLevel = 1;
+}
+
+void libmv_setLoggingVerbosity(int verbosity)
+{
+ char val[10];
+ snprintf(val, sizeof(val), "%d", verbosity);
+
+ google::SetCommandLineOption("v", val);
+ V3D::optimizerVerbosenessLevel = verbosity;
+}
+
+/* ************ RegionTracker ************ */
+
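+/* Builds the tracker chain: a TRKLT tracker run per pyramid level, wrapped
+   in a RetrackRegionTracker that verifies each match by tracking backwards
+   and rejecting it when the result drifts beyond the given tolerance. */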
+libmv_RegionTracker *libmv_regionTrackerNew(int max_iterations, int pyramid_level, double tolerance)
+{
+ libmv::RegionTracker *region_tracker;
+ libmv::TrkltRegionTracker *trklt_region_tracker = new libmv::TrkltRegionTracker;
+
+ trklt_region_tracker->half_window_size = DEFAULT_WINDOW_HALFSIZE;
+ trklt_region_tracker->max_iterations = max_iterations;
+
+ libmv::PyramidRegionTracker *pyramid_region_tracker =
+ new libmv::PyramidRegionTracker(trklt_region_tracker, pyramid_level);
+
+ region_tracker = new libmv::RetrackRegionTracker(pyramid_region_tracker, tolerance);
+
+ libmv_RegionTracker *configured_region_tracker = new libmv_RegionTracker;
+ configured_region_tracker->trklt_region_tracker = trklt_region_tracker;
+ configured_region_tracker->pyramid_region_tracker = pyramid_region_tracker;
+ configured_region_tracker->region_tracker = region_tracker;
+
+ return configured_region_tracker;
+}
+
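+/* Copies a row-major float buffer into a single-channel libmv::FloatImage. */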
+static void floatBufToImage(const float *buf, int width, int height, libmv::FloatImage *image)
+{
+ int x, y, a = 0;
+
+ image->resize(height, width);
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ (*image)(y, x, 0) = buf[a++];
+ }
+ }
+}
+
+#ifdef DUMP_FAILURE
+void savePNGImage(png_bytep *row_pointers, int width, int height, int depth, int color_type, char *file_name)
+{
+ png_infop info_ptr;
+ png_structp png_ptr;
+ FILE *fp = fopen(file_name, "wb");
+
+ if (!fp)
+ return;
+
+ /* Initialize stuff */
+ png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
+ info_ptr = png_create_info_struct(png_ptr);
+
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return;
+ }
+
+ png_init_io(png_ptr, fp);
+
+ /* write header */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return;
+ }
+
+ png_set_IHDR(png_ptr, info_ptr, width, height,
+ depth, color_type, PNG_INTERLACE_NONE,
+ PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
+
+ png_write_info(png_ptr, info_ptr);
+
+ /* write bytes */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return;
+ }
+
+ png_write_image(png_ptr, row_pointers);
+
+ /* end write */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return;
+ }
+
+ png_write_end(png_ptr, NULL);
+
+ fclose(fp);
+}
+
+static void saveImage(libmv::FloatImage image, int x0, int y0)
+{
+ int x, y;
+ png_bytep *row_pointers;
+
+ row_pointers= (png_bytep*)malloc(sizeof(png_bytep)*image.Height());
+
+ for (y = 0; y < image.Height(); y++) {
+ row_pointers[y]= (png_bytep)malloc(sizeof(png_byte)*4*image.Width());
+
+ for (x = 0; x < image.Width(); x++) {
+ if (x0 == x && y0 == y) {
+ row_pointers[y][x*4+0]= 255;
+ row_pointers[y][x*4+1]= 0;
+ row_pointers[y][x*4+2]= 0;
+ row_pointers[y][x*4+3]= 255;
+ }
+ else {
+ float pixel = image(y, x, 0);
+ row_pointers[y][x*4+0]= pixel*255;
+ row_pointers[y][x*4+1]= pixel*255;
+ row_pointers[y][x*4+2]= pixel*255;
+ row_pointers[y][x*4+3]= 255;
+ }
+ }
+ }
+
+ {
+ static int a= 0;
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%02d.png", ++a);
+ savePNGImage(row_pointers, image.Width(), image.Height(), 8, PNG_COLOR_TYPE_RGBA, buf);
+ }
+
+ for (y = 0; y < image.Height(); y++) {
+ free(row_pointers[y]);
+ }
+ free(row_pointers);
+}
+
+static void saveBytesImage(unsigned char *data, int width, int height)
+{
+ int x, y;
+ png_bytep *row_pointers;
+
+ row_pointers= (png_bytep*)malloc(sizeof(png_bytep)*height);
+
+ for (y = 0; y < height; y++) {
+ row_pointers[y]= (png_bytep)malloc(sizeof(png_byte)*4*width);
+
+ for (x = 0; x < width; x++) {
+ char pixel = data[width*y+x];
+ row_pointers[y][x*4+0]= pixel;
+ row_pointers[y][x*4+1]= pixel;
+ row_pointers[y][x*4+2]= pixel;
+ row_pointers[y][x*4+3]= 255;
+ }
+ }
+
+ {
+ static int a= 0;
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%02d.png", ++a);
+ savePNGImage(row_pointers, width, height, 8, PNG_COLOR_TYPE_RGBA, buf);
+ }
+
+ for (y = 0; y < height; y++) {
+ free(row_pointers[y]);
+ }
+ free(row_pointers);
+}
+#endif
+
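+/* Tracks the point at (x1, y1) in ima1 to (*x2, *y2) in ima2 using the
+   configured tracker chain; returns non-zero on success. */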
+int libmv_regionTrackerTrack(libmv_RegionTracker *libmv_tracker, const float *ima1, const float *ima2,
+ int width, int height, int half_window_size,
+ double x1, double y1, double *x2, double *y2)
+{
+ libmv::RegionTracker *region_tracker;
+ libmv::TrkltRegionTracker *trklt_region_tracker;
+ libmv::FloatImage old_patch, new_patch;
+
+ trklt_region_tracker = libmv_tracker->trklt_region_tracker;
+ region_tracker = libmv_tracker->region_tracker;
+
+ trklt_region_tracker->half_window_size = half_window_size;
+
+ floatBufToImage(ima1, width, height, &old_patch);
+ floatBufToImage(ima2, width, height, &new_patch);
+
+#ifndef DUMP_FAILURE
+ return region_tracker->Track(old_patch, new_patch, x1, y1, x2, y2);
+#else
+ {
+ double sx2 = *x2, sy2 = *y2;
+ int result = region_tracker->Track(old_patch, new_patch, x1, y1, x2, y2);
+
+ if (!result) {
+ saveImage(old_patch, x1, y1);
+ saveImage(new_patch, sx2, sy2);
+ }
+
+ return result;
+ }
+#endif
+}
+
+void libmv_regionTrackerDestroy(libmv_RegionTracker *libmv_tracker)
+{
+ delete libmv_tracker->region_tracker;
+ delete libmv_tracker;
+}
+
+/* ************ SAD Tracker ************ */
+
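+/* Samples a 16x16 pattern from image under the given 3x2 affine warp. */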
+void libmv_SADSamplePattern(unsigned char *image, int stride,
+ float warp[3][2], unsigned char *pattern)
+{
+ libmv::mat32 mat32;
+
+ memcpy(mat32.data, warp, sizeof(float)*3*2);
+
+ libmv::SamplePattern(image, stride, mat32, pattern, 16);
+}
+
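+/* Tracks the sampled pattern in image, updating the affine warp in place,
+   and returns the match quality reported by the SAD tracker. */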
+float libmv_SADTrackerTrack(unsigned char *pattern, unsigned char *warped, unsigned char *image, int stride,
+ int width, int height, float warp[3][2])
+{
+ float result;
+ libmv::mat32 mat32;
+
+ memcpy(mat32.data, warp, sizeof(float)*3*2);
+
+ result = libmv::Track(pattern, warped, 16, image, stride, width, height, &mat32, 16, 16);
+
+ memcpy(warp, mat32.data, sizeof(float)*3*2);
+
+ return result;
+}
+
+/* ************ Tracks ************ */
+
+libmv_Tracks *libmv_tracksNew(void)
+{
+ libmv::Tracks *libmv_tracks = new libmv::Tracks();
+
+ return (libmv_Tracks *)libmv_tracks;
+}
+
+void libmv_tracksInsert(struct libmv_Tracks *libmv_tracks, int image, int track, double x, double y)
+{
+ ((libmv::Tracks*)libmv_tracks)->Insert(image, track, x, y);
+}
+
+void libmv_tracksDestroy(libmv_Tracks *libmv_tracks)
+{
+ delete (libmv::Tracks*)libmv_tracks;
+}
+
+/* ************ Reconstruction solver ************ */
+
+libmv_Reconstruction *libmv_solveReconstruction(libmv_Tracks *tracks, int keyframe1, int keyframe2,
+ double focal_length, double principal_x, double principal_y, double k1, double k2, double k3)
+{
+ /* Invert the camera intrinsics. */
+ libmv::vector<libmv::Marker> markers = ((libmv::Tracks*)tracks)->AllMarkers();
+ libmv_Reconstruction *libmv_reconstruction = new libmv_Reconstruction();
+ libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
+ libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
+
+ intrinsics->SetFocalLength(focal_length, focal_length);
+ intrinsics->SetPrincipalPoint(principal_x, principal_y);
+ intrinsics->SetRadialDistortion(k1, k2, k3);
+
+ if(focal_length) {
+		/* only perform lens undistortion when the focal length is non-zero */
+ for (int i = 0; i < markers.size(); ++i) {
+ intrinsics->InvertIntrinsics(markers[i].x,
+ markers[i].y,
+ &(markers[i].x),
+ &(markers[i].y));
+ }
+ }
+
+ libmv::Tracks normalized_tracks(markers);
+
+ libmv::vector<libmv::Marker> keyframe_markers =
+ normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
+
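+	/* reconstruct the keyframe pair first, then bundle and incrementally
+	   complete the rest of the sequence */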
+ libmv::EuclideanReconstructTwoFrames(keyframe_markers, reconstruction);
+ libmv::EuclideanBundle(normalized_tracks, reconstruction);
+ libmv::EuclideanCompleteReconstruction(normalized_tracks, reconstruction);
+
+ libmv_reconstruction->tracks = *(libmv::Tracks *)tracks;
+ libmv_reconstruction->error = libmv::EuclideanReprojectionError(*(libmv::Tracks *)tracks, *reconstruction, *intrinsics);
+
+ return (libmv_Reconstruction *)libmv_reconstruction;
+}
+
+int libmv_reporojectionPointForTrack(libmv_Reconstruction *libmv_reconstruction, int track, double pos[3])
+{
+ libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
+ libmv::EuclideanPoint *point = reconstruction->PointForTrack(track);
+
+ if(point) {
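+		/* swap Y and Z to convert from libmv's coordinate convention to Blender's */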
+ pos[0] = point->X[0];
+ pos[1] = point->X[2];
+ pos[2] = point->X[1];
+
+ return 1;
+ }
+
+ return 0;
+}
+
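+/* Projects a reconstructed 3D point through the given camera and applies the
+   intrinsics, yielding the reprojected marker position in pixel coordinates. */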
+static libmv::Marker ProjectMarker(const libmv::EuclideanPoint &point, const libmv::EuclideanCamera &camera,
+ const libmv::CameraIntrinsics &intrinsics) {
+ libmv::Vec3 projected = camera.R * point.X + camera.t;
+ projected /= projected(2);
+
+ libmv::Marker reprojected_marker;
+ intrinsics.ApplyIntrinsics(projected(0), projected(1), &reprojected_marker.x, &reprojected_marker.y);
+
+ reprojected_marker.image = camera.image;
+ reprojected_marker.track = point.track;
+
+ return reprojected_marker;
+}
+
+double libmv_reporojectionErrorForTrack(libmv_Reconstruction *libmv_reconstruction, int track)
+{
+ libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
+ libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
+ libmv::vector<libmv::Marker> markers = libmv_reconstruction->tracks.MarkersForTrack(track);
+
+ int num_reprojected = 0;
+ double total_error = 0.0;
+
+ for (int i = 0; i < markers.size(); ++i) {
+ const libmv::EuclideanCamera *camera = reconstruction->CameraForImage(markers[i].image);
+ const libmv::EuclideanPoint *point = reconstruction->PointForTrack(markers[i].track);
+
+ if (!camera || !point) {
+ continue;
+ }
+
+ num_reprojected++;
+
+ libmv::Marker reprojected_marker = ProjectMarker(*point, *camera, *intrinsics);
+ double ex = reprojected_marker.x - markers[i].x;
+ double ey = reprojected_marker.y - markers[i].y;
+
+ total_error += sqrt(ex*ex + ey*ey);
+ }
+
+ return total_error / num_reprojected;
+}
+
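+/* Fills mat with the camera matrix for the given image, converted to
+   Blender's coordinate convention (Y and Z axes swapped, third row of the
+   rotation negated); returns 0 if no camera exists for the image. */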
+int libmv_reporojectionCameraForImage(libmv_Reconstruction *libmv_reconstruction, int image, double mat[4][4])
+{
+ libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
+ libmv::EuclideanCamera *camera = reconstruction->CameraForImage(image);
+
+ if(camera) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ int l = k;
+
+ if (k == 1) l = 2;
+ else if (k == 2) l = 1;
+
+ if (j == 2) mat[j][l] = -camera->R(j,k);
+ else mat[j][l] = camera->R(j,k);
+ }
+ mat[j][3]= 0.0;
+ }
+
+ libmv::Vec3 optical_center = -camera->R.transpose() * camera->t;
+
+ mat[3][0] = optical_center(0);
+ mat[3][1] = optical_center(2);
+ mat[3][2] = optical_center(1);
+
+ mat[3][3]= 1.0;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+double libmv_reprojectionError(libmv_Reconstruction *libmv_reconstruction)
+{
+ return libmv_reconstruction->error;
+}
+
+void libmv_destroyReconstruction(libmv_Reconstruction *libmv_reconstruction)
+{
+ delete libmv_reconstruction;
+}
+
+/* ************ feature detector ************ */
+
+struct libmv_Features *libmv_detectFeatures(unsigned char *data, int width, int height, int stride,
+ int margin, int count, int min_distance)
+{
+ libmv::Feature *features = new libmv::Feature[count];
+ libmv_Features *libmv_features = new libmv_Features;
+
+ if(margin) {
+ data += margin*stride+margin;
+ width -= 2*margin;
+ height -= 2*margin;
+ }
+
+ libmv::Detect(data, stride, width, height, features, &count, min_distance, NULL);
+
+ libmv_features->count= count;
+ libmv_features->margin= margin;
+ libmv_features->features= features;
+
+	return libmv_features;
+}
+
+int libmv_countFeatures(struct libmv_Features *libmv_features)
+{
+ return libmv_features->count;
+}
+
+void libmv_getFeature(struct libmv_Features *libmv_features, int number, double *x, double *y, double *score, double *size)
+{
+ libmv::Feature feature = libmv_features->features[number];
+
+ *x = feature.x + libmv_features->margin;
+ *y = feature.y + libmv_features->margin;
+ *score = feature.score;
+ *size = feature.size;
+}
+
+void libmv_destroyFeatures(struct libmv_Features *libmv_features)
+{
+	delete [] libmv_features->features;
+	delete libmv_features;
+}
+
+/* ************ camera intrinsics ************ */
+
+struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsNew(double focal_length, double principal_x, double principal_y,
+ double k1, double k2, double k3, int width, int height)
+{
+ libmv::CameraIntrinsics *intrinsics= new libmv::CameraIntrinsics();
+
+ intrinsics->SetFocalLength(focal_length, focal_length);
+ intrinsics->SetPrincipalPoint(principal_x, principal_y);
+ intrinsics->SetRadialDistortion(k1, k2, k3);
+ intrinsics->SetImageSize(width, height);
+
+ return (struct libmv_CameraIntrinsics *) intrinsics;
+}
+
+struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsCopy(struct libmv_CameraIntrinsics *libmvIntrinsics)
+{
+ libmv::CameraIntrinsics *orig_intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
+ libmv::CameraIntrinsics *new_intrinsics= new libmv::CameraIntrinsics(*orig_intrinsics);
+
+ return (struct libmv_CameraIntrinsics *) new_intrinsics;
+}
+
+void libmv_CameraIntrinsicsDestroy(struct libmv_CameraIntrinsics *libmvIntrinsics)
+{
+ libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
+
+ delete intrinsics;
+}
+
+void libmv_CameraIntrinsicsUpdate(struct libmv_CameraIntrinsics *libmvIntrinsics, double focal_length,
+ double principal_x, double principal_y, double k1, double k2, double k3, int width, int height)
+{
+ libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
+
+ if (intrinsics->focal_length() != focal_length)
+ intrinsics->SetFocalLength(focal_length, focal_length);
+
+	if (intrinsics->principal_point_x() != principal_x || intrinsics->principal_point_y() != principal_y)
+		intrinsics->SetPrincipalPoint(principal_x, principal_y);
+
+ if (intrinsics->k1() != k1 || intrinsics->k2() != k2 || intrinsics->k3() != k3)
+ intrinsics->SetRadialDistortion(k1, k2, k3);
+
+ if (intrinsics->image_width() != width || intrinsics->image_height() != height)
+ intrinsics->SetImageSize(width, height);
+}
+
+void libmv_CameraIntrinsicsUndistortByte(struct libmv_CameraIntrinsics *libmvIntrinsics,
+ unsigned char *src, unsigned char *dst, int width, int height, int channels)
+{
+ libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
+
+ intrinsics->Undistort(src, dst, width, height, channels);
+}
+
+void libmv_CameraIntrinsicsUndistortFloat(struct libmv_CameraIntrinsics *libmvIntrinsics,
+ float *src, float *dst, int width, int height, int channels)
+{
+ libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
+
+ intrinsics->Undistort(src, dst, width, height, channels);
+}
+
+void libmv_CameraIntrinsicsDistortByte(struct libmv_CameraIntrinsics *libmvIntrinsics,
+ unsigned char *src, unsigned char *dst, int width, int height, int channels)
+{
+ libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
+ intrinsics->Distort(src, dst, width, height, channels);
+}
+
+void libmv_CameraIntrinsicsDistortFloat(struct libmv_CameraIntrinsics *libmvIntrinsics,
+ float *src, float *dst, int width, int height, int channels)
+{
+ libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
+
+ intrinsics->Distort(src, dst, width, height, channels);
+}
+
+/* ************ distortion ************ */
+
+void libmv_undistortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ unsigned char *src, unsigned char *dst, int width, int height, int channels)
+{
+ libmv::CameraIntrinsics intrinsics;
+
+ intrinsics.SetFocalLength(focal_length, focal_length);
+ intrinsics.SetPrincipalPoint(principal_x, principal_y);
+ intrinsics.SetRadialDistortion(k1, k2, k3);
+
+ intrinsics.Undistort(src, dst, width, height, channels);
+}
+
+void libmv_undistortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ float *src, float *dst, int width, int height, int channels)
+{
+ libmv::CameraIntrinsics intrinsics;
+
+ intrinsics.SetFocalLength(focal_length, focal_length);
+ intrinsics.SetPrincipalPoint(principal_x, principal_y);
+ intrinsics.SetRadialDistortion(k1, k2, k3);
+
+ intrinsics.Undistort(src, dst, width, height, channels);
+}
+
+void libmv_distortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ unsigned char *src, unsigned char *dst, int width, int height, int channels)
+{
+ libmv::CameraIntrinsics intrinsics;
+
+ intrinsics.SetFocalLength(focal_length, focal_length);
+ intrinsics.SetPrincipalPoint(principal_x, principal_y);
+ intrinsics.SetRadialDistortion(k1, k2, k3);
+
+ intrinsics.Distort(src, dst, width, height, channels);
+}
+
+void libmv_distortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ float *src, float *dst, int width, int height, int channels)
+{
+ libmv::CameraIntrinsics intrinsics;
+
+ intrinsics.SetFocalLength(focal_length, focal_length);
+ intrinsics.SetPrincipalPoint(principal_x, principal_y);
+ intrinsics.SetRadialDistortion(k1, k2, k3);
+
+ intrinsics.Distort(src, dst, width, height, channels);
+}
+
+/* ************ utils ************ */
+
+void libmv_applyCameraIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ double x, double y, double *x1, double *y1)
+{
+ libmv::CameraIntrinsics intrinsics;
+
+ intrinsics.SetFocalLength(focal_length, focal_length);
+ intrinsics.SetPrincipalPoint(principal_x, principal_y);
+ intrinsics.SetRadialDistortion(k1, k2, k3);
+
+ if(focal_length) {
+		/* only apply the intrinsics (including lens distortion) when the focal length is non-zero */
+
+ intrinsics.ApplyIntrinsics(x, y, x1, y1);
+ }
+}
+
+void libmv_InvertIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ double x, double y, double *x1, double *y1)
+{
+ libmv::CameraIntrinsics intrinsics;
+
+ intrinsics.SetFocalLength(focal_length, focal_length);
+ intrinsics.SetPrincipalPoint(principal_x, principal_y);
+ intrinsics.SetRadialDistortion(k1, k2, k3);
+
+ if(focal_length) {
+		/* only invert the intrinsics (lens undistortion) when the focal length is non-zero */
+
+ intrinsics.InvertIntrinsics(x, y, x1, y1);
+ }
+}
diff --git a/extern/libmv/libmv-capi.h b/extern/libmv/libmv-capi.h
new file mode 100644
index 00000000000..e0a40a1ad86
--- /dev/null
+++ b/extern/libmv/libmv-capi.h
@@ -0,0 +1,125 @@
+/*
+ * $Id$
+ *
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_H
+#define LIBMV_C_API_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct libmv_RegionTracker;
+struct libmv_Tracks;
+struct libmv_Reconstruction;
+struct libmv_Features;
+struct libmv_CameraIntrinsics;
+
+/* Logging */
+void libmv_initLogging(const char *argv0);
+void libmv_startDebugLogging(void);
+void libmv_setLoggingVerbosity(int verbosity);
+
+/* RegionTracker */
+struct libmv_RegionTracker *libmv_regionTrackerNew(int max_iterations, int pyramid_level, double tolerance);
+int libmv_regionTrackerTrack(struct libmv_RegionTracker *libmv_tracker, const float *ima1, const float *ima2,
+ int width, int height, int half_window_size,
+ double x1, double y1, double *x2, double *y2);
+void libmv_regionTrackerDestroy(struct libmv_RegionTracker *libmv_tracker);
+
+/* SAD Tracker */
+void libmv_SADSamplePattern(unsigned char *image, int stride,
+ float warp[3][2], unsigned char *pattern);
+float libmv_SADTrackerTrack(unsigned char *pattern, unsigned char *warped, unsigned char *image,
+ int stride, int width, int height, float warp[3][2]);
+
+/* Tracks */
+struct libmv_Tracks *libmv_tracksNew(void);
+void libmv_tracksInsert(struct libmv_Tracks *libmv_tracks, int image, int track, double x, double y);
+void libmv_tracksDestroy(struct libmv_Tracks *libmv_tracks);
+
+/* Reconstruction solver */
+struct libmv_Reconstruction *libmv_solveReconstruction(struct libmv_Tracks *tracks, int keyframe1, int keyframe2,
+ double focal_length, double principal_x, double principal_y, double k1, double k2, double k3);
+int libmv_reporojectionPointForTrack(struct libmv_Reconstruction *libmv_reconstruction, int track, double pos[3]);
+double libmv_reporojectionErrorForTrack(struct libmv_Reconstruction *libmv_reconstruction, int track);
+int libmv_reporojectionCameraForImage(struct libmv_Reconstruction *libmv_reconstruction, int image, double mat[4][4]);
+double libmv_reprojectionError(struct libmv_Reconstruction *libmv_reconstruction);
+void libmv_destroyReconstruction(struct libmv_Reconstruction *libmv_reconstruction);
+
+/* feature detector */
+struct libmv_Features *libmv_detectFeatures(unsigned char *data, int width, int height, int stride,
+ int margin, int count, int min_distance);
+int libmv_countFeatures(struct libmv_Features *libmv_features);
+void libmv_getFeature(struct libmv_Features *libmv_features, int number, double *x, double *y, double *score, double *size);
+void libmv_destroyFeatures(struct libmv_Features *libmv_features);
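+
+/* A usage sketch (illustrative only; the margin, count and min_distance
+ * values are made up):
+ *
+ *   struct libmv_Features *features =
+ *       libmv_detectFeatures(data, width, height, stride, 16, 100, 20);
+ *   int i, total = libmv_countFeatures(features);
+ *   for (i = 0; i < total; i++) {
+ *     double x, y, score, size;
+ *     libmv_getFeature(features, i, &x, &y, &score, &size);
+ *   }
+ *   libmv_destroyFeatures(features);
+ */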
+
+/* camera intrinsics */
+struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsNew(double focal_length, double principal_x, double principal_y,
+ double k1, double k2, double k3, int width, int height);
+
+struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsCopy(struct libmv_CameraIntrinsics *libmvIntrinsics);
+
+void libmv_CameraIntrinsicsDestroy(struct libmv_CameraIntrinsics *libmvIntrinsics);
+
+void libmv_CameraIntrinsicsUpdate(struct libmv_CameraIntrinsics *libmvIntrinsics, double focal_length,
+ double principal_x, double principal_y, double k1, double k2, double k3, int width, int height);
+
+void libmv_CameraIntrinsicsUndistortByte(struct libmv_CameraIntrinsics *libmvIntrinsics,
+ unsigned char *src, unsigned char *dst, int width, int height, int channels);
+
+void libmv_CameraIntrinsicsUndistortFloat(struct libmv_CameraIntrinsics *libmvIntrinsics,
+ float *src, float *dst, int width, int height, int channels);
+
+void libmv_CameraIntrinsicsDistortByte(struct libmv_CameraIntrinsics *libmvIntrinsics,
+ unsigned char *src, unsigned char *dst, int width, int height, int channels);
+
+void libmv_CameraIntrinsicsDistortFloat(struct libmv_CameraIntrinsics *libmvIntrinsics,
+ float *src, float *dst, int width, int height, int channels);
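+
+/* A usage sketch (illustrative only, assuming separate src and dst buffers):
+ * build an intrinsics object once and reuse it for each buffer.
+ *
+ *   struct libmv_CameraIntrinsics *intrinsics =
+ *       libmv_CameraIntrinsicsNew(focal_length, principal_x, principal_y,
+ *                                 k1, k2, k3, width, height);
+ *   libmv_CameraIntrinsicsUndistortByte(intrinsics, src, dst,
+ *                                       width, height, channels);
+ *   libmv_CameraIntrinsicsDestroy(intrinsics);
+ */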
+
+/* distortion */
+void libmv_undistortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ unsigned char *src, unsigned char *dst, int width, int height, int channels);
+void libmv_undistortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ float *src, float *dst, int width, int height, int channels);
+
+void libmv_distortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ unsigned char *src, unsigned char *dst, int width, int height, int channels);
+void libmv_distortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ float *src, float *dst, int width, int height, int channels);
+
+/* utils */
+void libmv_applyCameraIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ double x, double y, double *x1, double *y1);
+void libmv_InvertIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
+ double x, double y, double *x1, double *y1);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_H
diff --git a/extern/libmv/libmv/base/id_generator.h b/extern/libmv/libmv/base/id_generator.h
new file mode 100644
index 00000000000..bf1eafd218e
--- /dev/null
+++ b/extern/libmv/libmv/base/id_generator.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_ID_GENERATOR_H
+#define LIBMV_ID_GENERATOR_H
+
+namespace libmv {
+
+template <typename ID>
+class IdGenerator {
+ public:
+ IdGenerator() : next_(0) {}
+ ID Generate() { return next_++; }
+ private:
+ ID next_;
+};
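+
+// Usage sketch (illustrative only):
+//   IdGenerator<int> ids;
+//   int first = ids.Generate();   // 0
+//   int second = ids.Generate();  // 1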
+
+} // namespace libmv
+
+#endif // LIBMV_ID_GENERATOR_H
diff --git a/extern/libmv/libmv/base/scoped_ptr.h b/extern/libmv/libmv/base/scoped_ptr.h
new file mode 100644
index 00000000000..f1e89eb5625
--- /dev/null
+++ b/extern/libmv/libmv/base/scoped_ptr.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_BASE_SCOPED_PTR_H
+#define LIBMV_BASE_SCOPED_PTR_H
+
+namespace libmv {
+
+/**
+ * A handle for a heap-allocated resource that should be freed when it goes out
+ * of scope. This looks similar to the one found in TR1.
+ */
+template<typename T>
+class scoped_ptr {
+ public:
+ scoped_ptr(T *resource) : resource_(resource) {}
+ ~scoped_ptr() { reset(0); }
+
+ T *get() const { return resource_; }
+ T *operator->() const { return resource_; }
+ T &operator*() const { return *resource_; }
+
+ void reset(T *new_resource) {
+ // sizeof(T) forces T to be a complete type here, so deleting an
+ // incomplete type is a compile error rather than undefined behavior.
+ if (sizeof(T)) {
+ delete resource_;
+ }
+ resource_ = new_resource;
+ }
+
+ T *release() {
+ T *released_resource = resource_;
+ resource_ = 0;
+ return released_resource;
+ }
+
+ private:
+ // No copying allowed: declared but intentionally left undefined.
+ scoped_ptr(const scoped_ptr &);
+ scoped_ptr &operator=(const scoped_ptr &);
+
+ T *resource_;
+};
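+
+// Usage sketch (illustrative only; Foo is a made-up type):
+//   {
+//     scoped_ptr<Foo> foo(new Foo);
+//     foo->DoSomething();
+//   }  // *foo is deleted here, when the scoped_ptr goes out of scope.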
+
+} // namespace libmv
+
+#endif // LIBMV_BASE_SCOPED_PTR_H
diff --git a/extern/libmv/libmv/base/vector.h b/extern/libmv/libmv/base/vector.h
new file mode 100644
index 00000000000..9dc48676629
--- /dev/null
+++ b/extern/libmv/libmv/base/vector.h
@@ -0,0 +1,172 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Get an aligned vector implementation. Must be included before <vector>. The
+// Eigen guys went through some trouble to make a portable override for the
+// fixed size vector types.
+
+#ifndef LIBMV_BASE_VECTOR_H
+#define LIBMV_BASE_VECTOR_H
+
+#include <cstring>
+#include <new>
+
+#include <Eigen/Core>
+
+namespace libmv {
+
+// A simple container class, which guarantees 16 byte alignment needed for most
+// vectorization. Don't use this container for classes that cannot be copied
+// via memcpy.
+// FIXME: this class has some issues:
+// - it doesn't support iterators;
+// - it impedes compatibility with code using the STL;
+// - the STL already provides support for custom allocators, so this class
+//   could be replaced with the simple declaration sketched below, provided
+//   nothing relies on libmv::vector-specific behavior.
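+//
+// A possible replacement (untested sketch; callers relying on this class's
+// int-based accessors would need auditing first):
+//
+//   template <typename T>
+//   class vector : public std::vector<T, Eigen::aligned_allocator<T> > {};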
+template <typename T,
+ typename Allocator = Eigen::aligned_allocator<T> >
+class vector {
+ public:
+ ~vector() { clear(); }
+
+ vector() { init(); }
+ vector(int size) { init(); resize(size); }
+ vector(int size, const T & val) {
+ init();
+ resize(size);
+ std::fill(data_, data_ + size_, val);
+ }
+
+ // Copy constructor and assignment.
+ vector(const vector<T, Allocator> &rhs) {
+ init();
+ copy(rhs);
+ }
+ vector<T, Allocator> &operator=(const vector<T, Allocator> &rhs) {
+ if (&rhs != this) {
+ copy(rhs);
+ }
+ return *this;
+ }
+
+ /// Swaps the contents of two vectors in constant time.
+ void swap(vector<T, Allocator> &other) {
+ std::swap(allocator_, other.allocator_);
+ std::swap(size_, other.size_);
+ std::swap(capacity_, other.capacity_);
+ std::swap(data_, other.data_);
+ }
+
+ T *data() const { return data_; }
+ int size() const { return size_; }
+ int capacity() const { return capacity_; }
+ const T& back() const { return data_[size_ - 1]; }
+ T& back() { return data_[size_ - 1]; }
+ const T& front() const { return data_[0]; }
+ T& front() { return data_[0]; }
+ const T& operator[](int n) const { return data_[n]; }
+ T& operator[](int n) { return data_[n]; }
+ const T * begin() const { return data_; }
+ const T * end() const { return data_+size_; }
+ T * begin() { return data_; }
+ T * end() { return data_+size_; }
+
+ void resize(size_t size) {
+ reserve(size);
+ if (size > size_) {
+ construct(size_, size);
+ } else if (size < size_) {
+ destruct(size, size_);
+ }
+ size_ = size;
+ }
+
+ void push_back(const T &value) {
+ if (size_ == capacity_) {
+ reserve(size_ ? 2 * size_ : 1);
+ }
+ new (&data_[size_++]) T(value);
+ }
+
+ void pop_back() {
+ resize(size_ - 1);
+ }
+
+ void clear() {
+ destruct(0, size_);
+ deallocate();
+ init();
+ }
+
+ void reserve(unsigned int size) {
+ if (size > size_) {
+ T *data = static_cast<T *>(allocate(size));
+ memcpy(data, data_, sizeof(*data)*size_);
+ allocator_.deallocate(data_, capacity_);
+ data_ = data;
+ capacity_ = size;
+ }
+ }
+
+ private:
+ void construct(int start, int end) {
+ for (int i = start; i < end; ++i) {
+ new (&data_[i]) T;
+ }
+ }
+ void destruct(int start, int end) {
+ for (int i = start; i < end; ++i) {
+ data_[i].~T();
+ }
+ }
+ void init() {
+ size_ = 0;
+ data_ = 0;
+ capacity_ = 0;
+ }
+
+ void *allocate(int size) {
+ return size ? allocator_.allocate(size) : 0;
+ }
+
+ void deallocate() {
+ // The deallocation size must match the allocation size, which is
+ // capacity_ (see reserve()), not size_.
+ allocator_.deallocate(data_, capacity_);
+ data_ = 0;
+ }
+
+ void copy(const vector<T, Allocator> &rhs) {
+ resize(rhs.size());
+ for (int i = 0; i < rhs.size(); ++i) {
+ (*this)[i] = rhs[i];
+ }
+ }
+
+ Allocator allocator_;
+ size_t size_;
+ size_t capacity_;
+ T *data_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_BASE_VECTOR_H
diff --git a/extern/libmv/libmv/base/vector_utils.h b/extern/libmv/libmv/base/vector_utils.h
new file mode 100644
index 00000000000..7a0c3ba24f5
--- /dev/null
+++ b/extern/libmv/libmv/base/vector_utils.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+
+#ifndef LIBMV_BASE_VECTOR_UTILS_H_
+#define LIBMV_BASE_VECTOR_UTILS_H_
+
+/// Delete the contents of a container.
+template <class Array>
+void DeleteElements(Array *array) {
+ for (int i = 0; i < array->size(); ++i) {
+ delete (*array)[i];
+ }
+ array->clear();
+}
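+
+// Usage sketch (illustrative only; Feature is a made-up type):
+//   libmv::vector<Feature *> features;
+//   ...
+//   DeleteElements(&features);  // deletes every element, then clears.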
+
+#endif // LIBMV_BASE_VECTOR_UTILS_H_
diff --git a/extern/libmv/libmv/image/array_nd.cc b/extern/libmv/libmv/image/array_nd.cc
new file mode 100644
index 00000000000..3a77e3e4881
--- /dev/null
+++ b/extern/libmv/libmv/image/array_nd.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/image/image.h"
+#include <iostream>
+#include <cmath>
+
+namespace libmv {
+
+void FloatArrayToScaledByteArray(const Array3Df &float_array,
+ Array3Du *byte_array,
+ bool automatic_range_detection
+ ) {
+ byte_array->ResizeLike(float_array);
+ float minval = HUGE_VAL;
+ float maxval = -HUGE_VAL;
+ if (automatic_range_detection) {
+ for (int i = 0; i < float_array.Height(); ++i) {
+ for (int j = 0; j < float_array.Width(); ++j) {
+ for (int k = 0; k < float_array.Depth(); ++k) {
+ minval = std::min(minval, float_array(i,j,k));
+ maxval = std::max(maxval, float_array(i,j,k));
+ }
+ }
+ }
+ } else {
+ minval = 0;
+ maxval = 1;
+ }
+ for (int i = 0; i < float_array.Height(); ++i) {
+ for (int j = 0; j < float_array.Width(); ++j) {
+ for (int k = 0; k < float_array.Depth(); ++k) {
+ float unscaled = (float_array(i,j,k) - minval) / (maxval - minval);
+ (*byte_array)(i,j,k) = (unsigned char)(255 * unscaled);
+ }
+ }
+ }
+}
+
+void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
+ Array3Df *float_array) {
+ float_array->ResizeLike(byte_array);
+ for (int i = 0; i < byte_array.Height(); ++i) {
+ for (int j = 0; j < byte_array.Width(); ++j) {
+ for (int k = 0; k < byte_array.Depth(); ++k) {
+ (*float_array)(i,j,k) = float(byte_array(i,j,k)) / 255.0f;
+ }
+ }
+ }
+}
+
+void SplitChannels(const Array3Df &input,
+ Array3Df *channel0,
+ Array3Df *channel1,
+ Array3Df *channel2) {
+ assert(input.Depth() >= 3);
+ channel0->Resize(input.Height(), input.Width());
+ channel1->Resize(input.Height(), input.Width());
+ channel2->Resize(input.Height(), input.Width());
+ for (int row = 0; row < input.Height(); ++row) {
+ for (int column = 0; column < input.Width(); ++column) {
+ (*channel0)(row, column) = input(row, column, 0);
+ (*channel1)(row, column) = input(row, column, 1);
+ (*channel2)(row, column) = input(row, column, 2);
+ }
+ }
+}
+
+void PrintArray(const Array3Df &array) {
+ using namespace std;
+
+ printf("[\n");
+ for (int r = 0; r < array.Height(); ++r) {
+ printf("[");
+ for (int c = 0; c < array.Width(); ++c) {
+ if (array.Depth() == 1) {
+ printf("%11f, ", array(r, c));
+ } else {
+ printf("[");
+ for (int k = 0; k < array.Depth(); ++k) {
+ printf("%11f, ", array(r, c, k));
+ }
+ printf("],");
+ }
+ }
+ printf("],\n");
+ }
+ printf("]\n");
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/image/array_nd.h b/extern/libmv/libmv/image/array_nd.h
new file mode 100644
index 00000000000..6d7570cda9b
--- /dev/null
+++ b/extern/libmv/libmv/image/array_nd.h
@@ -0,0 +1,473 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_ARRAY_ND_H
+#define LIBMV_IMAGE_ARRAY_ND_H
+
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+
+#include "libmv/image/tuple.h"
+
+namespace libmv {
+
+class BaseArray {};
+
+/// A multidimensional array class.
+template <typename T, int N>
+class ArrayND : public BaseArray {
+ public:
+ typedef T Scalar;
+
+ /// Type for the multidimensional indices.
+ typedef Tuple<int, N> Index;
+
+ /// Create an empty array.
+ ArrayND() : data_(NULL), own_data(true) { Resize(Index(0)); }
+
+ /// Create an array with the specified shape.
+ ArrayND(const Index &shape) : data_(NULL), own_data(true) { Resize(shape); }
+
+ /// Create an array with the specified shape.
+ ArrayND(int *shape) : data_(NULL), own_data(true) { Resize(shape); }
+
+ /// Copy constructor.
+ ArrayND(const ArrayND<T, N> &b) : data_(NULL), own_data(true) {
+ ResizeLike(b);
+ std::memcpy(Data(), b.Data(), sizeof(T) * Size());
+ }
+
+ ArrayND(int s0) : data_(NULL), own_data(true) { Resize(s0); }
+ ArrayND(int s0, int s1) : data_(NULL), own_data(true) { Resize(s0, s1); }
+ ArrayND(int s0, int s1, int s2) : data_(NULL), own_data(true) { Resize(s0, s1, s2); }
+
+ ArrayND(T* data, int s0, int s1, int s2) : data_(data), own_data(false) { Resize(s0, s1, s2); }
+
+ /// Destructor deletes pixel data.
+ ~ArrayND() {
+ delete [] data_;
+ }
+
+ /// Assignment copies pixel data.
+ ArrayND &operator=(const ArrayND<T, N> &b) {
+ assert(this != &b);
+ ResizeLike(b);
+ std::memcpy(Data(), b.Data(), sizeof(T) * Size());
+ return *this;
+ }
+
+ const Index &Shapes() const {
+ return shape_;
+ }
+
+ const Index &Strides() const {
+ return strides_;
+ }
+
+ /// Resize the array to the given shape; data is preserved only when the shape is unchanged.
+ void Resize(const Index &new_shape) {
+ if (data_ != NULL && shape_ == new_shape) {
+ // Don't bother reallocating if the shapes match.
+ return;
+ }
+ shape_.Reset(new_shape);
+ strides_(N - 1) = 1;
+ for (int i = N - 1; i > 0; --i) {
+ strides_(i - 1) = strides_(i) * shape_(i);
+ }
+ if(own_data) {
+ delete [] data_;
+ data_ = NULL;
+ if (Size() > 0) {
+ data_ = new T[Size()];
+ }
+ }
+ }
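+
+ // For example, a 3-D array of shape (H, W, D) gets row-major strides
+ // (W*D, D, 1), so Offset(i, j, k) below evaluates to i*W*D + j*D + k.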
+
+ template<typename D>
+ void ResizeLike(const ArrayND<D,N> &other) {
+ Resize(other.Shape());
+ }
+
+ /// Resize the array to the given shape. All data is lost.
+ void Resize(const int *new_shape_array) {
+ Resize(Index(new_shape_array));
+ }
+
+ /// Resize a 1D array to length s0.
+ void Resize(int s0) {
+ assert(N == 1);
+ int shape[] = {s0};
+ Resize(shape);
+ }
+
+ /// Resize a 2D array to shape (s0,s1).
+ void Resize(int s0, int s1) {
+ int shape[N] = {s0, s1};
+ for (int i = 2; i < N; ++i) {
+ shape[i] = 1;
+ }
+ Resize(shape);
+ }
+
+ // Match Eigen2's API.
+ void resize(int rows, int cols) {
+ Resize(rows, cols);
+ }
+
+ /// Resize a 3D array to shape (s0,s1,s2).
+ void Resize(int s0, int s1, int s2) {
+ assert(N == 3);
+ int shape[] = {s0,s1,s2};
+ Resize(shape);
+ }
+
+ template<typename D>
+ void CopyFrom(const ArrayND<D,N> &other) {
+ ResizeLike(other);
+ T *data = Data();
+ const D *other_data = other.Data();
+ for (int i = 0; i < Size(); ++i) {
+ data[i] = T(other_data[i]);
+ }
+ }
+
+ void Fill(T value) {
+ for (int i = 0; i < Size(); ++i) {
+ Data()[i] = value;
+ }
+ }
+
+ // Match Eigen's API.
+ void fill(T value) {
+ for (int i = 0; i < Size(); ++i) {
+ Data()[i] = value;
+ }
+ }
+
+ /// Return a tuple containing the length of each axis.
+ const Index &Shape() const {
+ return shape_;
+ }
+
+ /// Return the length of an axis.
+ int Shape(int axis) const {
+ return shape_(axis);
+ }
+
+ /// Return the distance between neighboring elements along axis.
+ int Stride(int axis) const {
+ return strides_(axis);
+ }
+
+ /// Return the number of elements of the array.
+ int Size() const {
+ int size = 1;
+ for (int i = 0; i < N; ++i)
+ size *= Shape(i);
+ return size;
+ }
+
+ /// Return the total amount of memory used by the array.
+ int MemorySizeInBytes() const {
+ return sizeof(*this) + Size() * sizeof(T);
+ }
+
+ /// Pointer to the first element of the array.
+ T *Data() { return data_; }
+
+ /// Constant pointer to the first element of the array.
+ const T *Data() const { return data_; }
+
+ /// Distance between the first element and the element at position index.
+ int Offset(const Index &index) const {
+ int offset = 0;
+ for (int i = 0; i < N; ++i)
+ offset += index(i) * Stride(i);
+ return offset;
+ }
+
+ /// 1D specialization.
+ int Offset(int i0) const {
+ assert(N == 1);
+ return i0 * Stride(0);
+ }
+
+ /// 2D specialization.
+ int Offset(int i0, int i1) const {
+ assert(N == 2);
+ return i0 * Stride(0) + i1 * Stride(1);
+ }
+
+ /// 3D specialization.
+ int Offset(int i0, int i1, int i2) const {
+ assert(N == 3);
+ return i0 * Stride(0) + i1 * Stride(1) + i2 * Stride(2);
+ }
+
+ /// Return a reference to the element at position index.
+ T &operator()(const Index &index) {
+ // TODO(pau) Boundary checking in debug mode.
+ return *( Data() + Offset(index) );
+ }
+
+ /// 1D specialization.
+ T &operator()(int i0) {
+ return *( Data() + Offset(i0) );
+ }
+
+ /// 2D specialization.
+ T &operator()(int i0, int i1) {
+ assert(0 <= i0 && i0 < Shape(0));
+ assert(0 <= i1 && i1 < Shape(1));
+ return *( Data() + Offset(i0,i1) );
+ }
+
+ /// 3D specialization.
+ T &operator()(int i0, int i1, int i2) {
+ assert(0 <= i0 && i0 < Shape(0));
+ assert(0 <= i1 && i1 < Shape(1));
+ assert(0 <= i2 && i2 < Shape(2));
+ return *( Data() + Offset(i0,i1,i2) );
+ }
+
+ /// Return a constant reference to the element at position index.
+ const T &operator()(const Index &index) const {
+ return *( Data() + Offset(index) );
+ }
+
+ /// 1D specialization.
+ const T &operator()(int i0) const {
+ return *( Data() + Offset(i0) );
+ }
+
+ /// 2D specialization.
+ const T &operator()(int i0, int i1) const {
+ assert(0 <= i0 && i0 < Shape(0));
+ assert(0 <= i1 && i1 < Shape(1));
+ return *( Data() + Offset(i0,i1) );
+ }
+
+ /// 3D specialization.
+ const T &operator()(int i0, int i1, int i2) const {
+ return *( Data() + Offset(i0,i1,i2) );
+ }
+
+ /// True if index is inside array.
+ bool Contains(const Index &index) const {
+ for (int i = 0; i < N; ++i)
+ if (index(i) < 0 || index(i) >= Shape(i))
+ return false;
+ return true;
+ }
+
+ /// 1D specialization.
+ bool Contains(int i0) const {
+ return 0 <= i0 && i0 < Shape(0);
+ }
+
+ /// 2D specialization.
+ bool Contains(int i0, int i1) const {
+ return 0 <= i0 && i0 < Shape(0)
+ && 0 <= i1 && i1 < Shape(1);
+ }
+
+ /// 3D specialization.
+ bool Contains(int i0, int i1, int i2) const {
+ return 0 <= i0 && i0 < Shape(0)
+ && 0 <= i1 && i1 < Shape(1)
+ && 0 <= i2 && i2 < Shape(2);
+ }
+
+ bool operator==(const ArrayND<T, N> &other) const {
+ if (shape_ != other.shape_) return false;
+ if (strides_ != other.strides_) return false;
+ for (int i = 0; i < Size(); ++i) {
+ if (this->Data()[i] != other.Data()[i])
+ return false;
+ }
+ return true;
+ }
+
+ bool operator!=(const ArrayND<T, N> &other) const {
+ return !(*this == other);
+ }
+
+ ArrayND<T, N> operator*(const ArrayND<T, N> &other) const {
+ assert(Shape() == other.Shape());
+ ArrayND<T, N> res;
+ res.ResizeLike(*this);
+ for (int i = 0; i < res.Size(); ++i) {
+ res.Data()[i] = Data()[i] * other.Data()[i];
+ }
+ return res;
+ }
+
+ protected:
+ /// The number of elements in each dimension.
+ Index shape_;
+
+ /// How to jump to neighbors in each dimension.
+ Index strides_;
+
+ /// Pointer to the first element of the array.
+ T *data_;
+
+ /// True when this array owns its data; false when it references external data.
+ bool own_data;
+};
+
+/// 3D array (row, column, channel).
+template <typename T>
+class Array3D : public ArrayND<T, 3> {
+ typedef ArrayND<T, 3> Base;
+ public:
+ Array3D()
+ : Base() {
+ }
+ Array3D(int height, int width, int depth=1)
+ : Base(height, width, depth) {
+ }
+ Array3D(T* data, int height, int width, int depth=1)
+ : Base(data, height, width, depth) {
+ }
+
+ void Resize(int height, int width, int depth=1) {
+ Base::Resize(height, width, depth);
+ }
+
+ int Height() const {
+ return Base::Shape(0);
+ }
+ int Width() const {
+ return Base::Shape(1);
+ }
+ int Depth() const {
+ return Base::Shape(2);
+ }
+
+ // Match Eigen2's API so that Array3D's and Mat*'s can work together via
+ // template magic.
+ int rows() const { return Height(); }
+ int cols() const { return Width(); }
+ int depth() const { return Depth(); }
+
+ int Get_Step() const { return Width()*Depth(); }
+
+ /// Enable accessing with 2 indices for grayscale images.
+ T &operator()(int i0, int i1, int i2 = 0) {
+ assert(0 <= i0 && i0 < Height());
+ assert(0 <= i1 && i1 < Width());
+ return Base::operator()(i0,i1,i2);
+ }
+ const T &operator()(int i0, int i1, int i2 = 0) const {
+ assert(0 <= i0 && i0 < Height());
+ assert(0 <= i1 && i1 < Width());
+ return Base::operator()(i0,i1,i2);
+ }
+};
+
+typedef Array3D<unsigned char> Array3Du;
+typedef Array3D<unsigned int> Array3Dui;
+typedef Array3D<int> Array3Di;
+typedef Array3D<float> Array3Df;
+typedef Array3D<short> Array3Ds;
+
+void SplitChannels(const Array3Df &input,
+ Array3Df *channel0,
+ Array3Df *channel1,
+ Array3Df *channel2);
+
+void PrintArray(const Array3Df &array);
+
+/** Convert a float array into a byte array by linearly mapping [min, max]
+ * to [0, 255], where min and max are detected automatically when
+ * automatic_range_detection is true, and assumed to be 0 and 1 otherwise.
+ * \note TODO: the automatic detection maps the extrema actually present in
+ * the image, so it only matches a nominal range when the image contains at
+ * least one pixel at both bounds.
+ **/
+void FloatArrayToScaledByteArray(const Array3Df &float_array,
+ Array3Du *byte_array,
+ bool automatic_range_detection = false);
+
+//! Convert a byte array into a float array by dividing values by 255.
+void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
+ Array3Df *float_array);
+
+template <typename AArrayType, typename BArrayType, typename CArrayType>
+void MultiplyElements( const AArrayType &a,
+ const BArrayType &b,
+ CArrayType *c ) {
+ // This function does an element-wise multiply between
+ // the two Arrays A and B, and stores the result in C.
+ // A and B must have the same dimensions.
+ assert( a.Shape() == b.Shape() );
+ c->ResizeLike(a);
+
+ // To perform the multiplication, a "current" index into the N dimensions
+ // of the A and B arrays specifies which elements are being multiplied.
+ typename CArrayType::Index index;
+
+ // The index starts at the maximum value for each dimension
+ const typename CArrayType::Index& cShape = c->Shape();
+ for ( int i = 0; i < CArrayType::Index::SIZE; ++i )
+ index(i) = cShape(i) - 1;
+
+ // After each multiplication, the highest-dimensional index is decremented.
+ // If this takes it below zero, it resets to its maximum value and the index
+ // of the next lower dimension is decremented in turn. This ripple action
+ // continues until the entire new array has been calculated, indicated by
+ // dimension zero reaching a negative index.
+ while ( index(0) >= 0 ) {
+ (*c)(index) = a(index) * b(index);
+
+ int dimension = CArrayType::Index::SIZE - 1;
+ index(dimension) = index(dimension) - 1;
+ while ( dimension > 0 && index(dimension) < 0 ) {
+ index(dimension) = cShape(dimension) - 1;
+ index(dimension - 1) = index(dimension - 1) - 1;
+ --dimension;
+ }
+ }
+}
+
+template <typename TA, typename TB, typename TC>
+void MultiplyElements(const ArrayND<TA, 3> &a,
+ const ArrayND<TB, 3> &b,
+ ArrayND<TC, 3> *c) {
+ // Specialization for N==3
+ c->ResizeLike(a);
+ assert(a.Shape(0) == b.Shape(0));
+ assert(a.Shape(1) == b.Shape(1));
+ assert(a.Shape(2) == b.Shape(2));
+ for (int i = 0; i < a.Shape(0); ++i) {
+ for (int j = 0; j < a.Shape(1); ++j) {
+ for (int k = 0; k < a.Shape(2); ++k) {
+ (*c)(i, j, k) = TC(a(i, j, k) * b(i, j, k));
+ }
+ }
+ }
+}
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_ARRAY_ND_H
diff --git a/extern/libmv/libmv/image/convolve.cc b/extern/libmv/libmv/image/convolve.cc
new file mode 100644
index 00000000000..be73a1a3263
--- /dev/null
+++ b/extern/libmv/libmv/image/convolve.cc
@@ -0,0 +1,305 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <cmath>
+
+#include "libmv/image/image.h"
+#include "libmv/image/convolve.h"
+
+namespace libmv {
+
+// Compute a Gaussian kernel and its derivative, so that an image derivative
+// can be taken by convolving with the smoothing kernel along one axis and the
+// derivative kernel along the other (e.g. kernel horizontally, then
+// derivative vertically, for the y derivative).
+void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
+ assert(sigma >= 0.0);
+
+ // A truncation factor of 0.004 implies a kernel half-width of about 3 pixels for a 1 pixel sigma.
+ const float truncation_factor = 0.004f;
+
+ // Calculate the kernel size based on sigma such that it is odd.
+ float precisehalfwidth = GaussianInversePositive(truncation_factor, sigma);
+ int width = lround(2*precisehalfwidth);
+ if (width % 2 == 0) {
+ width++;
+ }
+ // Calculate the gaussian kernel and its derivative.
+ kernel->resize(width);
+ derivative->resize(width);
+ kernel->setZero();
+ derivative->setZero();
+ int halfwidth = width / 2;
+ for (int i = -halfwidth; i <= halfwidth; ++i) {
+ (*kernel)(i + halfwidth) = Gaussian(i, sigma);
+ (*derivative)(i + halfwidth) = GaussianDerivative(i, sigma);
+ }
+ // Since images should not get brighter or darker, normalize.
+ NormalizeL1(kernel);
+
+ // Normalize the derivative differently. See
+ // www.cs.duke.edu/courses/spring03/cps296.1/handouts/Image%20Processing.pdf
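+ // The factor below is chosen so that the sum over i of -i * derivative(i)
+ // equals 1, giving the kernel unit response magnitude on a unit-slope ramp.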
+ double factor = 0.;
+ for (int i = -halfwidth; i <= halfwidth; ++i) {
+ factor -= i*(*derivative)(i+halfwidth);
+ }
+ *derivative /= factor;
+}
+
+template <int size, bool vertical>
+void FastConvolve(const Vec &kernel, int width, int height,
+ const float* src, int src_stride, int src_line_stride,
+ float* dst, int dst_stride) {
+ double coefficients[2 * size + 1];
+ for (int k = 0; k < 2 * size + 1; ++k) {
+ coefficients[k] = kernel(2 * size - k);
+ }
+ // Fast path: the kernel half-width is a compile-time template constant, so
+ // the inner loop bounds are fixed and the compiler can unroll them.
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ double sum = 0;
+ for (int k = -size; k <= size; ++k) {
+ if (vertical) {
+ if (y + k >= 0 && y + k < height) {
+ sum += src[k * src_line_stride] * coefficients[k + size];
+ }
+ } else {
+ if (x + k >= 0 && x + k < width) {
+ sum += src[k * src_stride] * coefficients[k + size];
+ }
+ }
+ }
+ dst[0] = static_cast<float>(sum);
+ src += src_stride;
+ dst += dst_stride;
+ }
+ }
+}
+
+template<bool vertical>
+void Convolve(const Array3Df &in,
+ const Vec &kernel,
+ Array3Df *out_pointer,
+ int plane) {
+ int width = in.Width();
+ int height = in.Height();
+ Array3Df &out = *out_pointer;
+ if (plane == -1) {
+ out.ResizeLike(in);
+ plane = 0;
+ }
+
+ assert(kernel.size() % 2 == 1);
+ assert(&in != out_pointer);
+
+ int src_line_stride = in.Stride(0);
+ int src_stride = in.Stride(1);
+ int dst_stride = out.Stride(1);
+ const float* src = in.Data();
+ float* dst = out.Data() + plane;
+
+ // Use a dispatch table to make most convolutions used in practice use the
+ // fast path.
+ int half_width = kernel.size() / 2;
+ switch (half_width) {
+#define static_convolution( size ) case size: \
+ FastConvolve<size, vertical>(kernel, width, height, src, src_stride, \
+ src_line_stride, dst, dst_stride); break;
+ static_convolution(1)
+ static_convolution(2)
+ static_convolution(3)
+ static_convolution(4)
+ static_convolution(5)
+ static_convolution(6)
+ static_convolution(7)
+#undef static_convolution
+ default:
+ int dynamic_size = kernel.size() / 2;
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ double sum = 0;
+ // Slow path: this loop cannot be unrolled.
+ for (int k = -dynamic_size; k <= dynamic_size; ++k) {
+ if(vertical) {
+ if (y + k >= 0 && y + k < height) {
+ sum += src[k * src_line_stride] * kernel(2 * dynamic_size - (k + dynamic_size));
+ }
+ } else {
+ if (x + k >= 0 && x + k < width) {
+ sum += src[k * src_stride] * kernel(2 * dynamic_size - (k + dynamic_size));
+ }
+ }
+ }
+ dst[0] = static_cast<float>(sum);
+ src += src_stride;
+ dst += dst_stride;
+ }
+ }
+ }
+}
+
+void ConvolveHorizontal(const Array3Df &in,
+ const Vec &kernel,
+ Array3Df *out_pointer,
+ int plane) {
+ Convolve<false>(in, kernel, out_pointer, plane);
+}
+
+void ConvolveVertical(const Array3Df &in,
+ const Vec &kernel,
+ Array3Df *out_pointer,
+ int plane) {
+ Convolve<true>(in, kernel, out_pointer, plane);
+}
+
+void ConvolveGaussian(const Array3Df &in,
+ double sigma,
+ Array3Df *out_pointer) {
+ Vec kernel, derivative;
+ ComputeGaussianKernel(sigma, &kernel, &derivative);
+
+ Array3Df tmp;
+ ConvolveVertical(in, kernel, &tmp);
+ ConvolveHorizontal(tmp, kernel, out_pointer);
+}
+
+void BlurredImageAndDerivatives(const Array3Df &in,
+ double sigma,
+ Array3Df *blurred_image,
+ Array3Df *gradient_x,
+ Array3Df *gradient_y) {
+ Vec kernel, derivative;
+ ComputeGaussianKernel(sigma, &kernel, &derivative);
+ Array3Df tmp;
+
+ // Compute convolved image.
+ ConvolveVertical(in, kernel, &tmp);
+ ConvolveHorizontal(tmp, kernel, blurred_image);
+
+ // Compute first derivative in x (reusing vertical convolution above).
+ ConvolveHorizontal(tmp, derivative, gradient_x);
+
+ // Compute first derivative in y.
+ ConvolveHorizontal(in, kernel, &tmp);
+ ConvolveVertical(tmp, derivative, gradient_y);
+}
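+
+// Note: this works because the 2-D Gaussian is separable: since
+// d/dx (G(x)G(y)) = G'(x)G(y), the x gradient is the derivative kernel
+// applied horizontally to the vertically smoothed image, and symmetrically
+// for the y gradient.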
+
+// Compute the gaussian blur of an image and the derivatives of the blurred
+// image, and store the results in three channels. Since the blurred value and
+// gradients are closer in memory, this leads to better performance if all
+// three values are needed at the same time.
+void BlurredImageAndDerivativesChannels(const Array3Df &in,
+ double sigma,
+ Array3Df *blurred_and_gradxy) {
+ assert(in.Depth() == 1);
+
+ Vec kernel, derivative;
+ ComputeGaussianKernel(sigma, &kernel, &derivative);
+
+ // Compute convolved image.
+ Array3Df tmp;
+ ConvolveVertical(in, kernel, &tmp);
+ blurred_and_gradxy->Resize(in.Height(), in.Width(), 3);
+ ConvolveHorizontal(tmp, kernel, blurred_and_gradxy, 0);
+
+ // Compute first derivative in x.
+ ConvolveHorizontal(tmp, derivative, blurred_and_gradxy, 1);
+
+ // Compute first derivative in y.
+ ConvolveHorizontal(in, kernel, &tmp);
+ ConvolveVertical(tmp, derivative, blurred_and_gradxy, 2);
+}
+
+void BoxFilterHorizontal(const Array3Df &in,
+ int window_size,
+ Array3Df *out_pointer) {
+ Array3Df &out = *out_pointer;
+ out.ResizeLike(in);
+ int half_width = (window_size - 1) / 2;
+
+ for (int k = 0; k < in.Depth(); ++k) {
+ for (int i=0; i<in.Height(); ++i) {
+ float sum = 0;
+ // Init sum.
+ for (int j=0; j<half_width; ++j) {
+ sum += in(i, j, k);
+ }
+ // Fill left border.
+ for (int j=0; j < half_width + 1; ++j) {
+ sum += in(i, j + half_width, k);
+ out(i, j, k) = sum;
+ }
+ // Fill interior.
+ for (int j = half_width + 1; j<in.Width()-half_width; ++j) {
+ sum -= in(i, j - half_width - 1, k);
+ sum += in(i, j + half_width, k);
+ out(i, j, k) = sum;
+ }
+ // Fill right border.
+ for (int j = in.Width() - half_width; j<in.Width(); ++j) {
+ sum -= in(i, j - half_width - 1, k);
+ out(i, j, k) = sum;
+ }
+ }
+ }
+}
+
+void BoxFilterVertical(const Array3Df &in,
+ int window_size,
+ Array3Df *out_pointer) {
+ Array3Df &out = *out_pointer;
+ out.ResizeLike(in);
+ int half_width = (window_size - 1) / 2;
+
+ for (int k = 0; k < in.Depth(); ++k) {
+ for (int j = 0; j < in.Width(); ++j) {
+ float sum = 0;
+ // Init sum.
+ for (int i=0; i<half_width; ++i) {
+ sum += in(i, j, k);
+ }
+ // Fill top border.
+ for (int i=0; i < half_width + 1; ++i) {
+ sum += in(i + half_width, j, k);
+ out(i, j, k) = sum;
+ }
+ // Fill interior.
+ for (int i = half_width + 1; i<in.Height()-half_width; ++i) {
+ sum -= in(i - half_width - 1, j, k);
+ sum += in(i + half_width, j, k);
+ out(i, j, k) = sum;
+ }
+ // Fill bottom border.
+ for (int i = in.Height() - half_width; i<in.Height(); ++i) {
+ sum -= in(i - half_width - 1, j, k);
+ out(i, j, k) = sum;
+ }
+ }
+ }
+}
+
+void BoxFilter(const Array3Df &in,
+ int box_width,
+ Array3Df *out) {
+ Array3Df tmp;
+ BoxFilterHorizontal(in, box_width, &tmp);
+ BoxFilterVertical(tmp, box_width, out);
+}
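+
+// Note: the box filters above compute unnormalized window sums; away from
+// the borders, dividing the result by box_width * box_width gives a true
+// box blur.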
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/image/convolve.h b/extern/libmv/libmv/image/convolve.h
new file mode 100644
index 00000000000..c6c995fd674
--- /dev/null
+++ b/extern/libmv/libmv/image/convolve.h
@@ -0,0 +1,93 @@
+// Copyright (c) 2007, 2008, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_CONVOLVE_H_
+#define LIBMV_IMAGE_CONVOLVE_H_
+
+#include "libmv/image/image.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+// TODO(keir): Find a better place for these functions. gaussian.h in numeric?
+
+// Zero mean Gaussian.
+inline double Gaussian(double x, double sigma) {
+ return 1/sqrt(2*M_PI*sigma*sigma) * exp(-(x*x/2/sigma/sigma));
+}
+// 2D gaussian (zero mean)
+// (9) in http://mathworld.wolfram.com/GaussianFunction.html
+inline double Gaussian2D(double x, double y, double sigma) {
+ return 1.0/(2.0*M_PI*sigma*sigma) * exp( -(x*x+y*y)/(2.0*sigma*sigma));
+}
+inline double GaussianDerivative(double x, double sigma) {
+ return -x / sigma / sigma * Gaussian(x, sigma);
+}
+// Solve the inverse of the Gaussian for positive x.
+inline double GaussianInversePositive(double y, double sigma) {
+ return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2*M_PI)));
+}
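+
+// For example, with sigma = 1 and y = 0.004 this returns a half-width of
+// roughly 3 pixels; that is where the 0.004 truncation factor used when
+// building kernels in convolve.cc comes from.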
+
+void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative);
+void ConvolveHorizontal(const FloatImage &in,
+ const Vec &kernel,
+ FloatImage *out_pointer,
+ int plane = -1);
+void ConvolveVertical(const FloatImage &in,
+ const Vec &kernel,
+ FloatImage *out_pointer,
+ int plane = -1);
+void ConvolveGaussian(const FloatImage &in,
+ double sigma,
+ FloatImage *out_pointer);
+
+void ImageDerivatives(const FloatImage &in,
+ double sigma,
+ FloatImage *gradient_x,
+ FloatImage *gradient_y);
+
+void BlurredImageAndDerivatives(const FloatImage &in,
+ double sigma,
+ FloatImage *blurred_image,
+ FloatImage *gradient_x,
+ FloatImage *gradient_y);
+
+// Blur and take the gradients of an image, storing the results inside the
+// three channels of blurred_and_gradxy.
+void BlurredImageAndDerivativesChannels(const FloatImage &in,
+ double sigma,
+ FloatImage *blurred_and_gradxy);
+
+void BoxFilterHorizontal(const FloatImage &in,
+ int window_size,
+ FloatImage *out_pointer);
+
+void BoxFilterVertical(const FloatImage &in,
+ int window_size,
+ FloatImage *out_pointer);
+
+void BoxFilter(const FloatImage &in,
+ int box_width,
+ FloatImage *out);
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_CONVOLVE_H_
diff --git a/extern/libmv/libmv/image/image.h b/extern/libmv/libmv/image/image.h
new file mode 100644
index 00000000000..d158b0e0397
--- /dev/null
+++ b/extern/libmv/libmv/image/image.h
@@ -0,0 +1,158 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_IMAGE_H
+#define LIBMV_IMAGE_IMAGE_H
+
+#include <cmath>
+
+#include "libmv/image/array_nd.h"
+
+namespace libmv {
+
+typedef Array3Du ByteImage; // For backwards compatibility.
+typedef Array3Df FloatImage;
+
+// Type added only to manage special 2D array for feature detection
+typedef Array3Di IntImage;
+typedef Array3Ds ShortImage;
+
+// An image class that is a thin wrapper around Array3D's of various types.
+// TODO(keir): Decide if we should add reference counting semantics... Maybe it
+// is the best solution after all.
+class Image {
+ public:
+
+ // Create an image from an array. The image takes ownership of the array.
+ Image(Array3Du *array) : array_type_(BYTE), array_(array) {}
+ Image(Array3Df *array) : array_type_(FLOAT), array_(array) {}
+
+ Image(const Image &img): array_type_(NONE), array_(NULL) {
+ *this = img;
+ }
+
+ // Underlying data type.
+ enum DataType {
+ NONE,
+ BYTE,
+ FLOAT,
+ INT,
+ SHORT,
+ };
+
+ // Size in bytes that the image takes in memory.
+ int MemorySizeInBytes() {
+ int size;
+ switch (array_type_)
+ {
+ case BYTE:
+ size = reinterpret_cast<Array3Du *>(array_)->MemorySizeInBytes();
+ break;
+ case FLOAT:
+ size = reinterpret_cast<Array3Df *>(array_)->MemorySizeInBytes();
+ break;
+ case INT:
+ size = reinterpret_cast<Array3Di *>(array_)->MemorySizeInBytes();
+ break;
+ case SHORT:
+ size = reinterpret_cast<Array3Ds *>(array_)->MemorySizeInBytes();
+ break;
+ default:
+ size = 0;
+ assert(0);
+ }
+ size += sizeof(*this);
+ return size;
+ }
+
+ ~Image() {
+ switch (array_type_)
+ {
+ case BYTE:
+ delete reinterpret_cast<Array3Du *>(array_);
+ break;
+ case FLOAT:
+ delete reinterpret_cast<Array3Df *>(array_);
+ break;
+ case INT:
+ delete reinterpret_cast<Array3Di *>(array_);
+ break;
+ case SHORT:
+ delete reinterpret_cast<Array3Ds *>(array_);
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ Image& operator= (const Image& f) {
+ if (this != &f) {
+ array_type_ = f.array_type_;
+ switch (array_type_)
+ {
+ case BYTE:
+ delete reinterpret_cast<Array3Du *>(array_);
+ array_ = new Array3Du( *(Array3Du *)f.array_);
+ break;
+ case FLOAT:
+ delete reinterpret_cast<Array3Df *>(array_);
+ array_ = new Array3Df( *(Array3Df *)f.array_);
+ break;
+ case INT:
+ delete reinterpret_cast<Array3Di *>(array_);
+ array_ = new Array3Di( *(Array3Di *)f.array_);
+ break;
+ case SHORT:
+ delete reinterpret_cast<Array3Ds *>(array_);
+ array_ = new Array3Ds( *(Array3Ds *)f.array_);
+ break;
+ default:
+ assert(0);
+ }
+ }
+ return *this;
+ }
+
+ Array3Du *AsArray3Du() const {
+ if (array_type_ == BYTE) {
+ return reinterpret_cast<Array3Du *>(array_);
+ }
+ return NULL;
+ }
+
+ Array3Df *AsArray3Df() const {
+ if (array_type_ == FLOAT) {
+ return reinterpret_cast<Array3Df *>(array_);
+ }
+ return NULL;
+ }
+
+ private:
+ DataType array_type_;
+ BaseArray *array_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_IMAGE_H
diff --git a/extern/libmv/libmv/image/sample.h b/extern/libmv/libmv/image/sample.h
new file mode 100644
index 00000000000..cd361231b58
--- /dev/null
+++ b/extern/libmv/libmv/image/sample.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_SAMPLE_H_
+#define LIBMV_IMAGE_SAMPLE_H_
+
+#include "libmv/image/image.h"
+
+namespace libmv {
+
+/// Nearest neighbor interpolation.
+template<typename T>
+inline T SampleNearest(const Array3D<T> &image,
+ float y, float x, int v = 0) {
+ const int i = int(round(y));
+ const int j = int(round(x));
+ return image(i, j, v);
+}
+
+static inline void LinearInitAxis(float fx, int width,
+ int *x1, int *x2,
+ float *dx1, float *dx2) {
+ const int ix = int(fx);
+ if (ix < 0) {
+ *x1 = 0;
+ *x2 = 0;
+ *dx1 = 1;
+ *dx2 = 0;
+ } else if (ix > width-2) {
+ *x1 = width-1;
+ *x2 = width-1;
+ *dx1 = 1;
+ *dx2 = 0;
+ } else {
+ *x1 = ix;
+ *x2 = *x1 + 1;
+ *dx1 = *x2 - fx;
+ *dx2 = 1 - *dx1;
+ }
+}
+
+/// Linear interpolation.
+template<typename T>
+inline T SampleLinear(const Array3D<T> &image, float y, float x, int v = 0) {
+ int x1, y1, x2, y2;
+ float dx1, dy1, dx2, dy2;
+
+ LinearInitAxis(y, image.Height(), &y1, &y2, &dy1, &dy2);
+ LinearInitAxis(x, image.Width(), &x1, &x2, &dx1, &dx2);
+
+ const T im11 = image(y1, x1, v);
+ const T im12 = image(y1, x2, v);
+ const T im21 = image(y2, x1, v);
+ const T im22 = image(y2, x2, v);
+
+ return T(dy1 * ( dx1 * im11 + dx2 * im12 ) +
+ dy2 * ( dx1 * im21 + dx2 * im22 ));
+}
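+
+// For example, SampleLinear(image, 1.25f, 2.5f) blends the four pixels
+// (1,2), (1,3), (2,2) and (2,3) with weights 0.375, 0.375, 0.125 and 0.125.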
+
+// Downsample all channels by 2. If the image has odd width or height, the last
+// row or column is ignored.
+// FIXME(MatthiasF): this implementation shouldn't be in an interface file
+inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
+ int height = in.Height() / 2;
+ int width = in.Width() / 2;
+ int depth = in.Depth();
+
+ out->Resize(height, width, depth);
+
+ // 2x2 box filter downsampling.
+ for (int r = 0; r < height; ++r) {
+ for (int c = 0; c < width; ++c) {
+ for (int k = 0; k < depth; ++k) {
+ (*out)(r, c, k) = (in(2 * r, 2 * c, k) +
+ in(2 * r + 1, 2 * c, k) +
+ in(2 * r, 2 * c + 1, k) +
+ in(2 * r + 1, 2 * c + 1, k)) / 4.0f;
+ }
+ }
+ }
+
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_SAMPLE_H_
diff --git a/extern/libmv/libmv/image/tuple.h b/extern/libmv/libmv/image/tuple.h
new file mode 100644
index 00000000000..79acc9579d0
--- /dev/null
+++ b/extern/libmv/libmv/image/tuple.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_TUPLE_H
+#define LIBMV_IMAGE_TUPLE_H
+
+#include <algorithm>
+
+namespace libmv {
+
+// A vector of elements with fixed length and deep copy semantics.
+template <typename T, int N>
+class Tuple {
+ public:
+ enum { SIZE = N };
+ Tuple() {}
+ Tuple(T initial_value) { Reset(initial_value); }
+
+ template <typename D>
+ Tuple(D *values) { Reset(values); }
+
+ template <typename D>
+ Tuple(const Tuple<D,N> &b) { Reset(b); }
+
+ template <typename D>
+ Tuple& operator=(const Tuple<D,N>& b) {
+ Reset(b);
+ return *this;
+ }
+
+ template <typename D>
+ void Reset(const Tuple<D, N>& b) { Reset(b.Data()); }
+
+ template <typename D>
+ void Reset(D *values) {
+ for(int i=0;i<N;i++) {
+ data_[i] = T(values[i]);
+ }
+ }
+
+ // Set all tuple values to the same thing.
+ void Reset(T value) {
+ for(int i=0;i<N;i++) {
+ data_[i] = value;
+ }
+ }
+
+ // Pointer to the first element.
+ T *Data() { return &data_[0]; }
+ const T *Data() const { return &data_[0]; }
+
+ T &operator()(int i) { return data_[i]; }
+ const T &operator()(int i) const { return data_[i]; }
+
+ bool operator==(const Tuple<T, N> &other) const {
+ for (int i = 0; i < N; ++i) {
+ if ((*this)(i) != other(i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ bool operator!=(const Tuple<T, N> &other) const {
+ return !(*this == other);
+ }
+
+ private:
+ T data_[N];
+};
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_TUPLE_H
diff --git a/extern/libmv/libmv/logging/logging.h b/extern/libmv/libmv/logging/logging.h
new file mode 100644
index 00000000000..af86c4baa42
--- /dev/null
+++ b/extern/libmv/libmv/logging/logging.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_LOGGING_LOGGING_H
+#define LIBMV_LOGGING_LOGGING_H
+
+#include "glog/logging.h"
+
+#define LG LOG(INFO)
+#define V0 LOG(INFO)
+#define V1 LOG(INFO)
+#define V2 LOG(INFO)
+
+#endif // LIBMV_LOGGING_LOGGING_H
diff --git a/extern/libmv/libmv/multiview/conditioning.cc b/extern/libmv/libmv/multiview/conditioning.cc
new file mode 100644
index 00000000000..20e3a88e6cb
--- /dev/null
+++ b/extern/libmv/libmv/multiview/conditioning.cc
@@ -0,0 +1,99 @@
+// Copyright (c) 2010 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/conditioning.h"
+#include "libmv/multiview/projection.h"
+
+namespace libmv {
+
+// HZ 4.4.4, page 109: point conditioning (non-isotropic).
+void PreconditionerFromPoints(const Mat &points, Mat3 *T) {
+ Vec mean, variance;
+ MeanAndVarianceAlongRows(points, &mean, &variance);
+
+ double xfactor = sqrt(2.0 / variance(0));
+ double yfactor = sqrt(2.0 / variance(1));
+
+ // If the variance is 0.0, fall back to an identity scaling factor;
+ // otherwise the division by zero above would propagate NaNs.
+ if (variance(0) < 1e-8)
+ xfactor = mean(0) = 1.0;
+ if (variance(1) < 1e-8)
+ yfactor = mean(1) = 1.0;
+
+ *T << xfactor, 0, -xfactor * mean(0),
+ 0, yfactor, -yfactor * mean(1),
+ 0, 0, 1;
+}
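+
+// The resulting T translates the centroid of the points to the origin and
+// scales each axis so the transformed coordinates have variance 2.
+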
+// HZ 4.4.4, page 107: point conditioning (isotropic).
+void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T) {
+ Vec mean, variance;
+ MeanAndVarianceAlongRows(points, &mean, &variance);
+
+ double var_norm = variance.norm();
+ double factor = sqrt(2.0 / var_norm);
+
+  // If the variance is zero, fall back to an identity scaling; otherwise
+  // the division by zero above yields a non-finite scale factor.
+ if (var_norm < 1e-8) {
+ factor = 1.0;
+ mean.setOnes();
+ }
+
+ *T << factor, 0, -factor * mean(0),
+ 0, factor, -factor * mean(1),
+ 0, 0, 1;
+}
+
+void ApplyTransformationToPoints(const Mat &points,
+ const Mat3 &T,
+ Mat *transformed_points) {
+ int n = points.cols();
+  transformed_points->resize(2, n);
+ Mat3X p(3, n);
+ EuclideanToHomogeneous(points, &p);
+ p = T * p;
+ HomogeneousToEuclidean(p, transformed_points);
+}
+
+void NormalizePoints(const Mat &points,
+ Mat *normalized_points,
+ Mat3 *T) {
+ PreconditionerFromPoints(points, T);
+ ApplyTransformationToPoints(points, *T, normalized_points);
+}
+
+void NormalizeIsotropicPoints(const Mat &points,
+ Mat *normalized_points,
+ Mat3 *T) {
+ IsotropicPreconditionerFromPoints(points, T);
+ ApplyTransformationToPoints(points, *T, normalized_points);
+}
+
+// Denormalize the results. See HZ page 109.
+void UnnormalizerT::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
+ *H = T2.transpose() * (*H) * T1;
+}
+
+void UnnormalizerI::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
+ *H = T2.inverse() * (*H) * T1;
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/multiview/conditioning.h b/extern/libmv/libmv/multiview/conditioning.h
new file mode 100644
index 00000000000..181d7485374
--- /dev/null
+++ b/extern/libmv/libmv/multiview/conditioning.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2010 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_CONDITIONING_H_
+#define LIBMV_MULTIVIEW_CONDITIONING_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+// Point conditioning (non isotropic)
+void PreconditionerFromPoints(const Mat &points, Mat3 *T);
+// Point conditioning (isotropic)
+void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T);
+
+void ApplyTransformationToPoints(const Mat &points,
+ const Mat3 &T,
+ Mat *transformed_points);
+
+void NormalizePoints(const Mat &points,
+ Mat *normalized_points,
+ Mat3 *T);
+
+void NormalizeIsotropicPoints(const Mat &points,
+ Mat *normalized_points,
+ Mat3 *T);
+
+/// Unnormalize using the inverse of the conditioning transforms.
+struct UnnormalizerI {
+ // Denormalize the results. See HZ page 109.
+ static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
+};
+
+/// Unnormalize using the transpose of the conditioning transforms.
+struct UnnormalizerT {
+ // Denormalize the results. See HZ page 109.
+ static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
+};
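+
+// A minimal usage sketch (illustrative; x1, x2 are hypothetical 2xN
+// matrices of matching image points and H an estimated homography):
+//
+//   Mat x1_cond, x2_cond;
+//   Mat3 T1, T2;
+//   NormalizePoints(x1, &x1_cond, &T1);
+//   NormalizePoints(x2, &x2_cond, &T2);
+//   // ... estimate H from (x1_cond, x2_cond) ...
+//   UnnormalizerT::Unnormalize(T1, T2, &H);  // H now maps raw coordinates.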
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_CONDITIONING_H_
diff --git a/extern/libmv/libmv/multiview/euclidean_resection.cc b/extern/libmv/libmv/multiview/euclidean_resection.cc
new file mode 100644
index 00000000000..6d918a1a8bc
--- /dev/null
+++ b/extern/libmv/libmv/multiview/euclidean_resection.cc
@@ -0,0 +1,661 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <cmath>
+#include <limits>
+
+#include <Eigen/SVD>
+#include <Eigen/Geometry>
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/euclidean_resection.h"
+#include "libmv/multiview/projection.h"
+
+namespace libmv {
+namespace euclidean_resection {
+
+bool EuclideanResection(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t,
+ ResectionMethod method) {
+ switch (method) {
+ case RESECTION_ANSAR_DANIILIDIS:
+      EuclideanResectionAnsarDaniilidis(x_camera, X_world, R, t);
+      // This variant does not report degenerate cases, so assume success.
+      return true;
+    case RESECTION_EPNP:
+      return EuclideanResectionEPnP(x_camera, X_world, R, t);
+ default:
+ LOG(FATAL) << "Unknown resection method.";
+ }
+ return false;
+}
+
+bool EuclideanResection(const Mat &x_image,
+ const Mat3X &X_world,
+ const Mat3 &K,
+ Mat3 *R, Vec3 *t,
+ ResectionMethod method) {
+ CHECK(x_image.rows() == 2 || x_image.rows() == 3)
+ << "Invalid size for x_image: "
+ << x_image.rows() << "x" << x_image.cols();
+
+ Mat2X x_camera;
+ if (x_image.rows() == 2) {
+ EuclideanToNormalizedCamera(x_image, K, &x_camera);
+ } else if (x_image.rows() == 3) {
+ HomogeneousToNormalizedCamera(x_image, K, &x_camera);
+ }
+ return EuclideanResection(x_camera, X_world, R, t, method);
+}
+
+void AbsoluteOrientation(const Mat3X &X,
+ const Mat3X &Xp,
+ Mat3 *R,
+ Vec3 *t) {
+ int num_points = X.cols();
+ Vec3 C = X.rowwise().sum() / num_points; // Centroid of X.
+ Vec3 Cp = Xp.rowwise().sum() / num_points; // Centroid of Xp.
+
+ // Normalize the two point sets.
+ Mat3X Xn(3, num_points), Xpn(3, num_points);
+  for (int i = 0; i < num_points; ++i) {
+ Xn.col(i) = X.col(i) - C;
+ Xpn.col(i) = Xp.col(i) - Cp;
+ }
+
+ // Construct the N matrix (pg. 635).
+ double Sxx = Xn.row(0).dot(Xpn.row(0));
+ double Syy = Xn.row(1).dot(Xpn.row(1));
+ double Szz = Xn.row(2).dot(Xpn.row(2));
+ double Sxy = Xn.row(0).dot(Xpn.row(1));
+ double Syx = Xn.row(1).dot(Xpn.row(0));
+ double Sxz = Xn.row(0).dot(Xpn.row(2));
+ double Szx = Xn.row(2).dot(Xpn.row(0));
+ double Syz = Xn.row(1).dot(Xpn.row(2));
+ double Szy = Xn.row(2).dot(Xpn.row(1));
+
+ Mat4 N;
+ N << Sxx + Syy + Szz, Syz - Szy, Szx - Sxz, Sxy - Syx,
+ Syz - Szy, Sxx - Syy - Szz, Sxy + Syx, Szx + Sxz,
+ Szx - Sxz, Sxy + Syx, -Sxx + Syy - Szz, Syz + Szy,
+ Sxy - Syx, Szx + Sxz, Syz + Szy, -Sxx - Syy + Szz;
+
+  // Find the unit quaternion q that maximizes q^T N q. It is the eigenvector
+  // corresponding to the largest eigenvalue.
+ Vec4 q = N.jacobiSvd(Eigen::ComputeFullU).matrixU().col(0);
+
+ // Retrieve the 3x3 rotation matrix.
+ Vec4 qq = q.array() * q.array();
+ double q0q1 = q(0) * q(1);
+ double q0q2 = q(0) * q(2);
+ double q0q3 = q(0) * q(3);
+ double q1q2 = q(1) * q(2);
+ double q1q3 = q(1) * q(3);
+ double q2q3 = q(2) * q(3);
+
+ (*R) << qq(0) + qq(1) - qq(2) - qq(3),
+ 2 * (q1q2 - q0q3),
+ 2 * (q1q3 + q0q2),
+          2 * (q1q2 + q0q3),
+ qq(0) - qq(1) + qq(2) - qq(3),
+ 2 * (q2q3 - q0q1),
+ 2 * (q1q3 - q0q2),
+ 2 * (q2q3 + q0q1),
+ qq(0) - qq(1) - qq(2) + qq(3);
+
+ // Fix the handedness of the R matrix.
+ if (R->determinant() < 0) {
+ R->row(2) = -R->row(2);
+ }
+ // Compute the final translation.
+ *t = Cp - *R * C;
+}
+
+// Convert i and j indices of the original variables into their quadratic
+// permutation single index. It follows that t_ij = t_ji.
+static int IJToPointIndex(int i, int j, int num_points) {
+ // Always make sure that j is bigger than i. This handles t_ij = t_ji.
+ if (j < i) {
+ std::swap(i, j);
+ }
+ int idx;
+ int num_permutation_rows = num_points * (num_points - 1) / 2;
+
+ // All t_ii's are located at the end of the t vector after all t_ij's.
+ if (j == i) {
+ idx = num_permutation_rows + i;
+ } else {
+ int offset = (num_points - i - 1) * (num_points - i) / 2;
+ idx = (num_permutation_rows - offset + j - i - 1);
+ }
+ return idx;
+}
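+
+// Worked example (illustrative) for num_points = 4: the t_ij (i < j) terms
+// occupy indices 0..5 in the order (0,1), (0,2), (0,3), (1,2), (1,3), (2,3),
+// and the t_ii terms follow at indices 6..9.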
+
+// Convert the (i, j) indices of the lambda solution to a linear index.
+static int IJToIndex(int i, int j, int num_lambda) {
+ if (j < i) {
+ std::swap(i, j);
+ }
+ int A = num_lambda * (num_lambda + 1) / 2;
+ int B = num_lambda - i;
+ int C = B * (B + 1) / 2;
+ int idx = A - C + j - i;
+ return idx;
+}
+
+static int Sign(double value) {
+ return (value < 0) ? -1 : 1;
+}
+
+// Organizes a square matrix into a single row constraint on the elements of
+// Lambda to create the constraints in equation (5) in "Linear Pose Estimation
+// from Points or Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no.
+// 5.
+static Vec MatrixToConstraint(const Mat &A,
+ int num_k_columns,
+ int num_lambda) {
+ Vec C(num_k_columns);
+ C.setZero();
+ int idx = 0;
+ for (int i = 0; i < num_lambda; ++i) {
+    for (int j = i; j < num_lambda; ++j) {
+      C(idx) = A(i, j);
+      if (i != j) {
+        C(idx) += A(j, i);
+      }
+      ++idx;
+ }
+ }
+ return C;
+}
+
+// Normalizes the columns of vectors.
+static void NormalizeColumnVectors(Mat3X *vectors) {
+ int num_columns = vectors->cols();
+  for (int i = 0; i < num_columns; ++i) {
+ vectors->col(i).normalize();
+ }
+}
+
+void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R,
+ Vec3 *t) {
+ CHECK(x_camera.cols() == X_world.cols());
+ CHECK(x_camera.cols() > 3);
+
+ int num_points = x_camera.cols();
+
+ // Copy the normalized camera coords into 3 vectors and normalize them so
+ // that they are unit vectors from the camera center.
+ Mat3X x_camera_unit(3, num_points);
+ x_camera_unit.block(0, 0, 2, num_points) = x_camera;
+ x_camera_unit.row(2).setOnes();
+ NormalizeColumnVectors(&x_camera_unit);
+
+ int num_m_rows = num_points * (num_points - 1) / 2;
+ int num_tt_variables = num_points * (num_points + 1) / 2;
+ int num_m_columns = num_tt_variables + 1;
+ Mat M(num_m_columns, num_m_columns);
+ M.setZero();
+ Matu ij_index(num_tt_variables, 2);
+
+ // Create the constraint equations for the t_ij variables (7) and arrange
+ // them into the M matrix (8). Also store the initial (i, j) indices.
+  int row = 0;
+  for (int i = 0; i < num_points; ++i) {
+    for (int j = i + 1; j < num_points; ++j) {
+ M(row, row) = -2 * x_camera_unit.col(i).dot(x_camera_unit.col(j));
+ M(row, num_m_rows + i) = x_camera_unit.col(i).dot(x_camera_unit.col(i));
+ M(row, num_m_rows + j) = x_camera_unit.col(j).dot(x_camera_unit.col(j));
+ Vec3 Xdiff = X_world.col(i) - X_world.col(j);
+ double center_to_point_distance = Xdiff.norm();
+ M(row, num_m_columns - 1) =
+ - center_to_point_distance * center_to_point_distance;
+ ij_index(row, 0) = i;
+ ij_index(row, 1) = j;
+ ++row;
+ }
+ ij_index(i + num_m_rows, 0) = i;
+ ij_index(i + num_m_rows, 1) = i;
+ }
+
+ int num_lambda = num_points + 1; // Dimension of the null space of M.
+ Mat V = M.jacobiSvd(Eigen::ComputeFullV).matrixV().block(0,
+ num_m_rows,
+ num_m_columns,
+ num_lambda);
+
+ // TODO(vess): The number of constraint equations in K (num_k_rows) must be
+ // (num_points + 1) * (num_points + 2)/2. This creates a performance issue
+ // for more than 4 points. It is fine for 4 points at the moment with 18
+ // instead of 15 equations.
+ int num_k_rows = num_m_rows + num_points *
+                   (num_points * (num_points - 1) / 2 - num_points + 1);
+ int num_k_columns = num_lambda * (num_lambda + 1) / 2;
+ Mat K(num_k_rows, num_k_columns);
+ K.setZero();
+
+ // Construct the first part of the K matrix corresponding to (t_ii, t_jk) for
+ // i != j.
+ int counter_k_row = 0;
+ for (int idx1 = num_m_rows; idx1 < num_tt_variables; ++idx1) {
+ for (int idx2 = 0; idx2 < num_m_rows; ++idx2) {
+      unsigned int i = ij_index(idx1, 0);
+      unsigned int j = ij_index(idx2, 0);
+      unsigned int k = ij_index(idx2, 1);
+
+      if (i != j && i != k) {
+ int idx3 = IJToPointIndex(i, j, num_points);
+ int idx4 = IJToPointIndex(i, k, num_points);
+
+ K.row(counter_k_row) =
+          MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2) -
+ V.row(idx3).transpose() * V.row(idx4),
+ num_k_columns,
+ num_lambda);
+ ++counter_k_row;
+ }
+ }
+ }
+
+ // Construct the second part of the K matrix corresponding to (t_ii,t_jk) for
+ // j==k.
+ for (int idx1 = num_m_rows; idx1 < num_tt_variables; ++idx1) {
+ for (int idx2 = idx1 + 1; idx2 < num_tt_variables; ++idx2) {
+ unsigned int i = ij_index(idx1, 0);
+ unsigned int j = ij_index(idx2, 0);
+ unsigned int k = ij_index(idx2, 1);
+
+ int idx3 = IJToPointIndex(i, j, num_points);
+ int idx4 = IJToPointIndex(i, k, num_points);
+
+ K.row(counter_k_row) =
+        MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2) -
+ V.row(idx3).transpose() * V.row(idx4),
+ num_k_columns,
+ num_lambda);
+ ++counter_k_row;
+ }
+ }
+ Vec L_sq = K.jacobiSvd(Eigen::ComputeFullV).matrixV().col(num_k_columns - 1);
+
+ // Pivot on the largest element for numerical stability. Afterwards recover
+ // the sign of the lambda solution.
+ double max_L_sq_value = fabs(L_sq(IJToIndex(0, 0, num_lambda)));
+  int max_L_sq_index = 0;  // Matches the initial max value above.
+ for (int i = 1; i < num_lambda; ++i) {
+ double abs_sq_value = fabs(L_sq(IJToIndex(i, i, num_lambda)));
+ if (max_L_sq_value < abs_sq_value) {
+ max_L_sq_value = abs_sq_value;
+ max_L_sq_index = i;
+ }
+ }
+ // Ensure positiveness of the largest value corresponding to lambda_ii.
+ L_sq = L_sq * Sign(L_sq(IJToIndex(max_L_sq_index,
+ max_L_sq_index,
+ num_lambda)));
+
+ Vec L(num_lambda);
+ L(max_L_sq_index) = sqrt(L_sq(IJToIndex(max_L_sq_index,
+ max_L_sq_index,
+ num_lambda)));
+
+ for (int i = 0; i < num_lambda; ++i) {
+ if (i != max_L_sq_index) {
+ L(i) = L_sq(IJToIndex(max_L_sq_index, i, num_lambda)) / L(max_L_sq_index);
+ }
+ }
+
+ // Correct the scale using the fact that the last constraint is equal to 1.
+ L = L / (V.row(num_m_columns - 1).dot(L));
+ Vec X = V * L;
+
+ // Recover the distances from the camera center to the 3D points Q.
+ Vec d(num_points);
+ d.setZero();
+ for (int c_point = num_m_rows; c_point < num_tt_variables; ++c_point) {
+ d(c_point - num_m_rows) = sqrt(X(c_point));
+ }
+
+ // Create the 3D points in the camera system.
+ Mat X_cam(3, num_points);
+  for (int c_point = 0; c_point < num_points; ++c_point) {
+ X_cam.col(c_point) = d(c_point) * x_camera_unit.col(c_point);
+ }
+ // Recover the camera translation and rotation.
+ AbsoluteOrientation(X_world, X_cam, R, t);
+}
+
+// Selects 4 virtual control points using mean and PCA.
+void SelectControlPoints(const Mat3X &X_world,
+ Mat *X_centered,
+ Mat34 *X_control_points) {
+ size_t num_points = X_world.cols();
+
+ // The first virtual control point, C0, is the centroid.
+ Vec mean, variance;
+ MeanAndVarianceAlongRows(X_world, &mean, &variance);
+ X_control_points->col(0) = mean;
+
+  // Compute the PCA of the centered points.
+  X_centered->resize(3, num_points);
+  for (size_t c = 0; c < num_points; c++) {
+    X_centered->col(c) = X_world.col(c) - mean;
+ }
+ Mat3 X_centered_sq = (*X_centered) * X_centered->transpose();
+ Eigen::JacobiSVD<Mat3> X_centered_sq_svd(X_centered_sq, Eigen::ComputeFullU);
+ Vec3 w = X_centered_sq_svd.singularValues();
+ Mat3 u = X_centered_sq_svd.matrixU();
+ for (size_t c = 0; c < 3; c++) {
+    double k = sqrt(w(c) / num_points);
+    X_control_points->col(c + 1) = mean + k * u.col(c);
+ }
+}
+
+// Computes the barycentric coordinates for all real points
+void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
+ const Mat34 &X_control_points,
+ Mat4X *alphas) {
+ size_t num_points = X_world_centered.cols();
+  Mat3 C2;
+  for (size_t c = 1; c < 4; c++) {
+    C2.col(c - 1) = X_control_points.col(c) - X_control_points.col(0);
+ }
+
+ Mat3 C2inv = C2.inverse();
+ Mat3X a = C2inv * X_world_centered;
+
+ alphas->resize(4, num_points);
+ alphas->setZero();
+ alphas->block(1, 0, 3, num_points) = a;
+ for (size_t c = 0; c < num_points; c++) {
+ (*alphas)(0, c) = 1.0 - alphas->col(c).sum();
+ }
+}
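+
+// Illustrative check: by construction the coefficients of each column sum to
+// one, and X_control_points * alphas.col(c) reproduces the uncentered point
+// X_control_points.col(0) + X_world_centered.col(c).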
+
+// Estimates the coordinates of all real points in the camera coordinate frame
+void ComputePointsCoordinatesInCameraFrame(
+ const Mat4X &alphas,
+ const Vec4 &betas,
+ const Eigen::Matrix<double, 12, 12> &U,
+ Mat3X *X_camera) {
+ size_t num_points = alphas.cols();
+
+ // Estimates the control points in the camera reference frame.
+  Mat34 C2b;
+  C2b.setZero();
+ for (size_t cu = 0; cu < 4; cu++) {
+ for (size_t c = 0; c < 4; c++) {
+ C2b.col(c) += betas(cu) * U.block(11 - cu, c * 3, 1, 3).transpose();
+ }
+ }
+
+ // Estimates the 3D points in the camera reference frame
+ X_camera->resize(3, num_points);
+ for (size_t c = 0; c < num_points; c++) {
+ X_camera->col(c) = C2b * alphas.col(c);
+ }
+
+  // Check the sign of the z coordinate of the points (it should be positive
+  // for points in front of the camera).
+  int num_z_neg = 0;
+  for (int i = 0; i < X_camera->cols(); ++i) {
+    if ((*X_camera)(2, i) < 0) {
+ num_z_neg++;
+ }
+ }
+
+  // If more than half of the z coordinates are negative, flip the signs.
+ if (num_z_neg > 0.5 * X_camera->cols()) {
+ C2b = -C2b;
+ *X_camera = -(*X_camera);
+ }
+}
+
+bool EuclideanResectionEPnP(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t) {
+ CHECK(x_camera.cols() == X_world.cols());
+ CHECK(x_camera.cols() > 3);
+ size_t num_points = X_world.cols();
+
+ // Select the control points.
+ Mat34 X_control_points;
+ Mat X_centered;
+ SelectControlPoints(X_world, &X_centered, &X_control_points);
+
+ // Compute the barycentric coordinates.
+ Mat4X alphas(4, num_points);
+ ComputeBarycentricCoordinates(X_centered, X_control_points, &alphas);
+
+  // Build the M matrix from the barycentric coordinates.
+  Mat M(2 * num_points, 12);
+ for (size_t c = 0; c < num_points; c++) {
+ double a0 = alphas(0, c);
+ double a1 = alphas(1, c);
+ double a2 = alphas(2, c);
+ double a3 = alphas(3, c);
+ double ui = x_camera(0, c);
+ double vi = x_camera(1, c);
+    M.block(2 * c, 0, 2, 12) << a0, 0, a0 * (-ui),
+                                a1, 0, a1 * (-ui),
+                                a2, 0, a2 * (-ui),
+                                a3, 0, a3 * (-ui),
+                                0, a0, a0 * (-vi),
+                                0, a1, a1 * (-vi),
+                                0, a2, a2 * (-vi),
+                                0, a3, a3 * (-vi);
+ }
+
+ // TODO(julien): Avoid the transpose by rewriting the u2.block() calls.
+ Eigen::JacobiSVD<Mat> MtMsvd(M.transpose()*M, Eigen::ComputeFullU);
+ Eigen::Matrix<double, 12, 12> u2 = MtMsvd.matrixU().transpose();
+
+ // Estimate the L matrix.
+ Eigen::Matrix<double, 6, 3> dv1;
+ Eigen::Matrix<double, 6, 3> dv2;
+ Eigen::Matrix<double, 6, 3> dv3;
+ Eigen::Matrix<double, 6, 3> dv4;
+
+ dv1.row(0) = u2.block(11, 0, 1, 3) - u2.block(11, 3, 1, 3);
+ dv1.row(1) = u2.block(11, 0, 1, 3) - u2.block(11, 6, 1, 3);
+ dv1.row(2) = u2.block(11, 0, 1, 3) - u2.block(11, 9, 1, 3);
+ dv1.row(3) = u2.block(11, 3, 1, 3) - u2.block(11, 6, 1, 3);
+ dv1.row(4) = u2.block(11, 3, 1, 3) - u2.block(11, 9, 1, 3);
+ dv1.row(5) = u2.block(11, 6, 1, 3) - u2.block(11, 9, 1, 3);
+ dv2.row(0) = u2.block(10, 0, 1, 3) - u2.block(10, 3, 1, 3);
+ dv2.row(1) = u2.block(10, 0, 1, 3) - u2.block(10, 6, 1, 3);
+ dv2.row(2) = u2.block(10, 0, 1, 3) - u2.block(10, 9, 1, 3);
+ dv2.row(3) = u2.block(10, 3, 1, 3) - u2.block(10, 6, 1, 3);
+ dv2.row(4) = u2.block(10, 3, 1, 3) - u2.block(10, 9, 1, 3);
+ dv2.row(5) = u2.block(10, 6, 1, 3) - u2.block(10, 9, 1, 3);
+ dv3.row(0) = u2.block( 9, 0, 1, 3) - u2.block( 9, 3, 1, 3);
+ dv3.row(1) = u2.block( 9, 0, 1, 3) - u2.block( 9, 6, 1, 3);
+ dv3.row(2) = u2.block( 9, 0, 1, 3) - u2.block( 9, 9, 1, 3);
+ dv3.row(3) = u2.block( 9, 3, 1, 3) - u2.block( 9, 6, 1, 3);
+ dv3.row(4) = u2.block( 9, 3, 1, 3) - u2.block( 9, 9, 1, 3);
+ dv3.row(5) = u2.block( 9, 6, 1, 3) - u2.block( 9, 9, 1, 3);
+ dv4.row(0) = u2.block( 8, 0, 1, 3) - u2.block( 8, 3, 1, 3);
+ dv4.row(1) = u2.block( 8, 0, 1, 3) - u2.block( 8, 6, 1, 3);
+ dv4.row(2) = u2.block( 8, 0, 1, 3) - u2.block( 8, 9, 1, 3);
+ dv4.row(3) = u2.block( 8, 3, 1, 3) - u2.block( 8, 6, 1, 3);
+ dv4.row(4) = u2.block( 8, 3, 1, 3) - u2.block( 8, 9, 1, 3);
+ dv4.row(5) = u2.block( 8, 6, 1, 3) - u2.block( 8, 9, 1, 3);
+
+ Eigen::Matrix<double, 6, 10> L;
+ for (size_t r = 0; r < 6; r++) {
+ L.row(r) << dv1.row(r).dot(dv1.row(r)),
+ 2.0 * dv1.row(r).dot(dv2.row(r)),
+ dv2.row(r).dot(dv2.row(r)),
+ 2.0 * dv1.row(r).dot(dv3.row(r)),
+ 2.0 * dv2.row(r).dot(dv3.row(r)),
+ dv3.row(r).dot(dv3.row(r)),
+ 2.0 * dv1.row(r).dot(dv4.row(r)),
+ 2.0 * dv2.row(r).dot(dv4.row(r)),
+ 2.0 * dv3.row(r).dot(dv4.row(r)),
+ dv4.row(r).dot(dv4.row(r));
+ }
+ Vec6 rho;
+ rho << (X_control_points.col(0) - X_control_points.col(1)).squaredNorm(),
+ (X_control_points.col(0) - X_control_points.col(2)).squaredNorm(),
+ (X_control_points.col(0) - X_control_points.col(3)).squaredNorm(),
+ (X_control_points.col(1) - X_control_points.col(2)).squaredNorm(),
+ (X_control_points.col(1) - X_control_points.col(3)).squaredNorm(),
+ (X_control_points.col(2) - X_control_points.col(3)).squaredNorm();
+
+ // There are three possible solutions based on the three approximations of L
+ // (betas). Below, each one is solved for then the best one is chosen.
+ Mat3X X_camera;
+ Mat3 K; K.setIdentity();
+ vector<Mat3> Rs(3);
+ vector<Vec3> ts(3);
+ Vec rmse(3);
+
+ // TODO(julien): Document where the "1e-3" magical constant comes from below.
+
+ // Find the first possible solution for R, t corresponding to:
+ // Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
+ // Betas_approx_1 = [b00 b01 b02 b03]
+ Vec4 betas = Vec4::Zero();
+ Eigen::Matrix<double, 6, 4> l_6x4;
+ for (size_t r = 0; r < 6; r++) {
+ l_6x4.row(r) << L(r, 0), L(r, 1), L(r, 3), L(r, 6);
+ }
+ Eigen::JacobiSVD<Mat> svd_of_l4(l_6x4,
+ Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec4 b4 = svd_of_l4.solve(rho);
+ if ((l_6x4 * b4).isApprox(rho, 1e-3)) {
+ if (b4(0) < 0) {
+ b4 = -b4;
+ }
+ b4(0) = std::sqrt(b4(0));
+ betas << b4(0), b4(1) / b4(0), b4(2) / b4(0), b4(3) / b4(0);
+ ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
+ AbsoluteOrientation(X_world, X_camera, &Rs[0], &ts[0]);
+ rmse(0) = RootMeanSquareError(x_camera, X_world, K, Rs[0], ts[0]);
+ } else {
+ LOG(ERROR) << "First approximation of beta not good enough.";
+ ts[0].setZero();
+ rmse(0) = std::numeric_limits<double>::max();
+ }
+
+ // Find the second possible solution for R, t corresponding to:
+ // Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
+ // Betas_approx_2 = [b00 b01 b11]
+ betas.setZero();
+ Eigen::Matrix<double, 6, 3> l_6x3;
+ l_6x3 = L.block(0, 0, 6, 3);
+ Eigen::JacobiSVD<Mat> svdOfL3(l_6x3,
+ Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec3 b3 = svdOfL3.solve(rho);
+ VLOG(2) << " rho = " << rho;
+ VLOG(2) << " l_6x3 * b3 = " << l_6x3 * b3;
+ if ((l_6x3 * b3).isApprox(rho, 1e-3)) {
+ if (b3(0) < 0) {
+ betas(0) = std::sqrt(-b3(0));
+ betas(1) = (b3(2) < 0) ? std::sqrt(-b3(2)) : 0;
+ } else {
+ betas(0) = std::sqrt(b3(0));
+ betas(1) = (b3(2) > 0) ? std::sqrt(b3(2)) : 0;
+ }
+ if (b3(1) < 0) {
+ betas(0) = -betas(0);
+ }
+ betas(2) = 0;
+ betas(3) = 0;
+ ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
+ AbsoluteOrientation(X_world, X_camera, &Rs[1], &ts[1]);
+ rmse(1) = RootMeanSquareError(x_camera, X_world, K, Rs[1], ts[1]);
+ } else {
+ LOG(ERROR) << "Second approximation of beta not good enough.";
+ ts[1].setZero();
+ rmse(1) = std::numeric_limits<double>::max();
+ }
+
+ // Find the third possible solution for R, t corresponding to:
+ // Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
+ // Betas_approx_3 = [b00 b01 b11 b02 b12]
+ betas.setZero();
+ Eigen::Matrix<double, 6, 5> l_6x5;
+ l_6x5 = L.block(0, 0, 6, 5);
+ Eigen::JacobiSVD<Mat> svdOfL5(l_6x5,
+ Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec5 b5 = svdOfL5.solve(rho);
+ if ((l_6x5 * b5).isApprox(rho, 1e-3)) {
+ if (b5(0) < 0) {
+ betas(0) = std::sqrt(-b5(0));
+ if (b5(2) < 0) {
+ betas(1) = std::sqrt(-b5(2));
+ } else {
+ b5(2) = 0;
+ }
+ } else {
+ betas(0) = std::sqrt(b5(0));
+ if (b5(2) > 0) {
+ betas(1) = std::sqrt(b5(2));
+ } else {
+ b5(2) = 0;
+ }
+ }
+ if (b5(1) < 0) {
+ betas(0) = -betas(0);
+ }
+ betas(2) = b5(3) / betas(0);
+ betas(3) = 0;
+ ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
+ AbsoluteOrientation(X_world, X_camera, &Rs[2], &ts[2]);
+ rmse(2) = RootMeanSquareError(x_camera, X_world, K, Rs[2], ts[2]);
+ } else {
+ LOG(ERROR) << "Third approximation of beta not good enough.";
+ ts[2].setZero();
+ rmse(2) = std::numeric_limits<double>::max();
+ }
+
+ // Finally, with all three solutions, select the (R, t) with the best RMSE.
+ VLOG(2) << "RMSE for solution 0: " << rmse(0);
+ VLOG(2) << "RMSE for solution 1: " << rmse(0);
+ VLOG(2) << "RMSE for solution 2: " << rmse(0);
+ size_t n = 0;
+ if (rmse(1) < rmse(0)) {
+ n = 1;
+ }
+ if (rmse(2) < rmse(n)) {
+ n = 2;
+ }
+ if (rmse(n) == std::numeric_limits<double>::max()) {
+ LOG(ERROR) << "All three possibilities failed. Reporting failure.";
+ return false;
+ }
+
+ VLOG(1) << "RMSE for best solution #" << n << ": " << rmse(n);
+ *R = Rs[n];
+ *t = ts[n];
+
+ // TODO(julien): Improve the solutions with non-linear refinement.
+ return true;
+}
+
+} // namespace euclidean_resection
+} // namespace libmv
diff --git a/extern/libmv/libmv/multiview/euclidean_resection.h b/extern/libmv/libmv/multiview/euclidean_resection.h
new file mode 100644
index 00000000000..08fa3d90bd3
--- /dev/null
+++ b/extern/libmv/libmv/multiview/euclidean_resection.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2010 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
+#define LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/multiview/projection.h"
+
+namespace libmv {
+namespace euclidean_resection {
+
+enum ResectionMethod {
+ RESECTION_ANSAR_DANIILIDIS,
+ RESECTION_EPNP,
+};
+
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera
+ * from 4 or more 3D points and their normalized images.
+ *
+ * \param x_camera Image points in normalized camera coordinates e.g. x_camera
+ * = inv(K) * x_image.
+ * \param X_world 3D points in the world coordinate system
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ * \param method The resection method to use.
+ */
+bool EuclideanResection(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t,
+ ResectionMethod method = RESECTION_EPNP);
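+
+// A minimal usage sketch (illustrative; the five matching normalized image
+// and world points are assumed to be filled in elsewhere):
+//
+//   Mat2X x_camera(2, 5);
+//   Mat3X X_world(3, 5);
+//   Mat3 R;
+//   Vec3 t;
+//   if (EuclideanResection(x_camera, X_world, &R, &t)) {
+//     // R and t hold the camera rotation and translation.
+//   }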
+
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera
+ * from 4 or more 3D points and their images.
+ *
+ * \param x_image Image points in non-normalized image coordinates. The
+ *                coordinates are laid out one point per column; the matrix
+ *                is 2xN for Euclidean or 3xN for homogeneous 2D coordinates.
+ * \param X_world 3D points in the world coordinate system
+ * \param K Intrinsic parameters camera matrix
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ * \param method Resection method
+ */
+bool EuclideanResection(const Mat &x_image,
+ const Mat3X &X_world,
+ const Mat3 &K,
+ Mat3 *R, Vec3 *t,
+ ResectionMethod method = RESECTION_EPNP);
+
+/**
+ * The absolute orientation algorithm recovers the transformation between a set
+ * of 3D points, X and Xp such that:
+ *
+ * Xp = R*X + t
+ *
+ * The recovery of the absolute orientation follows the article:
+ * Horn, Hilden, "Closed-form solution of absolute orientation using
+ * orthonormal matrices"
+ */
+void AbsoluteOrientation(const Mat3X &X,
+ const Mat3X &Xp,
+ Mat3 *R,
+ Vec3 *t);
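+
+// Illustrative sanity check: on noise-free input the recovered transform
+// satisfies Xp.col(i).isApprox(R * X.col(i) + t) for every column i.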
+
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
+ * more 3D points and their images.
+ *
+ * \param x_camera Image points in normalized camera coordinates, e.g.
+ * x_camera=inv(K)*x_image
+ * \param X_world 3D points in the world coordinate system
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ *
+ * This is the algorithm described in: "Linear Pose Estimation from Points or
+ * Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no. 5.
+ */
+void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t);
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
+ * more 3D points and their images.
+ *
+ * \param x_camera Image points in normalized camera coordinates,
+ * e.g. x_camera = inv(K) * x_image
+ * \param X_world 3D points in the world coordinate system
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ *
+ * This is the algorithm described in:
+ * "{EP$n$P: An Accurate $O(n)$ Solution to the P$n$P Problem", by V. Lepetit
+ * and F. Moreno-Noguer and P. Fua, IJCV 2009. vol. 81, no. 2
+ * \note: the non-linear optimization is not implemented here.
+ */
+bool EuclideanResectionEPnP(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t);
+
+} // namespace euclidean_resection
+} // namespace libmv
+
+#endif /* LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_ */
diff --git a/extern/libmv/libmv/multiview/fundamental.cc b/extern/libmv/libmv/multiview/fundamental.cc
new file mode 100644
index 00000000000..7a6b4a08439
--- /dev/null
+++ b/extern/libmv/libmv/multiview/fundamental.cc
@@ -0,0 +1,391 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/poly.h"
+#include "libmv/multiview/conditioning.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/triangulation.h"
+#include "libmv/multiview/fundamental.h"
+
+namespace libmv {
+
+void EliminateRow(const Mat34 &P, int row, Mat *X) {
+ X->resize(2, 4);
+
+ int first_row = (row + 1) % 3;
+ int second_row = (row + 2) % 3;
+
+ for (int i = 0; i < 4; ++i) {
+ (*X)(0, i) = P(first_row, i);
+ (*X)(1, i) = P(second_row, i);
+ }
+}
+
+void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2) {
+ *P1 << Mat3::Identity(), Vec3::Zero();
+ Vec3 e2;
+ Mat3 Ft = F.transpose();
+ Nullspace(&Ft, &e2);
+ *P2 << CrossProductMatrix(e2) * F, e2;
+}
+
+// Adapted from vgg_F_from_P.
+void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
+ Mat X[3];
+ Mat Y[3];
+ Mat XY;
+
+ for (int i = 0; i < 3; ++i) {
+ EliminateRow(P1, i, X + i);
+ EliminateRow(P2, i, Y + i);
+ }
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VerticalStack(X[j], Y[i], &XY);
+ (*F)(i, j) = XY.determinant();
+ }
+ }
+}
+
+// HZ 11.1 pag.279 (x1 = x, x2 = x')
+// http://www.cs.unc.edu/~marc/tutorial/node54.html
+double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
+ DCHECK_EQ(x1.rows(), 2);
+ DCHECK_GE(x1.cols(), 8);
+ DCHECK_EQ(x1.rows(), x2.rows());
+ DCHECK_EQ(x1.cols(), x2.cols());
+
+ int n = x1.cols();
+ Mat A(n, 9);
+ for (int i = 0; i < n; ++i) {
+ A(i, 0) = x2(0, i) * x1(0, i);
+ A(i, 1) = x2(0, i) * x1(1, i);
+ A(i, 2) = x2(0, i);
+ A(i, 3) = x2(1, i) * x1(0, i);
+ A(i, 4) = x2(1, i) * x1(1, i);
+ A(i, 5) = x2(1, i);
+ A(i, 6) = x1(0, i);
+ A(i, 7) = x1(1, i);
+ A(i, 8) = 1;
+ }
+
+ Vec9 f;
+ double smaller_singular_value = Nullspace(&A, &f);
+ *F = Map<RMat3>(f.data());
+ return smaller_singular_value;
+}
+
+// HZ 11.1.1 pag.280
+void EnforceFundamentalRank2Constraint(Mat3 *F) {
+ Eigen::JacobiSVD<Mat3> USV(*F, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec3 d = USV.singularValues();
+ d(2) = 0.0;
+ *F = USV.matrixU() * d.asDiagonal() * USV.matrixV().transpose();
+}
+
+// HZ 11.2 pag.281 (x1 = x, x2 = x')
+double NormalizedEightPointSolver(const Mat &x1,
+ const Mat &x2,
+ Mat3 *F) {
+ DCHECK_EQ(x1.rows(), 2);
+ DCHECK_GE(x1.cols(), 8);
+ DCHECK_EQ(x1.rows(), x2.rows());
+ DCHECK_EQ(x1.cols(), x2.cols());
+
+ // Normalize the data.
+ Mat3 T1, T2;
+ PreconditionerFromPoints(x1, &T1);
+ PreconditionerFromPoints(x2, &T2);
+ Mat x1_normalized, x2_normalized;
+ ApplyTransformationToPoints(x1, T1, &x1_normalized);
+ ApplyTransformationToPoints(x2, T2, &x2_normalized);
+
+ // Estimate the fundamental matrix.
+ double smaller_singular_value =
+ EightPointSolver(x1_normalized, x2_normalized, F);
+ EnforceFundamentalRank2Constraint(F);
+
+ // Denormalize the fundamental matrix.
+ *F = T2.transpose() * (*F) * T1;
+
+ return smaller_singular_value;
+}
+
+// Seven-point algorithm.
+// http://www.cs.unc.edu/~marc/tutorial/node55.html
+double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
+ const Mat &x2,
+ std::vector<Mat3> *F) {
+ DCHECK_EQ(x1.rows(), 2);
+ DCHECK_EQ(x1.cols(), 7);
+ DCHECK_EQ(x1.rows(), x2.rows());
+  DCHECK_EQ(x1.cols(), x2.cols());
+
+  // Build a 7 x 9 matrix from the point matches, where each row is
+  // equivalent to the equation x'^T * F * x = 0 for a single correspondence
+  // pair (x', x). The domain of the matrix is a 9 element vector
+  // corresponding to F. The nullspace should be 2-dimensional; the two
+  // dimensions correspond to the set of F matrices satisfying the epipolar
+  // geometry.
+ Matrix<double, 7, 9> A;
+ for (int ii = 0; ii < 7; ++ii) {
+ A(ii, 0) = x1(0, ii) * x2(0, ii); // 0 represents x coords,
+ A(ii, 1) = x1(1, ii) * x2(0, ii); // 1 represents y coords.
+ A(ii, 2) = x2(0, ii);
+ A(ii, 3) = x1(0, ii) * x2(1, ii);
+ A(ii, 4) = x1(1, ii) * x2(1, ii);
+ A(ii, 5) = x2(1, ii);
+ A(ii, 6) = x1(0, ii);
+ A(ii, 7) = x1(1, ii);
+ A(ii, 8) = 1.0;
+ }
+
+ // Find the two F matrices in the nullspace of A.
+ Vec9 f1, f2;
+ double s = Nullspace2(&A, &f1, &f2);
+ Mat3 F1 = Map<RMat3>(f1.data());
+ Mat3 F2 = Map<RMat3>(f2.data());
+
+ // Then, use the condition det(F) = 0 to determine F. In other words, solve
+ // det(F1 + a*F2) = 0 for a.
+ double a = F1(0, 0), j = F2(0, 0),
+ b = F1(0, 1), k = F2(0, 1),
+ c = F1(0, 2), l = F2(0, 2),
+ d = F1(1, 0), m = F2(1, 0),
+ e = F1(1, 1), n = F2(1, 1),
+ f = F1(1, 2), o = F2(1, 2),
+ g = F1(2, 0), p = F2(2, 0),
+ h = F1(2, 1), q = F2(2, 1),
+ i = F1(2, 2), r = F2(2, 2);
+
+ // Run fundamental_7point_coeffs.py to get the below coefficients.
+ // The coefficients are in ascending powers of alpha, i.e. P[N]*x^N.
+ double P[4] = {
+ a*e*i + b*f*g + c*d*h - a*f*h - b*d*i - c*e*g,
+ a*e*r + a*i*n + b*f*p + b*g*o + c*d*q + c*h*m + d*h*l + e*i*j + f*g*k -
+ a*f*q - a*h*o - b*d*r - b*i*m - c*e*p - c*g*n - d*i*k - e*g*l - f*h*j,
+ a*n*r + b*o*p + c*m*q + d*l*q + e*j*r + f*k*p + g*k*o + h*l*m + i*j*n -
+ a*o*q - b*m*r - c*n*p - d*k*r - e*l*p - f*j*q - g*l*n - h*j*o - i*k*m,
+ j*n*r + k*o*p + l*m*q - j*o*q - k*m*r - l*n*p,
+ };
+
+ // Solve for the roots of P[3]*x^3 + P[2]*x^2 + P[1]*x + P[0] = 0.
+ double roots[3];
+ int num_roots = SolveCubicPolynomial(P, roots);
+
+ // Build the fundamental matrix for each solution.
+ for (int kk = 0; kk < num_roots; ++kk) {
+ F->push_back(F1 + roots[kk] * F2);
+ }
+ return s;
+}
+
+double FundamentalFromCorrespondences7Point(const Mat &x1,
+ const Mat &x2,
+ std::vector<Mat3> *F) {
+ DCHECK_EQ(x1.rows(), 2);
+ DCHECK_GE(x1.cols(), 7);
+ DCHECK_EQ(x1.rows(), x2.rows());
+ DCHECK_EQ(x1.cols(), x2.cols());
+
+ // Normalize the data.
+ Mat3 T1, T2;
+ PreconditionerFromPoints(x1, &T1);
+ PreconditionerFromPoints(x2, &T2);
+ Mat x1_normalized, x2_normalized;
+ ApplyTransformationToPoints(x1, T1, &x1_normalized);
+ ApplyTransformationToPoints(x2, T2, &x2_normalized);
+
+ // Estimate the fundamental matrix.
+ double smaller_singular_value =
+      FundamentalFrom7CorrespondencesLinear(x1_normalized, x2_normalized, F);
+
+  for (size_t k = 0; k < F->size(); ++k) {
+    Mat3 &Fmat = (*F)[k];
+ // Denormalize the fundamental matrix.
+ Fmat = T2.transpose() * Fmat * T1;
+ }
+ return smaller_singular_value;
+}
+
+void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized) {
+ *F_normalized = F / FrobeniusNorm(F);
+ if ((*F_normalized)(2, 2) < 0) {
+ *F_normalized *= -1;
+ }
+}
+
+double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
+ Vec3 x(x1(0), x1(1), 1.0);
+ Vec3 y(x2(0), x2(1), 1.0);
+
+ Vec3 F_x = F * x;
+ Vec3 Ft_y = F.transpose() * y;
+ double y_F_x = y.dot(F_x);
+
+  return Square(y_F_x) / (F_x.head<2>().squaredNorm()
+                          + Ft_y.head<2>().squaredNorm());
+}
+
+double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
+ Vec3 x(x1(0), x1(1), 1.0);
+ Vec3 y(x2(0), x2(1), 1.0);
+
+ Vec3 F_x = F * x;
+ Vec3 Ft_y = F.transpose() * y;
+ double y_F_x = y.dot(F_x);
+
+  return Square(y_F_x) * (1 / F_x.head<2>().squaredNorm()
+                          + 1 / Ft_y.head<2>().squaredNorm());
+}
+
+// HZ 9.6 pag 257 (formula 9.12)
+void EssentialFromFundamental(const Mat3 &F,
+ const Mat3 &K1,
+ const Mat3 &K2,
+ Mat3 *E) {
+ *E = K2.transpose() * F * K1;
+}
+
+// HZ 9.6 pag 257 (formula 9.12)
+// Or http://ai.stanford.edu/~birch/projective/node20.html
+void FundamentalFromEssential(const Mat3 &E,
+ const Mat3 &K1,
+ const Mat3 &K2,
+ Mat3 *F) {
+ *F = K2.inverse().transpose() * E * K1.inverse();
+}
+
+void RelativeCameraMotion(const Mat3 &R1,
+ const Vec3 &t1,
+ const Mat3 &R2,
+ const Vec3 &t2,
+ Mat3 *R,
+ Vec3 *t) {
+ *R = R2 * R1.transpose();
+ *t = t2 - (*R) * t1;
+}
+
+// HZ 9.6 pag 257
+void EssentialFromRt(const Mat3 &R1,
+ const Vec3 &t1,
+ const Mat3 &R2,
+ const Vec3 &t2,
+ Mat3 *E) {
+ Mat3 R;
+ Vec3 t;
+ RelativeCameraMotion(R1, t1, R2, t2, &R, &t);
+ Mat3 Tx = CrossProductMatrix(t);
+ *E = Tx * R;
+}
+
+// HZ 9.6 pag 259 (Result 9.19)
+void MotionFromEssential(const Mat3 &E,
+ std::vector<Mat3> *Rs,
+ std::vector<Vec3> *ts) {
+ Eigen::JacobiSVD<Mat3> USV(E, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Mat3 U = USV.matrixU();
+ Vec3 d = USV.singularValues();
+ Mat3 Vt = USV.matrixV().transpose();
+
+ // Last column of U is undetermined since d = (a a 0).
+ if (U.determinant() < 0) {
+ U.col(2) *= -1;
+ }
+ // Last row of Vt is undetermined since d = (a a 0).
+ if (Vt.determinant() < 0) {
+ Vt.row(2) *= -1;
+ }
+
+ Mat3 W;
+ W << 0, -1, 0,
+ 1, 0, 0,
+ 0, 0, 1;
+
+ Mat3 U_W_Vt = U * W * Vt;
+ Mat3 U_Wt_Vt = U * W.transpose() * Vt;
+
+ Rs->resize(4);
+ (*Rs)[0] = U_W_Vt;
+ (*Rs)[1] = U_W_Vt;
+ (*Rs)[2] = U_Wt_Vt;
+ (*Rs)[3] = U_Wt_Vt;
+
+ ts->resize(4);
+ (*ts)[0] = U.col(2);
+ (*ts)[1] = -U.col(2);
+ (*ts)[2] = U.col(2);
+ (*ts)[3] = -U.col(2);
+}
+
+int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
+ const std::vector<Vec3> &ts,
+ const Mat3 &K1,
+ const Vec2 &x1,
+ const Mat3 &K2,
+ const Vec2 &x2) {
+ DCHECK_EQ(4, Rs.size());
+ DCHECK_EQ(4, ts.size());
+
+ Mat34 P1, P2;
+ Mat3 R1;
+ Vec3 t1;
+ R1.setIdentity();
+ t1.setZero();
+ P_From_KRt(K1, R1, t1, &P1);
+ for (int i = 0; i < 4; ++i) {
+ const Mat3 &R2 = Rs[i];
+ const Vec3 &t2 = ts[i];
+ P_From_KRt(K2, R2, t2, &P2);
+ Vec3 X;
+ TriangulateDLT(P1, x1, P2, x2, &X);
+ double d1 = Depth(R1, t1, X);
+ double d2 = Depth(R2, t2, X);
+    // Test whether the point is in front of both cameras.
+ if (d1 > 0 && d2 > 0) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
+ const Mat3 &K1,
+ const Vec2 &x1,
+ const Mat3 &K2,
+ const Vec2 &x2,
+ Mat3 *R,
+ Vec3 *t) {
+ std::vector<Mat3> Rs;
+ std::vector<Vec3> ts;
+ MotionFromEssential(E, &Rs, &ts);
+ int solution = MotionFromEssentialChooseSolution(Rs, ts, K1, x1, K2, x2);
+ if (solution >= 0) {
+ *R = Rs[solution];
+ *t = ts[solution];
+ return true;
+ } else {
+ return false;
+ }
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/multiview/fundamental.h b/extern/libmv/libmv/multiview/fundamental.h
new file mode 100644
index 00000000000..3f4a3b7b211
--- /dev/null
+++ b/extern/libmv/libmv/multiview/fundamental.h
@@ -0,0 +1,144 @@
+// Copyright (c) 2007, 2008, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_FUNDAMENTAL_H_
+#define LIBMV_MULTIVIEW_FUNDAMENTAL_H_
+
+#include <vector>
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2);
+void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F);
+
+/**
+ * The normalized 8-point fundamental matrix solver (point coordinates must
+ * be in image space).
+ */
+double NormalizedEightPointSolver(const Mat &x1,
+ const Mat &x2,
+ Mat3 *F);
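+
+// A minimal usage sketch (illustrative; x1 and x2 are hypothetical 2xN
+// matrices, N >= 8, of matching image points):
+//
+//   Mat3 F;
+//   double s = NormalizedEightPointSolver(x1, x2, &F);
+//   // Each match now satisfies x2^T * F * x1 ~ 0; s is the smallest
+//   // singular value of the conditioned linear system (a residual measure).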
+
+/**
+ * The seven-point solver (minimal case; point coordinates must be
+ * normalized beforehand).
+ */
+double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
+ const Mat &x2,
+ std::vector<Mat3> *F);
+
+/**
+ * The seven-point solver (point coordinates must be in image space).
+ */
+double FundamentalFromCorrespondences7Point(const Mat &x1,
+ const Mat &x2,
+ std::vector<Mat3> *F);
+
+/**
+ * Fundamental matrix utility functions.
+ */
+void EnforceFundamentalRank2Constraint(Mat3 *F);
+
+void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized);
+
+/**
+ * Approximate squared reprojection error.
+ *
+ * See page 287 of HZ equation 11.9. This avoids triangulating the point,
+ * relying only on the entries in F.
+ */
+double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
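+
+// As implemented in fundamental.cc, with x, y the homogeneous forms of
+// x1, x2:
+//
+//   SampsonDistance = (y^T F x)^2 / ((F x)_1^2 + (F x)_2^2
+//                                    + (F^T y)_1^2 + (F^T y)_2^2)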
+
+/**
+ * Calculates the sum of the distances from the points to the epipolar lines.
+ *
+ * See page 288 of HZ equation 11.10.
+ */
+double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
+
+/**
+ * Compute the relative camera motion between two cameras.
+ *
+ * Given the motion parameters of two cameras, computes the motion parameters
+ * of the second one assuming the first one to be at the origin.
+ * If T1 and T2 are the camera motions, the computed relative motion is
+ * T = T2 T1^{-1}
+ */
+void RelativeCameraMotion(const Mat3 &R1,
+ const Vec3 &t1,
+ const Mat3 &R2,
+ const Vec3 &t2,
+ Mat3 *R,
+ Vec3 *t);
+
+void EssentialFromFundamental(const Mat3 &F,
+ const Mat3 &K1,
+ const Mat3 &K2,
+ Mat3 *E);
+
+void FundamentalFromEssential(const Mat3 &E,
+ const Mat3 &K1,
+ const Mat3 &K2,
+ Mat3 *F);
+
+void EssentialFromRt(const Mat3 &R1,
+ const Vec3 &t1,
+ const Mat3 &R2,
+ const Vec3 &t2,
+ Mat3 *E);
+
+void MotionFromEssential(const Mat3 &E,
+ std::vector<Mat3> *Rs,
+ std::vector<Vec3> *ts);
+
+/**
+ * Choose one of the four possible motion solutions from an essential matrix.
+ *
+ * Decides the right solution by checking that the triangulation of a match
+ * x1--x2 lies in front of the cameras. See HZ 9.6 pag 259 (9.6.3 Geometrical
+ * interpretation of the 4 solutions)
+ *
+ * \return index of the right solution or -1 if no solution.
+ */
+int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
+ const std::vector<Vec3> &ts,
+ const Mat3 &K1,
+ const Vec2 &x1,
+ const Mat3 &K2,
+ const Vec2 &x2);
+
+bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
+ const Mat3 &K1,
+ const Vec2 &x1,
+ const Mat3 &K2,
+ const Vec2 &x2,
+ Mat3 *R,
+ Vec3 *t);
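+
+// A minimal usage sketch (illustrative; E, K1, K2 and the match x1 <-> x2
+// come from earlier estimation steps):
+//
+//   Mat3 R;
+//   Vec3 t;
+//   if (MotionFromEssentialAndCorrespondence(E, K1, x1, K2, x2, &R, &t)) {
+//     // (R, t) is the relative motion placing the match in front of both
+//     // cameras.
+//   }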
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_FUNDAMENTAL_H_
diff --git a/extern/libmv/libmv/multiview/nviewtriangulation.h b/extern/libmv/libmv/multiview/nviewtriangulation.h
new file mode 100644
index 00000000000..b4f521f185d
--- /dev/null
+++ b/extern/libmv/libmv/multiview/nviewtriangulation.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Compute a 3D position of a point from several images of it. In particular,
+// compute the projective point X in R^4 such that x = PX.
+//
+// The algorithm is the standard DLT; for a derivation see the appendix of
+// Keir's thesis.
+
+#ifndef LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H
+#define LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+// x's are 2D coordinates (x,y,1) in each image; Ps are projective cameras.
+// The output, X, is a homogeneous four-vector.
+template<typename T>
+void NViewTriangulate(const Matrix<T, 2, Dynamic> &x,
+ const vector<Matrix<T, 3, 4> > &Ps,
+ Matrix<T, 4, 1> *X) {
+ int nviews = x.cols();
+ assert(nviews == Ps.size());
+
+ Matrix<T, Dynamic, Dynamic> design(3*nviews, 4 + nviews);
+ design.setConstant(0.0);
+ for (int i = 0; i < nviews; i++) {
+ design.template block<3, 4>(3*i, 0) = -Ps[i];
+ design(3*i + 0, 4 + i) = x(0, i);
+ design(3*i + 1, 4 + i) = x(1, i);
+ design(3*i + 2, 4 + i) = 1.0;
+ }
+ Matrix<T, Dynamic, 1> X_and_alphas;
+ Nullspace(&design, &X_and_alphas);
+ X->resize(4);
+ *X = X_and_alphas.head(4);
+}
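+
+// A minimal usage sketch (illustrative; xs holds one measurement per view
+// and Ps the corresponding cameras, both assumed filled elsewhere):
+//
+//   Mat2X xs(2, nviews);
+//   vector<Mat34> Ps(nviews);
+//   Vec4 X;
+//   NViewTriangulate(xs, Ps, &X);
+//   Vec3 X_euclidean = X.head<3>() / X(3);  // Drop the homogeneous scale.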
+
+// x's are 2D coordinates (x,y,1) in each image; Ps are projective cameras.
+// The output, X, is a homogeneous four-vector.
+// This method uses the algebraic distance approximation.
+// Note that this method works better when the 2D points are normalized
+// with an isotropic normalization.
+template<typename T>
+void NViewTriangulateAlgebraic(const Matrix<T, 2, Dynamic> &x,
+ const vector<Matrix<T, 3, 4> > &Ps,
+ Matrix<T, 4, 1> *X) {
+ int nviews = x.cols();
+ assert(nviews == Ps.size());
+
+ Matrix<T, Dynamic, 4> design(2*nviews, 4);
+ for (int i = 0; i < nviews; i++) {
+ design.template block<2, 4>(2*i, 0) = SkewMatMinimal(x.col(i)) * Ps[i];
+ }
+ X->resize(4);
+ Nullspace(&design, X);
+}
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H
diff --git a/extern/libmv/libmv/multiview/projection.cc b/extern/libmv/libmv/multiview/projection.cc
new file mode 100644
index 00000000000..a7d1a058e9c
--- /dev/null
+++ b/extern/libmv/libmv/multiview/projection.cc
@@ -0,0 +1,221 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P) {
+ P->block<3, 3>(0, 0) = R;
+ P->col(3) = t;
+ (*P) = K * (*P);
+}
+
+void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
+ // Decompose using the RQ decomposition HZ A4.1.1 pag.579.
+ Mat3 K = P.block(0, 0, 3, 3);
+
+ Mat3 Q;
+ Q.setIdentity();
+
+ // Set K(2,1) to zero.
+ if (K(2, 1) != 0) {
+ double c = -K(2,2);
+ double s = K(2,1);
+ double l = sqrt(c * c + s * s);
+ c /= l; s /= l;
+ Mat3 Qx;
+ Qx << 1, 0, 0,
+ 0, c, -s,
+ 0, s, c;
+ K = K * Qx;
+ Q = Qx.transpose() * Q;
+ }
+ // Set K(2,0) to zero.
+ if (K(2, 0) != 0) {
+ double c = K(2, 2);
+ double s = K(2, 0);
+ double l = sqrt(c * c + s * s);
+ c /= l; s /= l;
+ Mat3 Qy;
+ Qy << c, 0, s,
+ 0, 1, 0,
+ -s, 0, c;
+ K = K * Qy;
+ Q = Qy.transpose() * Q;
+ }
+ // Set K(1,0) to zero.
+ if (K(1, 0) != 0) {
+ double c = -K(1, 1);
+ double s = K(1, 0);
+ double l = sqrt(c * c + s * s);
+ c /= l; s /= l;
+ Mat3 Qz;
+ Qz << c,-s, 0,
+ s, c, 0,
+ 0, 0, 1;
+ K = K * Qz;
+ Q = Qz.transpose() * Q;
+ }
+
+ Mat3 R = Q;
+
+ // Ensure that the diagonal is positive.
+ // TODO(pau) Change this to ensure that:
+ // - K(0,0) > 0
+ // - K(2,2) = 1
+ // - det(R) = 1
+ if (K(2,2) < 0) {
+ K = -K;
+ R = -R;
+ }
+ if (K(1,1) < 0) {
+ Mat3 S;
+ S << 1, 0, 0,
+ 0,-1, 0,
+ 0, 0, 1;
+ K = K * S;
+ R = S * R;
+ }
+ if (K(0,0) < 0) {
+ Mat3 S;
+ S << -1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1;
+ K = K * S;
+ R = S * R;
+ }
+
+ // Compute translation.
+ Vec p(3);
+ p << P(0,3), P(1,3), P(2,3);
+  // TODO(pau) This should be done by a SolveLinearSystem(A, b, &x) call.
+ // TODO(keir) use the eigen LU solver syntax...
+ Vec3 t = K.inverse() * p;
+
+  // Scale K so that K(2,2) = 1.
+ K = K / K(2,2);
+
+ *Kp = K;
+ *Rp = R;
+ *tp = t;
+}
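+
+// Illustrative round-trip: composing the two functions recovers the inputs,
+// with K returned scaled so that K(2,2) = 1:
+//
+//   Mat34 P;
+//   P_From_KRt(K, R, t, &P);
+//   Mat3 K2, R2;
+//   Vec3 t2;
+//   KRt_From_P(P, &K2, &R2, &t2);  // K2 ~ K / K(2,2), R2 ~ R, t2 ~ t.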
+
+void ProjectionShiftPrincipalPoint(const Mat34 &P,
+ const Vec2 &principal_point,
+ const Vec2 &principal_point_new,
+ Mat34 *P_new) {
+ Mat3 T;
+ T << 1, 0, principal_point_new(0) - principal_point(0),
+ 0, 1, principal_point_new(1) - principal_point(1),
+ 0, 0, 1;
+ *P_new = T * P;
+}
+
+void ProjectionChangeAspectRatio(const Mat34 &P,
+ const Vec2 &principal_point,
+ double aspect_ratio,
+ double aspect_ratio_new,
+ Mat34 *P_new) {
+ Mat3 T;
+ T << 1, 0, 0,
+ 0, aspect_ratio_new / aspect_ratio, 0,
+ 0, 0, 1;
+ Mat34 P_temp;
+
+ ProjectionShiftPrincipalPoint(P, principal_point, Vec2(0,0), &P_temp);
+ P_temp = T * P_temp;
+ ProjectionShiftPrincipalPoint(P_temp, Vec2(0,0), principal_point, P_new);
+}
+
+void HomogeneousToEuclidean(const Mat &H, Mat *X) {
+ int d = H.rows() - 1;
+ int n = H.cols();
+ X->resize(d, n);
+  for (int i = 0; i < n; ++i) {
+ double h = H(d, i);
+ for (int j = 0; j < d; ++j) {
+ (*X)(j, i) = H(j, i) / h;
+ }
+ }
+}
+
+void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e) {
+ e->resize(2, h.cols());
+ e->row(0) = h.row(0).array() / h.row(2).array();
+ e->row(1) = h.row(1).array() / h.row(2).array();
+}
+
+void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e) {
+ e->resize(3, h.cols());
+ e->row(0) = h.row(0).array() / h.row(3).array();
+ e->row(1) = h.row(1).array() / h.row(3).array();
+ e->row(2) = h.row(2).array() / h.row(3).array();
+}
+
+void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X) {
+ double w = H(2);
+ *X << H(0) / w, H(1) / w;
+}
+
+void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X) {
+ double w = H(3);
+ *X << H(0) / w, H(1) / w, H(2) / w;
+}
+
+void EuclideanToHomogeneous(const Mat &X, Mat *H) {
+ int d = X.rows();
+ int n = X.cols();
+ H->resize(d + 1, n);
+ H->block(0, 0, d, n) = X;
+ H->row(d).setOnes();
+}
+
+void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H) {
+ *H << X(0), X(1), 1;
+}
+
+void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H) {
+ *H << X(0), X(1), X(2), 1;
+}
+
+// TODO(julien) Call conditioning.h/ApplyTransformationToPoints ?
+void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n) {
+ Mat3X x_image_h;
+ EuclideanToHomogeneous(x, &x_image_h);
+ Mat3X x_camera_h = K.inverse() * x_image_h;
+ HomogeneousToEuclidean(x_camera_h, n);
+}
+
+void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n) {
+ Mat3X x_camera_h = K.inverse() * x;
+ HomogeneousToEuclidean(x_camera_h, n);
+}
+
+double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X) {
+ return (R*X)(2) + t(2);
+}
+
+double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X) {
+ Vec3 Xe = X.head<3>() / X(3);
+ return Depth(R, t, Xe);
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/multiview/projection.h b/extern/libmv/libmv/multiview/projection.h
new file mode 100644
index 00000000000..bc353b64c08
--- /dev/null
+++ b/extern/libmv/libmv/multiview/projection.h
@@ -0,0 +1,231 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_PROJECTION_H_
+#define LIBMV_MULTIVIEW_PROJECTION_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P);
+void KRt_From_P(const Mat34 &P, Mat3 *K, Mat3 *R, Vec3 *t);
+
+// Applies a change of basis to the image coordinates of the projection matrix
+// so that the principal point becomes principal_point_new.
+void ProjectionShiftPrincipalPoint(const Mat34 &P,
+ const Vec2 &principal_point,
+ const Vec2 &principal_point_new,
+ Mat34 *P_new);
+
+// Applies a change of basis to the image coordinates of the projection matrix
+// so that the aspect ratio becomes aspect_ratio_new. This is done by
+// stretching the y axis. The aspect ratio is defined as the quotient between
+// the focal length of the y and the x axis.
+void ProjectionChangeAspectRatio(const Mat34 &P,
+ const Vec2 &principal_point,
+ double aspect_ratio,
+ double aspect_ratio_new,
+ Mat34 *P_new);
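+
+// Example (a minimal sketch; P and its current principal point are assumed
+// already known):
+//
+// Mat34 P_stretched;
+// ProjectionChangeAspectRatio(P, Vec2(320, 240), 1.0, 2.0, &P_stretched);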
+
+void HomogeneousToEuclidean(const Mat &H, Mat *X);
+void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e);
+void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e);
+void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X);
+void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X);
+inline Vec2 HomogeneousToEuclidean(const Vec3 &h) {
+ return h.head<2>() / h(2);
+}
+inline Vec3 HomogeneousToEuclidean(const Vec4 &h) {
+ return h.head<3>() / h(3);
+}
+inline Mat2X HomogeneousToEuclidean(const Mat3X &h) {
+ Mat2X e(2, h.cols());
+ e.row(0) = h.row(0).array() / h.row(2).array();
+ e.row(1) = h.row(1).array() / h.row(2).array();
+ return e;
+}
+
+void EuclideanToHomogeneous(const Mat &X, Mat *H);
+inline Mat3X EuclideanToHomogeneous(const Mat2X &x) {
+ Mat3X h(3, x.cols());
+ h.block(0, 0, 2, x.cols()) = x;
+ h.row(2).setOnes();
+ return h;
+}
+inline void EuclideanToHomogeneous(const Mat2X &x, Mat3X *h) {
+ h->resize(3, x.cols());
+ h->block(0, 0, 2, x.cols()) = x;
+ h->row(2).setOnes();
+}
+inline Mat4X EuclideanToHomogeneous(const Mat3X &x) {
+ Mat4X h(4, x.cols());
+ h.block(0, 0, 3, x.cols()) = x;
+ h.row(3).setOnes();
+ return h;
+}
+inline void EuclideanToHomogeneous(const Mat3X &x, Mat4X *h) {
+ h->resize(4, x.cols());
+ h->block(0, 0, 3, x.cols()) = x;
+ h->row(3).setOnes();
+}
+void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H);
+void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H);
+inline Vec3 EuclideanToHomogeneous(const Vec2 &x) {
+ return Vec3(x(0), x(1), 1);
+}
+inline Vec4 EuclideanToHomogeneous(const Vec3 &x) {
+ return Vec4(x(0), x(1), x(2), 1);
+}
+// Conversion from image coordinates to normalized camera coordinates
+void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n);
+void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n);
+
+inline Vec2 Project(const Mat34 &P, const Vec3 &X) {
+ Vec4 HX;
+ HX << X, 1.0;
+ Vec3 hx = P * HX;
+ return hx.head<2>() / hx(2);
+}
+
+inline void Project(const Mat34 &P, const Vec4 &X, Vec3 *x) {
+ *x = P * X;
+}
+
+inline void Project(const Mat34 &P, const Vec4 &X, Vec2 *x) {
+ Vec3 hx = P * X;
+ *x = hx.head<2>() / hx(2);
+}
+
+inline void Project(const Mat34 &P, const Vec3 &X, Vec3 *x) {
+ Vec4 HX;
+ HX << X, 1.0;
+ Project(P, HX, x);
+}
+
+inline void Project(const Mat34 &P, const Vec3 &X, Vec2 *x) {
+ Vec3 hx;
+ Project(P, X, &hx);
+ *x = hx.head<2>() / hx(2);
+}
+
+inline void Project(const Mat34 &P, const Mat4X &X, Mat2X *x) {
+ x->resize(2, X.cols());
+ for (int c = 0; c < X.cols(); ++c) {
+ Vec3 hx = P * X.col(c);
+ x->col(c) = hx.head<2>() / hx(2);
+ }
+}
+
+inline Mat2X Project(const Mat34 &P, const Mat4X &X) {
+ Mat2X x;
+ Project(P, X, &x);
+ return x;
+}
+
+inline void Project(const Mat34 &P, const Mat3X &X, Mat2X *x) {
+ x->resize(2, X.cols());
+ for (int c = 0; c < X.cols(); ++c) {
+ Vec4 HX;
+ HX << X.col(c), 1.0;
+ Vec3 hx = P * HX;
+ x->col(c) = hx.head<2>() / hx(2);
+ }
+}
+
+inline void Project(const Mat34 &P, const Mat3X &X, const Vecu &ids, Mat2X *x) {
+ x->resize(2, ids.size());
+ Vec4 HX;
+ Vec3 hx;
+ for (int c = 0; c < ids.size(); ++c) {
+ HX << X.col(ids[c]), 1.0;
+ hx = P * HX;
+ x->col(c) = hx.head<2>() / hx(2);
+ }
+}
+
+inline Mat2X Project(const Mat34 &P, const Mat3X &X) {
+ Mat2X x(2, X.cols());
+ Project(P, X, &x);
+ return x;
+}
+
+inline Mat2X Project(const Mat34 &P, const Mat3X &X, const Vecu &ids) {
+ Mat2X x(2, ids.size());
+ Project(P, X, ids, &x);
+ return x;
+}
+
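+// Example (a minimal sketch; a camera P composed via P_From_KRt is assumed):
+//
+// Mat3X X(3, 2);
+// X << 0, 1,
+//      0, 1,
+//      5, 6;
+// Mat2X x = Project(P, X);  // Two columns of image coordinates.
+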
+double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X);
+double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X);
+
+/**
+* Returns true if the homogeneous 3D point X is in front of
+* the camera P.
+*/
+inline bool isInFrontOfCamera(const Mat34 &P, const Vec4 &X) {
+ double condition_1 = P.row(2).dot(X) * X[3];
+ double condition_2 = X[2] * X[3];
+ return condition_1 > 0 && condition_2 > 0;
+}
+
+inline bool isInFrontOfCamera(const Mat34 &P, const Vec3 &X) {
+ Vec4 X_homo;
+ X_homo.head<3>() = X;
+ X_homo(3) = 1;
+ return isInFrontOfCamera(P, X_homo);
+}
+
+/**
+* Transforms a 2D point from pixel image coordinates to a 2D point in
+* normalized image coordinates.
+*/
+inline Vec2 ImageToNormImageCoordinates(const Mat3 &Kinverse, const Vec2 &x) {
+ Vec3 x_h = Kinverse * EuclideanToHomogeneous(x);
+ return HomogeneousToEuclidean(x_h);
+}
+
+/// Estimates the root mean square error (2D). Note that this divides the
+/// residual norm by the point count rather than by sqrt(count).
+inline double RootMeanSquareError(const Mat2X &x_image,
+ const Mat4X &X_world,
+ const Mat34 &P) {
+ size_t num_points = x_image.cols();
+ Mat2X dx = Project(P, X_world) - x_image;
+ return dx.norm() / num_points;
+}
+
+/// Estimates the root mean square error (2D), composing P from K, R, t;
+/// same division convention as above.
+inline double RootMeanSquareError(const Mat2X &x_image,
+ const Mat3X &X_world,
+ const Mat3 &K,
+ const Mat3 &R,
+ const Vec3 &t) {
+ Mat34 P;
+ P_From_KRt(K, R, t, &P);
+ size_t num_points = x_image.cols();
+ Mat2X dx = Project(P, X_world) - x_image;
+ return dx.norm() / num_points;
+}
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_PROJECTION_H_
diff --git a/extern/libmv/libmv/multiview/resection.h b/extern/libmv/libmv/multiview/resection.h
new file mode 100644
index 00000000000..e4623899147
--- /dev/null
+++ b/extern/libmv/libmv/multiview/resection.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Compute the projection matrix from a set of 3D points X and their
+// projections x = PX in 2D. This is useful once a point cloud has been
+// reconstructed.
+//
+// Algorithm is the standard DLT as described in Hartley & Zisserman, page 179.
+
+#ifndef LIBMV_MULTIVIEW_RESECTION_H
+#define LIBMV_MULTIVIEW_RESECTION_H
+
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+namespace resection {
+
+// x's are 2D image coordinates, (x,y,1), and X's are homogeneous four vectors.
+template<typename T>
+void Resection(const Matrix<T, 2, Dynamic> &x,
+ const Matrix<T, 4, Dynamic> &X,
+ Matrix<T, 3, 4> *P) {
+ int N = x.cols();
+ assert(X.cols() == N);
+
+ Matrix<T, Dynamic, 12> design(2*N, 12);
+ design.setZero();
+ for (int i = 0; i < N; i++) {
+ T xi = x(0, i);
+ T yi = x(1, i);
+ // See equation (7.2) on page 179 of H&Z.
+ design.template block<1,4>(2*i, 4) = -X.col(i).transpose();
+ design.template block<1,4>(2*i, 8) = yi*X.col(i).transpose();
+ design.template block<1,4>(2*i + 1, 0) = X.col(i).transpose();
+ design.template block<1,4>(2*i + 1, 8) = -xi*X.col(i).transpose();
+ }
+ Matrix<T, 12, 1> p;
+ Nullspace(&design, &p);
+ reshape(p, 3, 4, P);
+}
+
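+// Example (a minimal sketch; x2d holds 2xN image points and X4d the matching
+// homogeneous 3D points, both assumed already filled):
+//
+// Mat34 P;
+// Resection(x2d, X4d, &P);
+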
+} // namespace resection
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_RESECTION_H
diff --git a/extern/libmv/libmv/multiview/triangulation.cc b/extern/libmv/libmv/multiview/triangulation.cc
new file mode 100644
index 00000000000..b9d38cef936
--- /dev/null
+++ b/extern/libmv/libmv/multiview/triangulation.cc
@@ -0,0 +1,49 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/triangulation.h"
+
+namespace libmv {
+
+// HZ 12.2, page 312.
+void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
+ const Mat34 &P2, const Vec2 &x2,
+ Vec4 *X_homogeneous) {
+ Mat4 design;
+ for (int i = 0; i < 4; ++i) {
+ design(0,i) = x1(0) * P1(2,i) - P1(0,i);
+ design(1,i) = x1(1) * P1(2,i) - P1(1,i);
+ design(2,i) = x2(0) * P2(2,i) - P2(0,i);
+ design(3,i) = x2(1) * P2(2,i) - P2(1,i);
+ }
+ Nullspace(&design, X_homogeneous);
+}
+
+void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
+ const Mat34 &P2, const Vec2 &x2,
+ Vec3 *X_euclidean) {
+ Vec4 X_homogeneous;
+ TriangulateDLT(P1, x1, P2, x2, &X_homogeneous);
+ HomogeneousToEuclidean(X_homogeneous, X_euclidean);
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/multiview/triangulation.h b/extern/libmv/libmv/multiview/triangulation.h
new file mode 100644
index 00000000000..c35774d9e1b
--- /dev/null
+++ b/extern/libmv/libmv/multiview/triangulation.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_TRIANGULATION_H_
+#define LIBMV_MULTIVIEW_TRIANGULATION_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
+ const Mat34 &P2, const Vec2 &x2,
+ Vec4 *X_homogeneous);
+
+void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
+ const Mat34 &P2, const Vec2 &x2,
+ Vec3 *X_euclidean);
+
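+// Example (a minimal sketch; cameras P1, P2 and a matched image point pair
+// x1, x2 are assumed known):
+//
+// Vec3 X;
+// TriangulateDLT(P1, x1, P2, x2, &X);
+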
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_TRIANGULATION_H_
diff --git a/extern/libmv/libmv/numeric/dogleg.h b/extern/libmv/libmv/numeric/dogleg.h
new file mode 100644
index 00000000000..f05882c1191
--- /dev/null
+++ b/extern/libmv/libmv/numeric/dogleg.h
@@ -0,0 +1,261 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// A simple implementation of Powell's dogleg nonlinear minimization.
+//
+// [1] K. Madsen, H. Nielsen, O. Tingleoff. Methods for Non-linear Least
+// Squares Problems.
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+//
+// TODO(keir): Cite Lourakis' dogleg paper.
+
+#ifndef LIBMV_NUMERIC_DOGLEG_H
+#define LIBMV_NUMERIC_DOGLEG_H
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/function_derivative.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+template<typename Function,
+ typename Jacobian = NumericJacobian<Function>,
+ typename Solver = Eigen::PartialPivLU<
+ Matrix<typename Function::FMatrixType::RealScalar,
+ Function::XMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime> > >
+class Dogleg {
+ public:
+ typedef typename Function::XMatrixType::RealScalar Scalar;
+ typedef typename Function::FMatrixType FVec;
+ typedef typename Function::XMatrixType Parameters;
+ typedef Matrix<typename Function::FMatrixType::RealScalar,
+ Function::FMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime> JMatrixType;
+ typedef Matrix<typename JMatrixType::RealScalar,
+ JMatrixType::ColsAtCompileTime,
+ JMatrixType::ColsAtCompileTime> AMatrixType;
+
+ enum Status {
+ RUNNING,
+ GRADIENT_TOO_SMALL, // eps > max(J'*f(x))
+ RELATIVE_STEP_SIZE_TOO_SMALL, // eps > ||dx|| / ||x||
+ TRUST_REGION_TOO_SMALL, // eps > radius / ||x||
+ ERROR_TOO_SMALL, // eps > ||f(x)||
+ HIT_MAX_ITERATIONS,
+ };
+
+ enum Step {
+ DOGLEG,
+ GAUSS_NEWTON,
+ STEEPEST_DESCENT,
+ };
+
+ Dogleg(const Function &f)
+ : f_(f), df_(f) {}
+
+ struct SolverParameters {
+ SolverParameters()
+ : gradient_threshold(1e-16),
+ relative_step_threshold(1e-16),
+ error_threshold(1e-16),
+ initial_trust_radius(1e0),
+ max_iterations(500) {}
+ Scalar gradient_threshold; // eps > max(J'*f(x))
+ Scalar relative_step_threshold; // eps > ||dx|| / ||x||
+ Scalar error_threshold; // eps > ||f(x)||
+ Scalar initial_trust_radius; // Initial u for solving normal equations.
+ int max_iterations; // Maximum number of solver iterations.
+ };
+
+ struct Results {
+ Scalar error_magnitude; // ||f(x)||
+ Scalar gradient_magnitude; // ||J'f(x)||
+ int iterations;
+ Status status;
+ };
+
+ Status Update(const Parameters &x, const SolverParameters &params,
+ JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
+ *J = df_(x);
+ // TODO(keir): In the case of m = n, avoid computing A and just do J^-1 directly.
+ *A = (*J).transpose() * (*J);
+ *error = f_(x);
+ *g = (*J).transpose() * *error;
+ if (g->array().abs().maxCoeff() < params.gradient_threshold) {
+ return GRADIENT_TOO_SMALL;
+ } else if (error->array().abs().maxCoeff() < params.error_threshold) {
+ return ERROR_TOO_SMALL;
+ }
+ return RUNNING;
+ }
+
+ Step SolveDoglegDirection(const Parameters &dx_sd,
+ const Parameters &dx_gn,
+ Scalar radius,
+ Scalar alpha,
+ Parameters *dx_dl,
+ Scalar *beta) {
+ // Solve for Dogleg step dx_dl.
+ if (dx_gn.norm() < radius) {
+ *dx_dl = dx_gn;
+ return GAUSS_NEWTON;
+
+ } else if (alpha * dx_sd.norm() > radius) {
+ *dx_dl = (radius / dx_sd.norm()) * dx_sd;
+ return STEEPEST_DESCENT;
+
+ } else {
+ Parameters a = alpha * dx_sd;
+ const Parameters &b = dx_gn;
+ Parameters b_minus_a = b - a;  // [1] defines c = a.(b - a).
+ Scalar Mbma2 = b_minus_a.squaredNorm();
+ Scalar Ma2 = a.squaredNorm();
+ Scalar c = a.dot(b_minus_a);
+ Scalar radius2 = radius*radius;
+ if (c <= 0) {
+ *beta = (-c + sqrt(c*c + Mbma2*(radius2 - Ma2)))/(Mbma2);
+ } else {
+ *beta = (radius2 - Ma2) /
+ (c + sqrt(c*c + Mbma2*(radius2 - Ma2)));
+ }
+ *dx_dl = alpha * dx_sd + (*beta) * (dx_gn - alpha*dx_sd);
+ return DOGLEG;
+ }
+ }
+
+ Results minimize(Parameters *x_and_min) {
+ SolverParameters params;
+ return minimize(params, x_and_min);
+ }
+
+ Results minimize(const SolverParameters &params, Parameters *x_and_min) {
+ Parameters &x = *x_and_min;
+ JMatrixType J;
+ AMatrixType A;
+ FVec error;
+ Parameters g;
+
+ Results results;
+ results.status = Update(x, params, &J, &A, &error, &g);
+
+ Scalar radius = params.initial_trust_radius;
+ bool x_updated = true;
+
+ Parameters x_new;
+ Parameters dx_sd; // Steepest descent step.
+ Parameters dx_dl; // Dogleg step.
+ Parameters dx_gn; // Gauss-Newton step.
+ printf("iteration ||f(x)|| max(g) radius\n");
+ int i = 0;
+ for (; results.status == RUNNING && i < params.max_iterations; ++i) {
+ printf("%9d %12g %12g %12g",
+ i, f_(x).norm(), g.array().abs().maxCoeff(), radius);
+
+ //LG << "iteration: " << i;
+ //LG << "||f(x)||: " << f_(x).norm();
+ //LG << "max(g): " << g.cwise().abs().maxCoeff();
+ //LG << "radius: " << radius;
+ // Eqn 3.19 from [1]
+ Scalar alpha = g.squaredNorm() / (J*g).squaredNorm();
+
+ // Solve for steepest descent direction dx_sd.
+ dx_sd = -g;
+
+ // Solve for Gauss-Newton direction dx_gn.
+ if (x_updated) {
+ // TODO(keir): See Appendix B of [1] for discussion of when A is
+ // singular and there are many solutions. Solving that involves the SVD
+ // and is slower, but should still work.
+ Solver solver(A);
+ dx_gn = solver.solve(-g);
+ if (!(A * dx_gn).isApprox(-g)) {
+ LOG(ERROR) << "Failed to solve normal eqns. TODO: Solve via SVD.";
+ return results;
+ }
+ x_updated = false;
+ }
+
+ // Solve for dogleg direction dx_dl.
+ Scalar beta = 0;
+ Step step = SolveDoglegDirection(dx_sd, dx_gn, radius, alpha,
+ &dx_dl, &beta);
+
+ Scalar e3 = params.relative_step_threshold;
+ if (dx_dl.norm() < e3*(x.norm() + e3)) {
+ results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
+ break;
+ }
+
+ x_new = x + dx_dl;
+ Scalar actual = f_(x).squaredNorm() - f_(x_new).squaredNorm();
+ Scalar predicted = 0;
+ if (step == GAUSS_NEWTON) {
+ predicted = f_(x).squaredNorm();
+ } else if (step == STEEPEST_DESCENT) {
+ predicted = radius * (2*alpha*g.norm() - radius) / 2 / alpha;
+ } else if (step == DOGLEG) {
+ predicted = 0.5 * alpha * (1-beta)*(1-beta)*g.squaredNorm() +
+ beta*(2-beta)*f_(x).squaredNorm();
+ }
+ Scalar rho = actual / predicted;
+
+ if (step == GAUSS_NEWTON) printf(" GAUSS");
+ if (step == STEEPEST_DESCENT) printf(" STEE");
+ if (step == DOGLEG) printf(" DOGL");
+
+ printf(" %12g %12g %12g\n", rho, actual, predicted);
+
+ if (rho > 0) {
+ // Accept update because the linear model is a good fit.
+ x = x_new;
+ results.status = Update(x, params, &J, &A, &error, &g);
+ x_updated = true;
+ }
+ if (rho > 0.75) {
+ radius = std::max(radius, 3*dx_dl.norm());
+ } else if (rho < 0.25) {
+ radius /= 2;
+ if (radius < e3 * (x.norm() + e3)) {
+ results.status = TRUST_REGION_TOO_SMALL;
+ }
+ }
+ }
+ if (results.status == RUNNING) {
+ results.status = HIT_MAX_ITERATIONS;
+ }
+ results.error_magnitude = error.norm();
+ results.gradient_magnitude = g.norm();
+ results.iterations = i;
+ return results;
+ }
+
+ private:
+ const Function &f_;
+ Jacobian df_;
+};
+
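+// Example (a minimal sketch): minimize f(x) = (x0^2 - 1, x1) with the dogleg
+// solver; any functor exposing XMatrixType, FMatrixType and operator() works.
+//
+// struct F {
+//   typedef Vec2 XMatrixType;
+//   typedef Vec2 FMatrixType;
+//   Vec2 operator()(const Vec2 &x) const {
+//     return Vec2(x(0) * x(0) - 1, x(1));
+//   }
+// };
+// F f;
+// Dogleg<F> dogleg(f);
+// Vec2 x(3.0, 2.0);
+// Dogleg<F>::Results results = dogleg.minimize(&x);  // x -> roughly (1, 0).
+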
+} // namespace libmv
+
+#endif // LIBMV_NUMERIC_DOGLEG_H
diff --git a/extern/libmv/libmv/numeric/function_derivative.h b/extern/libmv/libmv/numeric/function_derivative.h
new file mode 100644
index 00000000000..d7bc437b2e0
--- /dev/null
+++ b/extern/libmv/libmv/numeric/function_derivative.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_NUMERIC_DERIVATIVE_H
+#define LIBMV_NUMERIC_DERIVATIVE_H
+
+#include <cmath>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+// Numeric derivative of a function.
+// TODO(keir): Consider adding a quadratic approximation.
+
+enum NumericJacobianMode {
+ CENTRAL,
+ FORWARD,
+};
+
+template<typename Function, NumericJacobianMode mode=CENTRAL>
+class NumericJacobian {
+ public:
+ typedef typename Function::XMatrixType Parameters;
+ typedef typename Function::XMatrixType::RealScalar XScalar;
+ typedef typename Function::FMatrixType FMatrixType;
+ typedef Matrix<typename Function::FMatrixType::RealScalar,
+ Function::FMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime>
+ JMatrixType;
+
+ NumericJacobian(const Function &f) : f_(f) {}
+
+ // TODO(keir): Perhaps passing the jacobian back by value is not a good idea.
+ JMatrixType operator()(const Parameters &x) {
+ // Empirically determined constant.
+ Parameters eps = x.array().abs() * XScalar(1e-5);
+ // To handle cases where a parameter is exactly zero, instead use the mean
+ // eps for the other dimensions.
+ XScalar mean_eps = eps.sum() / eps.rows();
+ if (mean_eps == XScalar(0)) {
+ // TODO(keir): Do something better here.
+ mean_eps = 1e-8; // ~sqrt(machine precision).
+ }
+ // TODO(keir): Eliminate this needless function evaluation for the
+ // central difference case.
+ FMatrixType fx = f_(x);
+ const int rows = fx.rows();
+ const int cols = x.rows();
+ JMatrixType jacobian(rows, cols);
+ Parameters x_plus_delta = x;
+ for (int c = 0; c < cols; ++c) {
+ if (eps(c) == XScalar(0)) {
+ eps(c) = mean_eps;
+ }
+ x_plus_delta(c) = x(c) + eps(c);
+ jacobian.col(c) = f_(x_plus_delta);
+
+ XScalar one_over_h = 1 / eps(c);
+ if (mode == CENTRAL) {
+ x_plus_delta(c) = x(c) - eps(c);
+ jacobian.col(c) -= f_(x_plus_delta);
+ one_over_h /= 2;
+ } else {
+ jacobian.col(c) -= fx;
+ }
+ x_plus_delta(c) = x(c);
+ jacobian.col(c) = jacobian.col(c) * one_over_h;
+ }
+ return jacobian;
+ }
+ private:
+ const Function &f_;
+};
+
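+// Example (a minimal sketch): central-difference Jacobian of
+// f(x) = (sin(x0), x0 * x1).
+//
+// struct F {
+//   typedef Vec2 XMatrixType;
+//   typedef Vec2 FMatrixType;
+//   Vec2 operator()(const Vec2 &x) const {
+//     return Vec2(sin(x(0)), x(0) * x(1));
+//   }
+// };
+// F f;
+// NumericJacobian<F> jacobian(f);
+// Mat2 J = jacobian(Vec2(1.0, 2.0));  // Approximately [cos(1) 0; 2 1].
+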
+template<typename Function, typename Jacobian>
+bool CheckJacobian(const Function &f, const typename Function::XMatrixType &x) {
+ Jacobian j_analytic(f);
+ NumericJacobian<Function> j_numeric(f);
+
+ typename NumericJacobian<Function>::JMatrixType J_numeric = j_numeric(x);
+ typename NumericJacobian<Function>::JMatrixType J_analytic = j_analytic(x);
+ LG << J_numeric - J_analytic;
+ return true;
+}
+
+} // namespace libmv
+
+#endif // LIBMV_NUMERIC_DERIVATIVE_H
diff --git a/extern/libmv/libmv/numeric/levenberg_marquardt.h b/extern/libmv/libmv/numeric/levenberg_marquardt.h
new file mode 100644
index 00000000000..4473b72f156
--- /dev/null
+++ b/extern/libmv/libmv/numeric/levenberg_marquardt.h
@@ -0,0 +1,183 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// A simple implementation of Levenberg-Marquardt minimization.
+//
+// [1] K. Madsen, H. Nielsen, O. Tingleoff. Methods for Non-linear Least
+// Squares Problems.
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+//
+// TODO(keir): Cite Lourakis' dogleg paper.
+
+#ifndef LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
+#define LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
+
+#include <algorithm>
+#include <cmath>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/function_derivative.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+template<typename Function,
+ typename Jacobian = NumericJacobian<Function>,
+ typename Solver = Eigen::PartialPivLU<
+ Matrix<typename Function::FMatrixType::RealScalar,
+ Function::XMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime> > >
+class LevenbergMarquardt {
+ public:
+ typedef typename Function::XMatrixType::RealScalar Scalar;
+ typedef typename Function::FMatrixType FVec;
+ typedef typename Function::XMatrixType Parameters;
+ typedef Matrix<typename Function::FMatrixType::RealScalar,
+ Function::FMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime> JMatrixType;
+ typedef Matrix<typename JMatrixType::RealScalar,
+ JMatrixType::ColsAtCompileTime,
+ JMatrixType::ColsAtCompileTime> AMatrixType;
+
+ // TODO(keir): Some of these knobs can be derived from each other and
+ // removed, instead of requiring the user to set them.
+ enum Status {
+ RUNNING,
+ GRADIENT_TOO_SMALL, // eps > max(J'*f(x))
+ RELATIVE_STEP_SIZE_TOO_SMALL, // eps > ||dx|| / ||x||
+ ERROR_TOO_SMALL, // eps > ||f(x)||
+ HIT_MAX_ITERATIONS,
+ };
+
+ LevenbergMarquardt(const Function &f)
+ : f_(f), df_(f) {}
+
+ struct SolverParameters {
+ SolverParameters()
+ : gradient_threshold(1e-16),
+ relative_step_threshold(1e-16),
+ error_threshold(1e-16),
+ initial_scale_factor(1e-3),
+ max_iterations(100) {}
+ Scalar gradient_threshold; // eps > max(J'*f(x))
+ Scalar relative_step_threshold; // eps > ||dx|| / ||x||
+ Scalar error_threshold; // eps > ||f(x)||
+ Scalar initial_scale_factor; // Initial u for solving normal equations.
+ int max_iterations; // Maximum number of solver iterations.
+ };
+
+ struct Results {
+ Scalar error_magnitude; // ||f(x)||
+ Scalar gradient_magnitude; // ||J'f(x)||
+ int iterations;
+ Status status;
+ };
+
+ Status Update(const Parameters &x, const SolverParameters &params,
+ JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
+ *J = df_(x);
+ *A = (*J).transpose() * (*J);
+ *error = -f_(x);
+ *g = (*J).transpose() * *error;
+ if (g->array().abs().maxCoeff() < params.gradient_threshold) {
+ return GRADIENT_TOO_SMALL;
+ } else if (error->norm() < params.error_threshold) {
+ return ERROR_TOO_SMALL;
+ }
+ return RUNNING;
+ }
+
+ Results minimize(Parameters *x_and_min) {
+ SolverParameters params;
+ return minimize(params, x_and_min);
+ }
+
+ Results minimize(const SolverParameters &params, Parameters *x_and_min) {
+ Parameters &x = *x_and_min;
+ JMatrixType J;
+ AMatrixType A;
+ FVec error;
+ Parameters g;
+
+ Results results;
+ results.status = Update(x, params, &J, &A, &error, &g);
+
+ Scalar u = Scalar(params.initial_scale_factor*A.diagonal().maxCoeff());
+ Scalar v = 2;
+
+ Parameters dx, x_new;
+ int i;
+ for (i = 0; results.status == RUNNING && i < params.max_iterations; ++i) {
+ VLOG(1) << "iteration: " << i;
+ VLOG(1) << "||f(x)||: " << f_(x).norm();
+ VLOG(1) << "max(g): " << g.array().abs().maxCoeff();
+ VLOG(1) << "u: " << u;
+ VLOG(1) << "v: " << v;
+
+ AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
+ Solver solver(A_augmented);
+ dx = solver.solve(g);
+ bool solved = (A_augmented * dx).isApprox(g);
+ if (!solved) {
+ LOG(ERROR) << "Failed to solve";
+ }
+ if (solved && dx.norm() <= params.relative_step_threshold * x.norm()) {
+ results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
+ break;
+ }
+ if (solved) {
+ x_new = x + dx;
+ // Rho is the ratio of the actual reduction in error to the reduction
+ // in error that would be obtained if the problem was linear.
+ // See [1] for details.
+ Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
+ / dx.dot(u*dx + g));
+ if (rho > 0) {
+ // Accept the Gauss-Newton step because the linear model fits well.
+ x = x_new;
+ results.status = Update(x, params, &J, &A, &error, &g);
+ Scalar tmp = Scalar(2*rho-1);
+ u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
+ v = 2;
+ continue;
+ }
+ }
+ // Reject the update because either the normal equations failed to solve
+ // or the local linear model was not good (rho < 0). Instead, increase u
+ // to move closer to gradient descent.
+ u *= v;
+ v *= 2;
+ }
+ if (results.status == RUNNING) {
+ results.status = HIT_MAX_ITERATIONS;
+ }
+ results.error_magnitude = error.norm();
+ results.gradient_magnitude = g.norm();
+ results.iterations = i;
+ return results;
+ }
+
+ private:
+ const Function &f_;
+ Jacobian df_;
+};
+
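+// Example (a minimal sketch): drive the linear residual f(x) = (x0 + 1, x1 - 4)
+// to zero from a rough guess; the solution is x = (-1, 4).
+//
+// struct F {
+//   typedef Vec2 XMatrixType;
+//   typedef Vec2 FMatrixType;
+//   Vec2 operator()(const Vec2 &x) const {
+//     return Vec2(x(0) + 1, x(1) - 4);
+//   }
+// };
+// F f;
+// LevenbergMarquardt<F> lm(f);
+// Vec2 x(0.0, 0.0);
+// LevenbergMarquardt<F>::Results results = lm.minimize(&x);
+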
+} // namespace libmv
+
+#endif // LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
diff --git a/extern/libmv/libmv/numeric/numeric.cc b/extern/libmv/libmv/numeric/numeric.cc
new file mode 100644
index 00000000000..0ca3a64e4f4
--- /dev/null
+++ b/extern/libmv/libmv/numeric/numeric.cc
@@ -0,0 +1,136 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+Mat3 RotationAroundX(double angle) {
+ double c, s;
+ sincos(angle, &s, &c);
+ Mat3 R;
+ R << 1, 0, 0,
+ 0, c, -s,
+ 0, s, c;
+ return R;
+}
+
+Mat3 RotationAroundY(double angle) {
+ double c, s;
+ sincos(angle, &s, &c);
+ Mat3 R;
+ R << c, 0, s,
+ 0, 1, 0,
+ -s, 0, c;
+ return R;
+}
+
+Mat3 RotationAroundZ(double angle) {
+ double c, s;
+ sincos(angle, &s, &c);
+ Mat3 R;
+ R << c, -s, 0,
+ s, c, 0,
+ 0, 0, 1;
+ return R;
+}
+
+
+Mat3 RotationRodrigues(const Vec3 &axis) {
+ double theta = axis.norm();
+ Vec3 w = axis / theta;
+ Mat3 W = CrossProductMatrix(w);
+
+ return Mat3::Identity() + sin(theta) * W + (1 - cos(theta)) * W * W;
+}
+
+
+Mat3 LookAt(Vec3 center) {
+ Vec3 zc = center.normalized();
+ Vec3 xc = Vec3::UnitY().cross(zc).normalized();
+ Vec3 yc = zc.cross(xc);
+ Mat3 R;
+ R.row(0) = xc;
+ R.row(1) = yc;
+ R.row(2) = zc;
+ return R;
+}
+
+Mat3 CrossProductMatrix(const Vec3 &x) {
+ Mat3 X;
+ X << 0, -x(2), x(1),
+ x(2), 0, -x(0),
+ -x(1), x(0), 0;
+ return X;
+}
+
+void MeanAndVarianceAlongRows(const Mat &A,
+ Vec *mean_pointer,
+ Vec *variance_pointer) {
+ Vec &mean = *mean_pointer;
+ Vec &variance = *variance_pointer;
+ int n = A.rows();
+ int m = A.cols();
+ mean.resize(n);
+ variance.resize(n);
+
+ for (int i = 0; i < n; ++i) {
+ mean(i) = 0;
+ variance(i) = 0;
+ for (int j = 0; j < m; ++j) {
+ double x = A(i, j);
+ mean(i) += x;
+ variance(i) += x * x;
+ }
+ }
+
+ mean /= m;
+ for (int i = 0; i < n; ++i) {
+ variance(i) = variance(i) / m - Square(mean(i));
+ }
+}
+
+void HorizontalStack(const Mat &left, const Mat &right, Mat *stacked) {
+ assert(left.rows() == right.rows());
+ int n = left.rows();
+ int m1 = left.cols();
+ int m2 = right.cols();
+
+ stacked->resize(n, m1 + m2);
+ stacked->block(0, 0, n, m1) = left;
+ stacked->block(0, m1, n, m2) = right;
+}
+
+void MatrixColumn(const Mat &A, int i, Vec2 *v) {
+ assert(A.rows() == 2);
+ *v << A(0,i), A(1,i);
+}
+void MatrixColumn(const Mat &A, int i, Vec3 *v) {
+ assert(A.rows() == 3);
+ *v << A(0,i), A(1,i), A(2,i);
+}
+void MatrixColumn(const Mat &A, int i, Vec4 *v) {
+ assert(A.rows() == 4);
+ *v << A(0,i), A(1,i), A(2,i), A(3,i);
+}
+
+} // namespace libmv
+
diff --git a/extern/libmv/libmv/numeric/numeric.h b/extern/libmv/libmv/numeric/numeric.h
new file mode 100644
index 00000000000..21e0f067446
--- /dev/null
+++ b/extern/libmv/libmv/numeric/numeric.h
@@ -0,0 +1,479 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Matrix and vector classes, based on Eigen2.
+//
+// Avoid using Eigen2 classes directly; instead typedef them here.
+
+#ifndef LIBMV_NUMERIC_NUMERIC_H
+#define LIBMV_NUMERIC_NUMERIC_H
+
+#include <Eigen/Cholesky>
+#include <Eigen/Core>
+#include <Eigen/Eigenvalues>
+#include <Eigen/Geometry>
+#include <Eigen/LU>
+#include <Eigen/QR>
+#include <Eigen/SVD>
+
+#include <cmath>
+
+#if _WIN32 || __APPLE__
+ static void sincos(double x, double *sinx, double *cosx) {
+ *sinx = sin(x);
+ *cosx = cos(x);
+ }
+#endif // _WIN32 || __APPLE__
+
+#if (defined(WIN32) || defined(WIN64)) && !defined(__MINGW32__)
+ inline long lround(double d) {
+ return (long)(d>0 ? d+0.5 : ceil(d-0.5));
+ }
+ inline int round(double d) {
+ return (d>0) ? int(d+0.5) : int(d-0.5);
+ }
+ typedef unsigned int uint;
+#endif // (WIN32 || WIN64) && !__MINGW32__
+
+namespace libmv {
+
+typedef Eigen::MatrixXd Mat;
+typedef Eigen::VectorXd Vec;
+
+typedef Eigen::MatrixXf Matf;
+typedef Eigen::VectorXf Vecf;
+
+typedef Eigen::Matrix<unsigned int, Eigen::Dynamic, Eigen::Dynamic> Matu;
+typedef Eigen::Matrix<unsigned int, Eigen::Dynamic, 1> Vecu;
+typedef Eigen::Matrix<unsigned int, 2, 1> Vec2u;
+
+typedef Eigen::Matrix<double, 2, 2> Mat2;
+typedef Eigen::Matrix<double, 2, 3> Mat23;
+typedef Eigen::Matrix<double, 3, 3> Mat3;
+typedef Eigen::Matrix<double, 3, 4> Mat34;
+typedef Eigen::Matrix<double, 3, 5> Mat35;
+typedef Eigen::Matrix<double, 4, 1> Mat41;
+typedef Eigen::Matrix<double, 4, 3> Mat43;
+typedef Eigen::Matrix<double, 4, 4> Mat4;
+typedef Eigen::Matrix<double, 4, 6> Mat46;
+typedef Eigen::Matrix<float, 2, 2> Mat2f;
+typedef Eigen::Matrix<float, 2, 3> Mat23f;
+typedef Eigen::Matrix<float, 3, 3> Mat3f;
+typedef Eigen::Matrix<float, 3, 4> Mat34f;
+typedef Eigen::Matrix<float, 3, 5> Mat35f;
+typedef Eigen::Matrix<float, 4, 3> Mat43f;
+typedef Eigen::Matrix<float, 4, 4> Mat4f;
+typedef Eigen::Matrix<float, 4, 6> Mat46f;
+
+typedef Eigen::Matrix<double, 3, 3, Eigen::RowMajor> RMat3;
+typedef Eigen::Matrix<double, 4, 4, Eigen::RowMajor> RMat4;
+
+typedef Eigen::Matrix<double, 2, Eigen::Dynamic> Mat2X;
+typedef Eigen::Matrix<double, 3, Eigen::Dynamic> Mat3X;
+typedef Eigen::Matrix<double, 4, Eigen::Dynamic> Mat4X;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 2> MatX2;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 3> MatX3;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 4> MatX4;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 5> MatX5;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 6> MatX6;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 7> MatX7;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 8> MatX8;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 9> MatX9;
+typedef Eigen::Matrix<double, Eigen::Dynamic,15> MatX15;
+typedef Eigen::Matrix<double, Eigen::Dynamic,16> MatX16;
+
+typedef Eigen::Vector2d Vec2;
+typedef Eigen::Vector3d Vec3;
+typedef Eigen::Vector4d Vec4;
+typedef Eigen::Matrix<double, 5, 1> Vec5;
+typedef Eigen::Matrix<double, 6, 1> Vec6;
+typedef Eigen::Matrix<double, 7, 1> Vec7;
+typedef Eigen::Matrix<double, 8, 1> Vec8;
+typedef Eigen::Matrix<double, 9, 1> Vec9;
+typedef Eigen::Matrix<double, 10, 1> Vec10;
+typedef Eigen::Matrix<double, 11, 1> Vec11;
+typedef Eigen::Matrix<double, 12, 1> Vec12;
+typedef Eigen::Matrix<double, 13, 1> Vec13;
+typedef Eigen::Matrix<double, 14, 1> Vec14;
+typedef Eigen::Matrix<double, 15, 1> Vec15;
+typedef Eigen::Matrix<double, 16, 1> Vec16;
+typedef Eigen::Matrix<double, 17, 1> Vec17;
+typedef Eigen::Matrix<double, 18, 1> Vec18;
+typedef Eigen::Matrix<double, 19, 1> Vec19;
+typedef Eigen::Matrix<double, 20, 1> Vec20;
+
+typedef Eigen::Vector2f Vec2f;
+typedef Eigen::Vector3f Vec3f;
+typedef Eigen::Vector4f Vec4f;
+
+typedef Eigen::VectorXi VecXi;
+
+typedef Eigen::Vector2i Vec2i;
+typedef Eigen::Vector3i Vec3i;
+typedef Eigen::Vector4i Vec4i;
+
+typedef Eigen::Matrix<float,
+ Eigen::Dynamic,
+ Eigen::Dynamic,
+ Eigen::RowMajor> RMatf;
+
+typedef Eigen::NumTraits<double> EigenDouble;
+
+using Eigen::Map;
+using Eigen::Dynamic;
+using Eigen::Matrix;
+
+// Find U, s, and VT such that
+//
+// A = U * diag(s) * VT
+//
+template <typename TMat, typename TVec>
+inline void SVD(TMat *A, Vec *s, Mat *U, Mat *VT) {
+ assert(0);
+}
+
+// Solve the linear system Ax = 0 via SVD. Store the solution in x, such that
+// ||x|| = 1.0. Return the singular value corresponding to the solution.
+// Destroys A and resizes x if necessary.
+// TODO(maclean): Take the SVD of the transpose instead of this zero padding.
+template <typename TMat, typename TVec>
+double Nullspace(TMat *A, TVec *nullspace) {
+ Eigen::JacobiSVD<TMat> svd(*A, Eigen::ComputeFullV);
+ (*nullspace) = svd.matrixV().col(A->cols()-1);
+ if (A->rows() >= A->cols())
+ return svd.singularValues()(A->cols()-1);
+ else
+ return 0.0;
+}
+
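+// Example (a minimal sketch): x spans the nullspace of a rank-deficient
+// matrix.
+//
+// Mat A(3, 3);
+// A << 1, 2, 3,
+//      2, 4, 6,
+//      1, 0, 1;
+// Vec x;
+// double s = Nullspace(&A, &x);  // s is ~0; A * x is ~0 with ||x|| = 1.
+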
+// Solve the linear system Ax = 0 via SVD. Finds two solutions, x1 and x2, such
+// that x1 is the best solution and x2 is the next best solution (in the L2
+// norm sense). Store the solution in x1 and x2, such that ||x|| = 1.0. Return
+// the singular value corresponding to the solution x1. Destroys A and resizes
+// x if necessary.
+template <typename TMat, typename TVec1, typename TVec2>
+double Nullspace2(TMat *A, TVec1 *x1, TVec2 *x2) {
+ Eigen::JacobiSVD<TMat> svd(*A, Eigen::ComputeFullV);
+ *x1 = svd.matrixV().col(A->cols() - 1);
+ *x2 = svd.matrixV().col(A->cols() - 2);
+ if (A->rows() >= A->cols())
+ return svd.singularValues()(A->cols()-1);
+ else
+ return 0.0;
+}
+
+// In place transpose for square matrices.
+template<class TA>
+inline void TransposeInPlace(TA *A) {
+ *A = A->transpose().eval();
+}
+
+template<typename TVec>
+inline double NormL1(const TVec &x) {
+ return x.array().abs().sum();
+}
+
+template<typename TVec>
+inline double NormL2(const TVec &x) {
+ return x.norm();
+}
+
+template<typename TVec>
+inline double NormLInfinity(const TVec &x) {
+ return x.array().abs().maxCoeff();
+}
+
+template<typename TVec>
+inline double DistanceL1(const TVec &x, const TVec &y) {
+ return (x - y).array().abs().sum();
+}
+
+template<typename TVec>
+inline double DistanceL2(const TVec &x, const TVec &y) {
+ return (x - y).norm();
+}
+template<typename TVec>
+inline double DistanceLInfinity(const TVec &x, const TVec &y) {
+ return (x - y).array().abs().maxCoeff();
+}
+
+// Normalize a vector with the L1 norm, and return the norm before it was
+// normalized.
+template<typename TVec>
+inline double NormalizeL1(TVec *x) {
+ double norm = NormL1(*x);
+ *x /= norm;
+ return norm;
+}
+
+// Normalize a vector with the L2 norm, and return the norm before it was
+// normalized.
+template<typename TVec>
+inline double NormalizeL2(TVec *x) {
+ double norm = NormL2(*x);
+ *x /= norm;
+ return norm;
+}
+
+// Normalize a vector with the L^Infinity norm, and return the norm before it
+// was normalized.
+template<typename TVec>
+inline double NormalizeLInfinity(TVec *x) {
+ double norm = NormLInfinity(*x);
+ *x /= norm;
+ return norm;
+}
+
+// Return the square of a number.
+template<typename T>
+inline T Square(T x) {
+ return x * x;
+}
+
+Mat3 RotationAroundX(double angle);
+Mat3 RotationAroundY(double angle);
+Mat3 RotationAroundZ(double angle);
+
+// Returns the rotation matrix for a rotation of angle ||axis|| (in radians)
+// about the direction of axis, computed using the Rodrigues formula; see:
+// http://mathworld.wolfram.com/RodriguesRotationFormula.html
+Mat3 RotationRodrigues(const Vec3 &axis);
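+
+// Example (a minimal sketch): a quarter turn about the z-axis; R * (1, 0, 0)
+// is then approximately (0, 1, 0).
+//
+// Mat3 R = RotationRodrigues(Vec3(0, 0, M_PI / 2));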
+
+// Make a rotation matrix such that center becomes the direction of the
+// positive z-axis, and y is oriented close to up.
+Mat3 LookAt(Vec3 center);
+
+// Return a diagonal matrix from a vector containing the diagonal values.
+template <typename TVec>
+inline Mat Diag(const TVec &x) {
+ return x.asDiagonal();
+}
+
+template<typename TMat>
+inline double FrobeniusNorm(const TMat &A) {
+ return sqrt(A.array().abs2().sum());
+}
+
+template<typename TMat>
+inline double FrobeniusDistance(const TMat &A, const TMat &B) {
+ return FrobeniusNorm(A - B);
+}
+
+inline Vec3 CrossProduct(const Vec3 &x, const Vec3 &y) {
+ return x.cross(y);
+}
+
+Mat3 CrossProductMatrix(const Vec3 &x);
+
+void MeanAndVarianceAlongRows(const Mat &A,
+ Vec *mean_pointer,
+ Vec *variance_pointer);
+
+#if _WIN32
+ // TODO(bomboze): un-#if this for both platforms once tested under Windows
+ /* This solution was extensively discussed here http://forum.kde.org/viewtopic.php?f=74&t=61940 */
+ #define SUM_OR_DYNAMIC(x,y) (x==Eigen::Dynamic||y==Eigen::Dynamic)?Eigen::Dynamic:(x+y)
+
+ template<typename Derived1, typename Derived2>
+ struct hstack_return {
+ typedef typename Derived1::Scalar Scalar;
+ enum {
+ RowsAtCompileTime = Derived1::RowsAtCompileTime,
+ ColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::ColsAtCompileTime, Derived2::ColsAtCompileTime),
+ Options = Derived1::Flags&Eigen::RowMajorBit ? Eigen::RowMajor : 0,
+ MaxRowsAtCompileTime = Derived1::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxColsAtCompileTime, Derived2::MaxColsAtCompileTime)
+ };
+ typedef Eigen::Matrix<Scalar,
+ RowsAtCompileTime,
+ ColsAtCompileTime,
+ Options,
+ MaxRowsAtCompileTime,
+ MaxColsAtCompileTime> type;
+ };
+
+ template<typename Derived1, typename Derived2>
+ typename hstack_return<Derived1,Derived2>::type
+ HStack (const Eigen::MatrixBase<Derived1>& lhs, const Eigen::MatrixBase<Derived2>& rhs) {
+ typename hstack_return<Derived1,Derived2>::type res;
+ res.resize(lhs.rows(), lhs.cols()+rhs.cols());
+ res << lhs, rhs;
+ return res;
+ }
+
+
+ template<typename Derived1, typename Derived2>
+ struct vstack_return {
+ typedef typename Derived1::Scalar Scalar;
+ enum {
+ RowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::RowsAtCompileTime, Derived2::RowsAtCompileTime),
+ ColsAtCompileTime = Derived1::ColsAtCompileTime,
+ Options = Derived1::Flags&Eigen::RowMajorBit ? Eigen::RowMajor : 0,
+ MaxRowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxRowsAtCompileTime, Derived2::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = Derived1::MaxColsAtCompileTime
+ };
+ typedef Eigen::Matrix<Scalar,
+ RowsAtCompileTime,
+ ColsAtCompileTime,
+ Options,
+ MaxRowsAtCompileTime,
+ MaxColsAtCompileTime> type;
+ };
+
+ template<typename Derived1, typename Derived2>
+ typename vstack_return<Derived1,Derived2>::type
+ VStack (const Eigen::MatrixBase<Derived1>& lhs, const Eigen::MatrixBase<Derived2>& rhs) {
+ typename vstack_return<Derived1,Derived2>::type res;
+ res.resize(lhs.rows()+rhs.rows(), lhs.cols());
+ res << lhs, rhs;
+ return res;
+ }
+
+
+#else //_WIN32
+
+ // Since it is not possible to typedef privately here, use a macro.
+ // Always take dynamic columns if either side is dynamic.
+ #define COLS \
+ ((ColsLeft == Eigen::Dynamic || ColsRight == Eigen::Dynamic) \
+ ? Eigen::Dynamic : (ColsLeft + ColsRight))
+
+ // Same as above, except that prefer fixed size if either is fixed.
+ #define ROWS \
+ ((RowsLeft == Eigen::Dynamic && RowsRight == Eigen::Dynamic) \
+ ? Eigen::Dynamic \
+ : ((RowsLeft == Eigen::Dynamic) \
+ ? RowsRight \
+ : RowsLeft \
+ ) \
+ )
+
+ // TODO(keir): Add a static assert if both rows are at compiletime.
+ template<typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
+ Eigen::Matrix<T, ROWS, COLS>
+ HStack(const Eigen::Matrix<T, RowsLeft, ColsLeft> &left,
+ const Eigen::Matrix<T, RowsRight, ColsRight> &right) {
+ assert(left.rows() == right.rows());
+ int n = left.rows();
+ int m1 = left.cols();
+ int m2 = right.cols();
+
+ Eigen::Matrix<T, ROWS, COLS> stacked(n, m1 + m2);
+ stacked.block(0, 0, n, m1) = left;
+ stacked.block(0, m1, n, m2) = right;
+ return stacked;
+ }
+
+ // Reuse the above macros by swapping the order of Rows and Cols. Nasty, but
+ // the duplication is worse.
+ // TODO(keir): Add a static assert if both rows are at compiletime.
+ // TODO(keir): Mail eigen list about making this work for general expressions
+ // rather than only matrix types.
+ template<typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
+ Eigen::Matrix<T, COLS, ROWS>
+ VStack(const Eigen::Matrix<T, ColsLeft, RowsLeft> &top,
+ const Eigen::Matrix<T, ColsRight, RowsRight> &bottom) {
+ assert(top.cols() == bottom.cols());
+ int n1 = top.rows();
+ int n2 = bottom.rows();
+ int m = top.cols();
+
+ Eigen::Matrix<T, COLS, ROWS> stacked(n1 + n2, m);
+ stacked.block(0, 0, n1, m) = top;
+ stacked.block(n1, 0, n2, m) = bottom;
+ return stacked;
+ }
+ #undef COLS
+ #undef ROWS
+#endif //_WIN32
+
+
+
+void HorizontalStack(const Mat &left, const Mat &right, Mat *stacked);
+
+template<typename TTop, typename TBot, typename TStacked>
+void VerticalStack(const TTop &top, const TBot &bottom, TStacked *stacked) {
+ assert(top.cols() == bottom.cols());
+ int n1 = top.rows();
+ int n2 = bottom.rows();
+ int m = top.cols();
+
+ stacked->resize(n1 + n2, m);
+ stacked->block(0, 0, n1, m) = top;
+ stacked->block(n1, 0, n2, m) = bottom;
+}
+
+void MatrixColumn(const Mat &A, int i, Vec2 *v);
+void MatrixColumn(const Mat &A, int i, Vec3 *v);
+void MatrixColumn(const Mat &A, int i, Vec4 *v);
+
+template <typename TMat, typename TCols>
+TMat ExtractColumns(const TMat &A, const TCols &columns) {
+ TMat compressed(A.rows(), columns.size());
+ for (int i = 0; i < columns.size(); ++i) {
+ compressed.col(i) = A.col(columns[i]);
+ }
+ return compressed;
+}
+
+template <typename TMat, typename TDest>
+void reshape(const TMat &a, int rows, int cols, TDest *b) {
+ assert(a.rows()*a.cols() == rows*cols);
+ b->resize(rows, cols);
+ for (int i = 0; i < rows; i++) {
+ for (int j = 0; j < cols; j++) {
+ (*b)(i, j) = a[cols*i + j];
+ }
+ }
+}
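+
+// Example (a minimal sketch): unpack a stacked 12-vector (e.g. from
+// Nullspace) into a 3x4 camera matrix, using the row-major layout above.
+//
+// Vec12 p;  // Assumed already filled.
+// Mat34 P;
+// reshape(p, 3, 4, &P);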
+
+inline bool isnan(double i) {
+#ifdef WIN32
+ return _isnan(i) > 0;
+#else
+ return std::isnan(i);
+#endif
+}
+
+/// Ceil function that has the same behaviour for positive
+/// and negative values
+template <typename FloatType>
+FloatType ceil0(const FloatType& value) {
+ FloatType result = std::ceil( std::fabs( value ) );
+ return (value < 0.0) ? -result : result;
+}
+
+/// Returns the skew anti-symmetric matrix of a vector
+inline Mat3 SkewMat(const Vec3 &x) {
+ Mat3 skew;
+ skew << 0 , -x(2), x(1),
+ x(2), 0 , -x(0),
+ -x(1), x(0), 0;
+ return skew;
+}
+/// Returns the skew anti-symmetric matrix of a vector with only
+/// the first two (independent) rows
+inline Mat23 SkewMatMinimal(const Vec2 &x) {
+ Mat23 skew;
+ skew << 0,-1, x(1),
+ 1, 0, -x(0);
+ return skew;
+}
+} // namespace libmv
+
+#endif // LIBMV_NUMERIC_NUMERIC_H
diff --git a/extern/libmv/libmv/numeric/poly.cc b/extern/libmv/libmv/numeric/poly.cc
new file mode 100644
index 00000000000..d96e3c104c7
--- /dev/null
+++ b/extern/libmv/libmv/numeric/poly.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Routines for solving polynomials.
+
+// TODO(keir): Add a solver for degree > 3 polynomials.
diff --git a/extern/libmv/libmv/numeric/poly.h b/extern/libmv/libmv/numeric/poly.h
new file mode 100644
index 00000000000..cb1d65b32c4
--- /dev/null
+++ b/extern/libmv/libmv/numeric/poly.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_NUMERIC_POLY_H_
+#define LIBMV_NUMERIC_POLY_H_
+
+#include <algorithm>
+#include <cmath>
+#include <stdio.h>
+
+namespace libmv {
+
+// Solve the cubic polynomial
+//
+// x^3 + a*x^2 + b*x + c = 0
+//
+// The number of roots (from zero to three) is returned. If the number of roots
+// is less than three, then higher numbered x's are not changed. For example,
+// if there are 2 roots, only x0 and x1 are set.
+//
+// The GSL cubic solver was used as a reference for this routine.
+template<typename Real>
+int SolveCubicPolynomial(Real a, Real b, Real c,
+ Real *x0, Real *x1, Real *x2) {
+ Real q = a * a - 3 * b;
+ Real r = 2 * a * a * a - 9 * a * b + 27 * c;
+
+ Real Q = q / 9;
+ Real R = r / 54;
+
+ Real Q3 = Q * Q * Q;
+ Real R2 = R * R;
+
+ Real CR2 = 729 * r * r;
+ Real CQ3 = 2916 * q * q * q;
+
+ if (R == 0 && Q == 0) {
+ // Triple root in one place.
+ *x0 = *x1 = *x2 = -a / 3;
+ return 3;
+
+ } else if (CR2 == CQ3) {
+ // This test is actually R2 == Q3, written in a form suitable for exact
+ // computation with integers.
+ //
+ // Due to finite precision some double roots may be missed, and considered
+ // to be a pair of complex roots z = x +/- epsilon i close to the real
+ // axis.
+ Real sqrtQ = sqrt (Q);
+ if (R > 0) {
+ *x0 = -2 * sqrtQ - a / 3;
+ *x1 = sqrtQ - a / 3;
+ *x2 = sqrtQ - a / 3;
+ } else {
+ *x0 = -sqrtQ - a / 3;
+ *x1 = -sqrtQ - a / 3;
+ *x2 = 2 * sqrtQ - a / 3;
+ }
+ return 3;
+
+ } else if (CR2 < CQ3) {
+ // This case is equivalent to R2 < Q3.
+ Real sqrtQ = sqrt (Q);
+ Real sqrtQ3 = sqrtQ * sqrtQ * sqrtQ;
+ Real theta = acos (R / sqrtQ3);
+ Real norm = -2 * sqrtQ;
+ *x0 = norm * cos (theta / 3) - a / 3;
+ *x1 = norm * cos ((theta + 2.0 * M_PI) / 3) - a / 3;
+ *x2 = norm * cos ((theta - 2.0 * M_PI) / 3) - a / 3;
+
+ // Put the roots in ascending order.
+ if (*x0 > *x1) {
+ std::swap(*x0, *x1);
+ }
+ if (*x1 > *x2) {
+ std::swap(*x1, *x2);
+ if (*x0 > *x1) {
+ std::swap(*x0, *x1);
+ }
+ }
+ return 3;
+ }
+ Real sgnR = (R >= 0 ? 1 : -1);
+ Real A = -sgnR * pow (fabs (R) + sqrt (R2 - Q3), 1.0/3.0);
+ Real B = Q / A ;
+ *x0 = A + B - a / 3;
+ return 1;
+}
+
+// The coefficients are in ascending powers, i.e. coeffs[N]*x^N.
+template<typename Real>
+int SolveCubicPolynomial(const Real *coeffs, Real *solutions) {
+ if (coeffs[3] == 0.0) {
+ // TODO(keir): This is a quadratic not a cubic. Implement a quadratic
+ // solver!
+ return 0;
+ }
+ Real a = coeffs[2] / coeffs[3];
+ Real b = coeffs[1] / coeffs[3];
+ Real c = coeffs[0] / coeffs[3];
+ return SolveCubicPolynomial(a, b, c,
+ solutions + 0,
+ solutions + 1,
+ solutions + 2);
+}
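+
+// Example (a minimal sketch): x^3 - 6x^2 + 11x - 6 = (x - 1)(x - 2)(x - 3).
+//
+// double coeffs[4] = {-6, 11, -6, 1};  // Ascending powers.
+// double roots[3];
+// int n = SolveCubicPolynomial(coeffs, roots);  // n == 3; roots 1, 2, 3.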
+} // namespace libmv
+#endif // LIBMV_NUMERIC_POLY_H_
diff --git a/extern/libmv/libmv/numeric/tinyvector.cc b/extern/libmv/libmv/numeric/tinyvector.cc
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/extern/libmv/libmv/numeric/tinyvector.cc
diff --git a/extern/libmv/libmv/simple_pipeline/bundle.cc b/extern/libmv/libmv/simple_pipeline/bundle.cc
new file mode 100644
index 00000000000..cb8822dcf44
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/bundle.cc
@@ -0,0 +1,184 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#define V3DLIB_ENABLE_SUITESPARSE 1
+
+#include <map>
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/fundamental.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "third_party/ssba/Geometry/v3d_cameramatrix.h"
+#include "third_party/ssba/Geometry/v3d_metricbundle.h"
+#include "third_party/ssba/Math/v3d_linear.h"
+#include "third_party/ssba/Math/v3d_linear_utils.h"
+
+namespace libmv {
+
+void EuclideanBundle(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction) {
+ vector<Marker> markers = tracks.AllMarkers();
+
+ // "index" in this context is the index that V3D's optimizer will see. The
+ // V3D index must be dense in that the cameras are numbered 0...n-1, which is
+ // not the case for the "image" numbering that arises from the tracks
+ // structure. The complicated mapping is necessary to convert between the two
+ // representations.
+ std::map<EuclideanCamera *, int> camera_to_index;
+ std::map<EuclideanPoint *, int> point_to_index;
+ vector<EuclideanCamera *> index_to_camera;
+ vector<EuclideanPoint *> index_to_point;
+ int num_cameras = 0;
+ int num_points = 0;
+ for (int i = 0; i < markers.size(); ++i) {
+ const Marker &marker = markers[i];
+ EuclideanCamera *camera = reconstruction->CameraForImage(marker.image);
+ EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+ if (camera && point) {
+ if (camera_to_index.find(camera) == camera_to_index.end()) {
+ camera_to_index[camera] = num_cameras;
+ index_to_camera.push_back(camera);
+ num_cameras++;
+ }
+ if (point_to_index.find(point) == point_to_index.end()) {
+ point_to_index[point] = num_points;
+ index_to_point.push_back(point);
+ num_points++;
+ }
+ }
+ }
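+
+ // For example (hypothetical image numbers): if the markers reference
+ // images {3, 7, 9}, the three cameras get dense V3D indices {0, 1, 2},
+ // and index_to_camera[1] maps V3D camera 1 back to image 7.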
+
+ // Make a V3D identity matrix, needed in a few places for K, since this
+ // assumes a calibrated setup.
+ V3D::Matrix3x3d identity3x3;
+ identity3x3[0][0] = 1.0;
+ identity3x3[0][1] = 0.0;
+ identity3x3[0][2] = 0.0;
+ identity3x3[1][0] = 0.0;
+ identity3x3[1][1] = 1.0;
+ identity3x3[1][2] = 0.0;
+ identity3x3[2][0] = 0.0;
+ identity3x3[2][1] = 0.0;
+ identity3x3[2][2] = 1.0;
+
+ // Convert libmv's cameras to V3D's cameras.
+ std::vector<V3D::CameraMatrix> v3d_cameras(index_to_camera.size());
+ for (int k = 0; k < index_to_camera.size(); ++k) {
+ V3D::Matrix3x3d R;
+ V3D::Vector3d t;
+
+ // Libmv's rotation matrix type.
+ const Mat3 &R_libmv = index_to_camera[k]->R;
+ const Vec3 &t_libmv = index_to_camera[k]->t;
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ R[i][j] = R_libmv(i, j);
+ }
+ t[i] = t_libmv(i);
+ }
+ v3d_cameras[k].setIntrinsic(identity3x3);
+ v3d_cameras[k].setRotation(R);
+ v3d_cameras[k].setTranslation(t);
+ }
+ LG << "Number of cameras: " << index_to_camera.size();
+
+ // Convert libmv's points to V3D's points.
+ std::vector<V3D::Vector3d> v3d_points(index_to_point.size());
+ for (int i = 0; i < index_to_point.size(); i++) {
+ v3d_points[i][0] = index_to_point[i]->X(0);
+ v3d_points[i][1] = index_to_point[i]->X(1);
+ v3d_points[i][2] = index_to_point[i]->X(2);
+ }
+ LG << "Number of points: " << index_to_point.size();
+
+ // Convert libmv's measurements to v3d measurements.
+ int num_residuals = 0;
+ std::vector<V3D::Vector2d> v3d_measurements;
+ std::vector<int> v3d_camera_for_measurement;
+ std::vector<int> v3d_point_for_measurement;
+ for (int i = 0; i < markers.size(); ++i) {
+ EuclideanCamera *camera = reconstruction->CameraForImage(markers[i].image);
+ EuclideanPoint *point = reconstruction->PointForTrack(markers[i].track);
+ if (!camera || !point) {
+ continue;
+ }
+ V3D::Vector2d v3d_point;
+ v3d_point[0] = markers[i].x;
+ v3d_point[1] = markers[i].y;
+ v3d_measurements.push_back(v3d_point);
+ v3d_camera_for_measurement.push_back(camera_to_index[camera]);
+ v3d_point_for_measurement.push_back(point_to_index[point]);
+ num_residuals++;
+ }
+ LG << "Number of residuals: " << num_residuals;
+
+ // This is a calibrated reconstruction, so use zero distortion.
+ V3D::StdDistortionFunction v3d_distortion;
+ v3d_distortion.k1 = 0;
+ v3d_distortion.k2 = 0;
+ v3d_distortion.p1 = 0;
+ v3d_distortion.p2 = 0;
+
+ // Finally, run the bundle adjustment.
+ double const inlierThreshold = 500000.0;
+ V3D::CommonInternalsMetricBundleOptimizer opt(V3D::FULL_BUNDLE_METRIC,
+ inlierThreshold,
+ identity3x3,
+ v3d_distortion,
+ v3d_cameras,
+ v3d_points,
+ v3d_measurements,
+ v3d_camera_for_measurement,
+ v3d_point_for_measurement);
+ opt.maxIterations = 50;
+ opt.minimize();
+ LG << "Bundle status: " << opt.status;
+
+ // Convert V3D's cameras back to libmv's cameras.
+ for (int k = 0; k < num_cameras; k++) {
+ V3D::Matrix3x4d const Rt = v3d_cameras[k].getOrientation();
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ index_to_camera[k]->R(i, j) = Rt[i][j];
+ }
+ index_to_camera[k]->t(i) = Rt[i][3];
+ }
+ }
+
+ // Convert V3D's points back to libmv's points.
+ for (int k = 0; k < num_points; k++) {
+ for (int i = 0; i < 3; ++i) {
+ index_to_point[k]->X(i) = v3d_points[k][i];
+ }
+ }
+}
+
+void ProjectiveBundle(const Tracks & /*tracks*/,
+ ProjectiveReconstruction * /*reconstruction*/) {
+ // TODO(keir): Implement this! This can't work until we have a better bundler
+ // than SSBA, since SSBA has no support for projective bundling.
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/bundle.h b/extern/libmv/libmv/simple_pipeline/bundle.h
new file mode 100644
index 00000000000..c7fb2a79607
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/bundle.h
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_BUNDLE_H
+#define LIBMV_SIMPLE_PIPELINE_BUNDLE_H
+
+namespace libmv {
+
+class EuclideanReconstruction;
+class ProjectiveReconstruction;
+class Tracks;
+
+/*!
+ Refine camera poses and 3D coordinates using bundle adjustment.
+
+ This routine adjusts all cameras and points in \a *reconstruction. This
+ assumes a full observation for reconstructed tracks; this implies that if
+ there is a reconstructed 3D point (a bundle) for a track, then all markers
+ for that track will be included in the minimization. \a tracks should
+ contain markers used in the initial reconstruction.
+
+ The cameras and bundles (3D points) are refined in-place.
+
+ \note This assumes an outlier-free set of markers.
+ \note This assumes a calibrated reconstruction, e.g. the markers are
+ already corrected for camera intrinsics and radial distortion.
+
+ \sa EuclideanResect, EuclideanIntersect, EuclideanReconstructTwoFrames
+*/
+void EuclideanBundle(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction);
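+
+// A minimal calling sketch (the setup is illustrative only):
+//
+//   Tracks tracks;                           // Markers from 2D tracking.
+//   EuclideanReconstruction reconstruction;  // Cameras/points initialized.
+//   EuclideanBundle(tracks, &reconstruction);  // Refined in-place.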
+
+/*!
+ Refine camera poses and 3D coordinates using bundle adjustment.
+
+ This routine adjusts all cameras and points in \a *reconstruction. This
+ assumes a full observation for reconstructed tracks; this implies that if
+ there is a reconstructed 3D point (a bundle) for a track, then all markers
+ for that track will be included in the minimization. \a tracks should
+ contain markers used in the initial reconstruction.
+
+ The cameras and bundles (homogeneous 3D points) are refined in-place.
+
+ \note This assumes an outlier-free set of markers.
+ \note This assumes that radial distortion is already corrected for, but
+ does not assume that the other intrinsics are.
+
+ \sa ProjectiveResect, ProjectiveIntersect, ProjectiveReconstructTwoFrames
+*/
+void ProjectiveBundle(const Tracks &tracks,
+ ProjectiveReconstruction *reconstruction);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_BUNDLE_H
diff --git a/extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc b/extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
new file mode 100644
index 00000000000..110a16d1812
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
@@ -0,0 +1,345 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+
+namespace libmv {
+
+struct Offset {
+ signed char ix, iy;
+ unsigned char fx,fy;
+};
+
+struct Grid {
+ struct Offset *offset;
+ int width, height;
+};
+
+static struct Grid *copyGrid(struct Grid *from)
+{
+ struct Grid *to = NULL;
+
+ if (from) {
+ to = new Grid;
+
+ to->width = from->width;
+ to->height = from->height;
+
+ to->offset = new Offset[to->width*to->height];
+ memcpy(to->offset, from->offset, sizeof(struct Offset)*to->width*to->height);
+ }
+
+ return to;
+}
+
+CameraIntrinsics::CameraIntrinsics()
+ : K_(Mat3::Identity()),
+ image_width_(0),
+ image_height_(0),
+ k1_(0),
+ k2_(0),
+ k3_(0),
+ p1_(0),
+ p2_(0),
+ distort_(0),
+ undistort_(0) {}
+
+CameraIntrinsics::CameraIntrinsics(const CameraIntrinsics &from)
+ : K_(from.K_),
+ image_width_(from.image_width_),
+ image_height_(from.image_height_),
+ k1_(from.k1_),
+ k2_(from.k2_),
+ k3_(from.k3_),
+ p1_(from.p1_),
+ p2_(from.p2_)
+{
+ distort_ = copyGrid(from.distort_);
+ undistort_ = copyGrid(from.undistort_);
+}
+
+CameraIntrinsics::~CameraIntrinsics() {
+ FreeLookupGrid();
+}
+
+/// Set the entire calibration matrix at once.
+void CameraIntrinsics::SetK(const Mat3 new_k) {
+ K_ = new_k;
+ FreeLookupGrid();
+}
+
+/// Set both x and y focal length in pixels.
+void CameraIntrinsics::SetFocalLength(double focal_x, double focal_y) {
+ K_(0, 0) = focal_x;
+ K_(1, 1) = focal_y;
+ FreeLookupGrid();
+}
+
+void CameraIntrinsics::SetPrincipalPoint(double cx, double cy) {
+ K_(0, 2) = cx;
+ K_(1, 2) = cy;
+ FreeLookupGrid();
+}
+
+void CameraIntrinsics::SetImageSize(int width, int height) {
+ image_width_ = width;
+ image_height_ = height;
+ FreeLookupGrid();
+}
+
+void CameraIntrinsics::SetRadialDistortion(double k1, double k2, double k3) {
+ k1_ = k1;
+ k2_ = k2;
+ k3_ = k3;
+ FreeLookupGrid();
+}
+
+void CameraIntrinsics::SetTangentialDistortion(double p1, double p2) {
+ p1_ = p1;
+ p2_ = p2;
+ FreeLookupGrid();
+}
+
+void CameraIntrinsics::ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ double x = normalized_x;
+ double y = normalized_y;
+
+ // Apply distortion to the normalized points to get (xd, yd).
+ double r2 = x*x + y*y;
+ double r4 = r2 * r2;
+ double r6 = r4 * r2;
+ double r_coeff = (1 + k1_*r2 + k2_*r4 + k3_*r6);
+ double xd = x * r_coeff + 2*p1_*x*y + p2_*(r2 + 2*x*x);
+ double yd = y * r_coeff + 2*p2_*x*y + p1_*(r2 + 2*y*y);
+
+ // Apply focal length and principal point to get the final image coordinates.
+ *image_x = focal_length_x() * xd + principal_point_x();
+ *image_y = focal_length_y() * yd + principal_point_y();
+}
+
+struct InvertIntrinsicsCostFunction {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec2 XMatrixType;
+
+ InvertIntrinsicsCostFunction(const CameraIntrinsics &intrinsics,
+ double image_x, double image_y)
+ : intrinsics(intrinsics), x(image_x), y(image_y) {}
+
+ Vec2 operator()(const Vec2 &u) const {
+ double xx, yy;
+ intrinsics.ApplyIntrinsics(u(0), u(1), &xx, &yy);
+ Vec2 fx;
+ fx << (xx - x), (yy - y);
+ return fx;
+ }
+ const CameraIntrinsics &intrinsics;
+ double x, y;
+};
+
+void CameraIntrinsics::InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ // Compute the initial guess. For a camera with no distortion, this will also
+ // be the final answer; the LM iteration will terminate immediately.
+ Vec2 normalized;
+ normalized(0) = (image_x - principal_point_x()) / focal_length_x();
+ normalized(1) = (image_y - principal_point_y()) / focal_length_y();
+
+ typedef LevenbergMarquardt<InvertIntrinsicsCostFunction> Solver;
+
+ InvertIntrinsicsCostFunction intrinsics_cost(*this, image_x, image_y);
+ Solver::SolverParameters params;
+ Solver solver(intrinsics_cost);
+
+ /*Solver::Results results =*/ solver.minimize(params, &normalized);
+
+ // TODO(keir): Better error handling.
+
+ *normalized_x = normalized(0);
+ *normalized_y = normalized(1);
+}
+
+// TODO(MatthiasF): downsample lookup
+template<typename WarpFunction>
+void CameraIntrinsics::ComputeLookupGrid(Grid* grid, int width, int height) {
+ double aspx = (double)width / image_width_;
+ double aspy = (double)height / image_height_;
+
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ double src_x = x / aspx, src_y = y / aspy;
+ double warp_x, warp_y;
+ WarpFunction(this,src_x,src_y,&warp_x,&warp_y);
+ warp_x = warp_x*aspx;
+ warp_y = warp_y*aspy;
+ int ix = int(warp_x), iy = int(warp_y);
+ int fx = round((warp_x-ix)*256), fy = round((warp_y-iy)*256);
+ if(fx == 256) { fx=0; ix++; }
+ if(fy == 256) { fy=0; iy++; }
+ // Use nearest border pixel
+ if( ix < 0 ) { ix = 0, fx = 0; }
+ if( iy < 0 ) { iy = 0, fy = 0; }
+ if( ix >= width-2 ) ix = width-2;
+ if( iy >= height-2 ) iy = height-2;
+ if ( ix-x > -128 && ix-x < 128 && iy-y > -128 && iy-y < 128 ) {
+ Offset offset = { ix-x, iy-y, fx, fy };
+ grid->offset[y*width+x] = offset;
+ } else {
+ Offset offset = { 0, 0, 0, 0 };
+ grid->offset[y*width+x] = offset;
+ }
+ }
+ }
+}
+
+// TODO(MatthiasF): cubic B-Spline image sampling, bilinear lookup
+template<typename T,int N>
+static void Warp(const Grid* grid, const T* src, T* dst,
+ int width, int height) {
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ Offset offset = grid->offset[y*width+x];
+ const T* s = &src[((y+offset.iy)*width+(x+offset.ix))*N];
+ for (int i = 0; i < N; i++) {
+ dst[(y*width+x)*N+i] = ((s[ i] * (256-offset.fx) + s[ N+i] * offset.fx) * (256-offset.fy)
+ +(s[width*N+i] * (256-offset.fx) + s[width*N+N+i] * offset.fx) * offset.fy) / (256*256);
+ }
+ }
+ }
+}
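+
+// Worked example of the fixed-point blend above (hypothetical values): a
+// warped source position of (10.5, 20.25) is stored as ix=10, fx=128 and
+// iy=20, fy=64, so the four neighboring pixels are blended with weights
+// (256-128)*(256-64), 128*(256-64), (256-128)*64 and 128*64. The weights sum
+// to 256*256, reproducing plain bilinear interpolation in 8.8 fixed point.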
+
+void CameraIntrinsics::FreeLookupGrid() {
+ if(distort_) {
+ delete [] distort_->offset;
+ delete distort_;
+ distort_ = NULL;
+ }
+
+ if(undistort_) {
+ delete [] undistort_->offset;
+ delete undistort_;
+ undistort_ = NULL;
+ }
+}
+
+// FIXME: C++ template limitations make things complicated, but maybe there is a simpler method.
+struct ApplyIntrinsicsFunction {
+ ApplyIntrinsicsFunction(CameraIntrinsics* intrinsics, double x, double y,
+ double *warp_x, double *warp_y) {
+ intrinsics->ApplyIntrinsics(
+ (x-intrinsics->principal_point_x())/intrinsics->focal_length_x(),
+ (y-intrinsics->principal_point_y())/intrinsics->focal_length_y(),
+ warp_x, warp_y);
+ }
+};
+struct InvertIntrinsicsFunction {
+ InvertIntrinsicsFunction(CameraIntrinsics* intrinsics, double x, double y,
+ double *warp_x, double *warp_y) {
+ intrinsics->InvertIntrinsics(x,y,warp_x,warp_y);
+ *warp_x = *warp_x*intrinsics->focal_length_x()+intrinsics->principal_point_x();
+ *warp_y = *warp_y*intrinsics->focal_length_y()+intrinsics->principal_point_y();
+ }
+};
+
+void CameraIntrinsics::CheckDistortLookupGrid(int width, int height)
+{
+ if(distort_) {
+ if(distort_->width != width || distort_->height != height) {
+ delete [] distort_->offset;
+ distort_->offset = NULL;
+ }
+ } else {
+ distort_ = new Grid;
+ distort_->offset = NULL;
+ }
+
+ if(!distort_->offset) {
+ distort_->offset = new Offset[width*height];
+ ComputeLookupGrid<InvertIntrinsicsFunction>(distort_,width,height);
+ }
+
+ distort_->width = width;
+ distort_->height = height;
+}
+
+void CameraIntrinsics::CheckUndistortLookupGrid(int width, int height)
+{
+ if(undistort_) {
+ if(undistort_->width != width || undistort_->height != height) {
+ delete [] undistort_->offset;
+ undistort_->offset = NULL;
+ }
+ } else {
+ undistort_ = new Grid;
+ undistort_->offset = NULL;
+ }
+
+ if(!undistort_->offset) {
+ undistort_->offset = new Offset[width*height];
+ ComputeLookupGrid<ApplyIntrinsicsFunction>(undistort_,width,height);
+ }
+
+ undistort_->width = width;
+ undistort_->height = height;
+}
+
+void CameraIntrinsics::Distort(const float* src, float* dst, int width, int height, int channels) {
+ CheckDistortLookupGrid(width, height);
+ if(channels==1) Warp<float,1>(distort_,src,dst,width,height);
+ else if(channels==2) Warp<float,2>(distort_,src,dst,width,height);
+ else if(channels==3) Warp<float,3>(distort_,src,dst,width,height);
+ else if(channels==4) Warp<float,4>(distort_,src,dst,width,height);
+ //else assert("channels must be between 1 and 4");
+}
+
+void CameraIntrinsics::Distort(const unsigned char* src, unsigned char* dst, int width, int height, int channels) {
+ CheckDistortLookupGrid(width, height);
+ if(channels==1) Warp<unsigned char,1>(distort_,src,dst,width,height);
+ else if(channels==2) Warp<unsigned char,2>(distort_,src,dst,width,height);
+ else if(channels==3) Warp<unsigned char,3>(distort_,src,dst,width,height);
+ else if(channels==4) Warp<unsigned char,4>(distort_,src,dst,width,height);
+ //else assert("channels must be between 1 and 4");
+}
+
+void CameraIntrinsics::Undistort(const float* src, float* dst, int width, int height, int channels) {
+ CheckUndistortLookupGrid(width, height);
+ if(channels==1) Warp<float,1>(undistort_,src,dst,width,height);
+ else if(channels==2) Warp<float,2>(undistort_,src,dst,width,height);
+ else if(channels==3) Warp<float,3>(undistort_,src,dst,width,height);
+ else if(channels==4) Warp<float,4>(undistort_,src,dst,width,height);
+ //else assert("channels must be between 1 and 4");
+}
+
+void CameraIntrinsics::Undistort(const unsigned char* src, unsigned char* dst, int width, int height, int channels) {
+ CheckUndistortLookupGrid(width, height);
+ if(channels==1) Warp<unsigned char,1>(undistort_,src,dst,width,height);
+ else if(channels==2) Warp<unsigned char,2>(undistort_,src,dst,width,height);
+ else if(channels==3) Warp<unsigned char,3>(undistort_,src,dst,width,height);
+ else if(channels==4) Warp<unsigned char,4>(undistort_,src,dst,width,height);
+ //else assert("channels must be between 1 and 4");
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/camera_intrinsics.h b/extern/libmv/libmv/simple_pipeline/camera_intrinsics.h
new file mode 100644
index 00000000000..f5255713e89
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/camera_intrinsics.h
@@ -0,0 +1,152 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_CAMERA_INTRINSICS_H_
+#define LIBMV_SIMPLE_PIPELINE_CAMERA_INTRINSICS_H_
+
+#include <Eigen/Core>
+typedef Eigen::Matrix<double, 3, 3> Mat3;
+
+namespace libmv {
+
+struct Grid;
+
+class CameraIntrinsics {
+ public:
+ CameraIntrinsics();
+ CameraIntrinsics(const CameraIntrinsics &from);
+ ~CameraIntrinsics();
+
+ const Mat3 &K() const { return K_; }
+ // FIXME(MatthiasF): these should be CamelCase methods
+ double focal_length() const { return K_(0, 0); }
+ double focal_length_x() const { return K_(0, 0); }
+ double focal_length_y() const { return K_(1, 1); }
+ double principal_point_x() const { return K_(0, 2); }
+ double principal_point_y() const { return K_(1, 2); }
+ int image_width() const { return image_width_; }
+ int image_height() const { return image_height_; }
+ double k1() const { return k1_; }
+ double k2() const { return k2_; }
+ double k3() const { return k3_; }
+ double p1() const { return p1_; }
+ double p2() const { return p2_; }
+
+ /// Set the entire calibration matrix at once.
+ void SetK(const Mat3 new_k);
+
+ /// Set both x and y focal length in pixels.
+ void SetFocalLength(double focal_x, double focal_y);
+
+ void SetPrincipalPoint(double cx, double cy);
+
+ void SetImageSize(int width, int height);
+
+ void SetRadialDistortion(double k1, double k2, double k3 = 0);
+
+ void SetTangentialDistortion(double p1, double p2);
+
+ /*!
+ Apply camera intrinsics to the normalized point to get image coordinates.
+
+ This applies the lens distortion to a point which is in normalized
+ camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ coordinates in pixels.
+ */
+ void ApplyIntrinsics(double normalized_x, double normalized_y,
+ double *image_x, double *image_y) const;
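+
+ // For example (hypothetical intrinsics): with focal length 1000, principal
+ // point (960, 540) and zero distortion, ApplyIntrinsics(0.1, 0.0, &x, &y)
+ // yields x == 1060 and y == 540.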
+
+ /*!
+ Invert camera intrinsics on the image point to get normalized coordinates.
+
+ This reverses the effect of lens distortion on a point which is in image
+ coordinates to get normalized camera coordinates.
+ */
+ void InvertIntrinsics(double image_x, double image_y,
+ double *normalized_x, double *normalized_y) const;
+
+ /*!
+ Distort an image using the current camera intrinsics.
+
+ The distorted image is computed in \a dst using samples from \a src.
+ Both buffers should be \a width x \a height x \a channels sized.
+
+ \note This is the reference implementation using floating point images.
+ */
+ void Distort(const float* src, float* dst,
+ int width, int height, int channels);
+ /*!
+ Distort an image using the current camera intrinsics.
+
+ The distorted image is computed in \a dst using samples from \a src.
+ Both buffers should be \a width x \a height x \a channels sized.
+
+ \note This version is much faster.
+ */
+ void Distort(const unsigned char* src, unsigned char* dst,
+ int width, int height, int channels);
+ /*!
+ Undistort an image using the current camera intrinsics.
+
+ The undistorted image is computed in \a dst using samples from \a src.
+ Both buffers should be \a width x \a height x \a channels sized.
+
+ \note This is the reference implementation using floating point images.
+ */
+ void Undistort(const float* src, float* dst,
+ int width, int height, int channels);
+ /*!
+ Undistort an image using the current camera intrinsics.
+
+ The undistorted image is computed in \a dst using samples from \a src.
+ Both buffers should be \a width x \a height x \a channels sized.
+
+ \note This version is much faster.
+ */
+ void Undistort(const unsigned char* src, unsigned char* dst,
+ int width, int height, int channels);
+
+ private:
+ template<typename WarpFunction> void ComputeLookupGrid(struct Grid* grid, int width, int height);
+ void CheckUndistortLookupGrid(int width, int height);
+ void CheckDistortLookupGrid(int width, int height);
+ void FreeLookupGrid();
+
+ // The traditional intrinsics matrix from x = K[R|t]X.
+ Mat3 K_;
+
+ // This is the size of the image. This is necessary to, for example, handle
+ // the case of processing a scaled image.
+ int image_width_;
+ int image_height_;
+
+ // OpenCV's distortion model with third order polynomial radial distortion
+ // terms and second order tangential distortion. The distortion is applied to
+ // the normalized coordinates before the focal length, which makes them
+ // independent of image size.
+ double k1_, k2_, k3_, p1_, p2_;
+
+ struct Grid *distort_;
+ struct Grid *undistort_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_CAMERA_INTRINSICS_H_
diff --git a/extern/libmv/libmv/simple_pipeline/detect.cc b/extern/libmv/libmv/simple_pipeline/detect.cc
new file mode 100644
index 00000000000..6fc0cdd120a
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/detect.cc
@@ -0,0 +1,110 @@
+/****************************************************************************
+**
+** Copyright (c) 2011 libmv authors.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to
+** deal in the Software without restriction, including without limitation the
+** rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+** sell copies of the Software, and to permit persons to whom the Software is
+** furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+** IN THE SOFTWARE.
+**
+****************************************************************************/
+
+#include "libmv/simple_pipeline/detect.h"
+#include <stdlib.h>
+#include <string.h>
+
+namespace libmv {
+
+typedef unsigned int uint;
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+static uint SAD(const ubyte* imageA, const ubyte* imageB, int strideA, int strideB) {
+ __m128i a = _mm_setzero_si128();
+ for(int i = 0; i < 16; i++) {
+ a = _mm_adds_epu16(a, _mm_sad_epu8( _mm_loadu_si128((__m128i*)(imageA+i*strideA)),
+ _mm_loadu_si128((__m128i*)(imageB+i*strideB))));
+ }
+ return _mm_extract_epi16(a,0) + _mm_extract_epi16(a,4);
+}
+#else
+static uint SAD(const ubyte* imageA, const ubyte* imageB, int strideA, int strideB) {
+ uint sad=0;
+ for(int i = 0; i < 16; i++) {
+ for(int j = 0; j < 16; j++) {
+ sad += abs((int)imageA[i*strideA+j] - imageB[i*strideB+j]);
+ }
+ }
+ return sad;
+}
+#endif
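+
+// Both SAD variants above compute the same quantity: the sum of absolute
+// differences between two 16x16 pixel patches (0 for identical patches,
+// growing with dissimilarity). The SSE2 path handles one 16-pixel row per
+// _mm_sad_epu8 instruction and sums the two 64-bit lane results at the end.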
+
+void Detect(ubyte* image, int stride, int width, int height, Feature* detected, int* count, int distance, ubyte* pattern) {
+ unsigned short histogram[256];
+ memset(histogram,0,sizeof(histogram));
+ ubyte* scores = new ubyte[width*height];
+ memset(scores,0,width*height);
+ const int r = 1; // Radius for self-similarity comparison.
+ for(int y=distance; y<height-distance; y++) {
+ for(int x=distance; x<width-distance; x++) {
+ ubyte* s = &image[y*stride+x];
+ // Low self-similarity with overlapping patterns. OPTI: load the pattern once.
+ int score =
+ SAD(s, s-r*stride-r, stride, stride)+SAD(s, s-r*stride, stride, stride)+SAD(s, s-r*stride+r, stride, stride)+
+ SAD(s, s -r, stride, stride)+ SAD(s, s +r, stride, stride)+
+ SAD(s, s+r*stride-r, stride, stride)+SAD(s, s+r*stride, stride, stride)+SAD(s, s+r*stride+r, stride, stride);
+ score /= 256; // normalize
+ if(pattern) score -= SAD(s, pattern, stride, 16); // find only features similar to pattern
+ if(score<=16) continue; // filter very self-similar features
+ score -= 16; // translate to score/histogram values
+ if(score>255) score=255; // clip
+ ubyte* c = &scores[y*width+x];
+ for(int i=-distance; i<0; i++) {
+ for(int j=-distance; j<distance; j++) {
+ int s = c[i*width+j];
+ if(s == 0) continue;
+ if(s >= score) goto nonmax;
+ c[i*width+j]=0, histogram[s]--;
+ }
+ }
+ for(int i=0, j=-distance; j<0; j++) {
+ int s = c[i*width+j];
+ if(s == 0) continue;
+ if(s >= score) goto nonmax;
+ c[i*width+j]=0, histogram[s]--;
+ }
+ c[0] = score, histogram[score]++;
+ nonmax:;
+ }
+ }
+ int min=255, total=0;
+ for(; min>0; min--) {
+ int h = histogram[min];
+ if(total+h > *count) break;
+ total += h;
+ }
+ int i=0;
+ for(int y=16; y<height-16; y++) {
+ for(int x=16; x<width-16; x++) {
+ int s = scores[y*width+x];
+ Feature f = { x+8, y+8, s, 16 };
+ if(s>min) detected[i++] = f;
+ }
+ }
+ *count = i;
+ delete[] scores;
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/detect.h b/extern/libmv/libmv/simple_pipeline/detect.h
new file mode 100644
index 00000000000..23b239b81d6
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/detect.h
@@ -0,0 +1,72 @@
+/****************************************************************************
+**
+** Copyright (c) 2011 libmv authors.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to
+** deal in the Software without restriction, including without limitation the
+** rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+** sell copies of the Software, and to permit persons to whom the Software is
+** furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+** IN THE SOFTWARE.
+**
+****************************************************************************/
+
+#ifndef LIBMV_SIMPLE_PIPELINE_DETECT_H_
+#define LIBMV_SIMPLE_PIPELINE_DETECT_H_
+
+#ifdef __cplusplus
+namespace libmv {
+#endif
+
+typedef unsigned char ubyte;
+
+/*!
+ \a Feature is the 2D location of a detected feature in an image.
+
+ \a x, \a y is the position of the center in pixels (from image top-left).
+ \a score is an estimate of how well the pattern will be tracked.
+ \a size can be used as an initial size to track the pattern.
+
+ \sa Detect
+*/
+struct Feature {
+ float x, y;
+ float score;
+ float size;
+};
+/*!
+ Detect features in an image.
+
+ \a image is a single channel 8-bit image of size \a width x \a height
+
+ \a detected is an array with space to hold \a *count features.
+ \a *count is the maximum count to detect on input and the actual
+ detected count on output.
+
+ \a distance is the minimal distance between detected features; it acts as
+ the radius for non-maximal suppression.
+
+ if \a pattern is null all good features will be found.
+ if \a pattern is not null only features similar to \a pattern will be found.
+
+ \note You can crop the image (to avoid detecting markers near the borders) without copying:
+ image += marginY*stride+marginX, width -= 2*marginX, height -= 2*marginY;
+*/
+void Detect(ubyte* image, int stride, int width, int height, Feature* detected, int* count, int distance /*=32*/, ubyte* pattern /*=0*/);
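+
+// A minimal usage sketch (buffer sizes and values are hypothetical):
+//
+//   ubyte image[640*480];   // Single-channel 8-bit image, 640x480.
+//   Feature features[256];
+//   int count = 256;        // In: capacity; out: number actually detected.
+//   Detect(image, 640, 640, 480, features, &count, 32, 0);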
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc b/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
new file mode 100644
index 00000000000..0597f09f728
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
@@ -0,0 +1,218 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/fundamental.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+
+namespace libmv {
+namespace {
+
+void CoordinatesForMarkersInImage(const vector<Marker> &markers,
+ int image,
+ Mat *coordinates) {
+ vector<Vec2> coords;
+ for (int i = 0; i < markers.size(); ++i) {
+ const Marker &marker = markers[i];
+ if (markers[i].image == image) {
+ coords.push_back(Vec2(marker.x, marker.y));
+ }
+ }
+ coordinates->resize(2, coords.size());
+ for (int i = 0; i < coords.size(); i++) {
+ coordinates->col(i) = coords[i];
+ }
+}
+
+void GetImagesInMarkers(const vector<Marker> &markers,
+ int *image1, int *image2) {
+ if (markers.size() < 2) {
+ return;
+ }
+ *image1 = markers[0].image;
+ for (int i = 1; i < markers.size(); ++i) {
+ if (markers[i].image != *image1) {
+ *image2 = markers[i].image;
+ return;
+ }
+ }
+ *image2 = -1;
+ LOG(FATAL) << "Only one image in the markers.";
+}
+
+} // namespace
+
+bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction) {
+ if (markers.size() < 16) {
+ return false;
+ }
+
+ int image1, image2;
+ GetImagesInMarkers(markers, &image1, &image2);
+
+ Mat x1, x2;
+ CoordinatesForMarkersInImage(markers, image1, &x1);
+ CoordinatesForMarkersInImage(markers, image2, &x2);
+
+ Mat3 F;
+ NormalizedEightPointSolver(x1, x2, &F);
+
+ // The F matrix should be an E matrix, but squash it just to be sure.
+ Eigen::JacobiSVD<Mat3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
+
+ // See Hartley & Zisserman page 294, result 11.1, which shows how to get the
+ // closest essential matrix to a matrix that is "almost" an essential matrix.
+ double a = svd.singularValues()(0);
+ double b = svd.singularValues()(1);
+ double s = (a + b) / 2.0;
+ LG << "Initial reconstruction's rotation is non-euclidean by "
+ << (((a - b) / std::max(a, b)) * 100) << "%; singular values:"
+ << svd.singularValues().transpose();
+
+ Vec3 diag;
+ diag << s, s, 0;
+ Mat3 E = svd.matrixU() * diag.asDiagonal() * svd.matrixV().transpose();
+
+ // Recover motion between the two images. Since this function assumes a
+ // calibrated camera, use the identity for K.
+ Mat3 R;
+ Vec3 t;
+ Mat3 K = Mat3::Identity();
+ if (!MotionFromEssentialAndCorrespondence(E,
+ K, x1.col(0),
+ K, x2.col(0),
+ &R, &t)) {
+ return false;
+ }
+
+ // Image 1 gets the reference frame, image 2 gets the relative motion.
+ reconstruction->InsertCamera(image1, Mat3::Identity(), Vec3::Zero());
+ reconstruction->InsertCamera(image2, R, t);
+
+ LG << "From two frame reconstruction got:\nR:\n" << R
+ << "\nt:" << t.transpose();
+ return true;
+}
+
+namespace {
+
+Mat3 DecodeF(const Vec9 &encoded_F) {
+ // Decode F and force it to be rank 2.
+ Map<const Mat3> full_rank_F(encoded_F.data(), 3, 3);
+ Eigen::JacobiSVD<Mat3> svd(full_rank_F, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec3 diagonal = svd.singularValues();
+ diagonal(2) = 0;
+ Mat3 F = svd.matrixU() * diagonal.asDiagonal() * svd.matrixV().transpose();
+ return F;
+}
+
+// This is the stupidest way to refine F known to mankind, since it requires
+// doing a full SVD of F at each iteration. This uses sampson error.
+struct FundamentalSampsonCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec9 XMatrixType;
+
+ // Assumes markers are ordered by track.
+ FundamentalSampsonCostFunction(const vector<Marker> &markers)
+ : markers(markers) {}
+
+ Vec operator()(const Vec9 &encoded_F) const {
+ // Decode F and force it to be rank 2.
+ Mat3 F = DecodeF(encoded_F);
+
+ Vec residuals(markers.size() / 2);
+ residuals.setZero();
+ for (int i = 0; i < markers.size() / 2; ++i) {
+ const Marker &marker1 = markers[2*i + 0];
+ const Marker &marker2 = markers[2*i + 1];
+ CHECK_EQ(marker1.track, marker2.track);
+ Vec2 x1(marker1.x, marker1.y);
+ Vec2 x2(marker2.x, marker2.y);
+
+ residuals[i] = SampsonDistance(F, x1, x2);
+ }
+ return residuals;
+ }
+ const vector<Marker> &markers;
+};
+
+} // namespace
+
+bool ProjectiveReconstructTwoFrames(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction) {
+ if (markers.size() < 16) {
+ return false;
+ }
+
+ int image1, image2;
+ GetImagesInMarkers(markers, &image1, &image2);
+
+ Mat x1, x2;
+ CoordinatesForMarkersInImage(markers, image1, &x1);
+ CoordinatesForMarkersInImage(markers, image2, &x2);
+
+ Mat3 F;
+ NormalizedEightPointSolver(x1, x2, &F);
+
+ // XXX Verify sampson distance.
+#if 0
+ // Refine the resulting projection fundamental matrix using Sampson's
+ // approximation of geometric error. This avoids having to do a full bundle
+ // at the cost of some accuracy.
+ //
+ // TODO(keir): After switching to a better bundling library, use a proper
+ // full bundle adjust here instead of this lame bundle adjustment.
+ typedef LevenbergMarquardt<FundamentalSampsonCostFunction> Solver;
+
+ FundamentalSampsonCostFunction fundamental_cost(markers);
+
+ // Pack the initial F matrix into a size-9 vector.
+ Vec9 encoded_F = Map<Vec9>(F.data(), 3, 3);
+
+ Solver solver(fundamental_cost);
+
+ Solver::SolverParameters params;
+ Solver::Results results = solver.minimize(params, &encoded_F);
+ // TODO(keir): Check results to ensure clean termination.
+
+ // Recover F from the minimization.
+ F = DecodeF(encoded_F);
+#endif
+
+ // Image 1 gets P = [I|0], image 2 gets arbitrary P.
+ Mat34 P1 = Mat34::Zero();
+ P1.block<3, 3>(0, 0) = Mat3::Identity();
+ Mat34 P2;
+ ProjectionsFromFundamental(F, &P1, &P2);
+
+ reconstruction->InsertCamera(image1, P1);
+ reconstruction->InsertCamera(image2, P2);
+
+ LG << "From two frame reconstruction got P2:\n" << P2;
+ return true;
+}
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.h b/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
new file mode 100644
index 00000000000..f512c9a3439
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_INITIALIZE_RECONSTRUCTION_H
+#define LIBMV_SIMPLE_PIPELINE_INITIALIZE_RECONSTRUCTION_H
+
+#include "libmv/base/vector.h"
+
+namespace libmv {
+
+struct Marker;
+class EuclideanReconstruction;
+class ProjectiveReconstruction;
+
+/*!
+ Initialize the \link EuclideanReconstruction reconstruction \endlink using
+ two frames.
+
+ \a markers should contain all \link Marker markers \endlink belonging to
+ tracks visible in both frames. The pose estimation of the camera for
+ these frames will be inserted into \a *reconstruction.
+
+ \note The two frames need to have both enough parallax and enough common tracks
+ for accurate reconstruction. At least 8 tracks are suggested.
+ \note The origin of the coordinate system is defined to be the camera of
+ the first keyframe.
+ \note This assumes a calibrated reconstruction, e.g. the markers are
+ already corrected for camera intrinsics and radial distortion.
+ \note This assumes an outlier-free set of markers.
+
+ \sa EuclideanResect, EuclideanIntersect, EuclideanBundle
+*/
+bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction);
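+
+// A minimal calling sketch (illustrative; assumes a helper such as
+// Tracks::MarkersForTracksInBothImages() that gathers the markers shared by
+// the two keyframes):
+//
+//   vector<Marker> markers = tracks.MarkersForTracksInBothImages(1, 2);
+//   EuclideanReconstruction reconstruction;
+//   if (EuclideanReconstructTwoFrames(markers, &reconstruction)) {
+//     // Poses for the two keyframes are now in the reconstruction.
+//   }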
+
+/*!
+ Initialize the \link ProjectiveReconstruction reconstruction \endlink using
+ two frames.
+
+ \a markers should contain all \link Marker markers \endlink belonging to
+ tracks visible in both frames. An estimate of the projection matrices for
+ the two frames will get added to the reconstruction.
+
+ \note The two frames need to have both enough parallax and enough common tracks
+ for accurate reconstruction. At least 8 tracks are suggested.
+ \note The origin of the coordinate system is defined to be the camera of
+ the first keyframe.
+ \note This assumes the markers are already corrected for radial distortion.
+ \note This assumes an outlier-free set of markers.
+
+ \sa ProjectiveResect, ProjectiveIntersect, ProjectiveBundle
+*/
+bool ProjectiveReconstructTwoFrames(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction);
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_INITIALIZE_RECONSTRUCTION_H
diff --git a/extern/libmv/libmv/simple_pipeline/intersect.cc b/extern/libmv/libmv/simple_pipeline/intersect.cc
new file mode 100644
index 00000000000..b1518e04651
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/intersect.cc
@@ -0,0 +1,205 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/triangulation.h"
+#include "libmv/multiview/nviewtriangulation.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+
+namespace libmv {
+
+namespace {
+
+struct EuclideanIntersectCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec3 XMatrixType;
+
+ EuclideanIntersectCostFunction(const vector<Marker> &markers,
+ const EuclideanReconstruction &reconstruction)
+ : markers(markers),
+ reconstruction(reconstruction) {}
+
+ Vec operator()(const Vec3 &X) const {
+ Vec residuals(2 * markers.size());
+ residuals.setZero();
+ for (int i = 0; i < markers.size(); ++i) {
+ const EuclideanCamera &camera =
+ *reconstruction.CameraForImage(markers[i].image);
+ Vec3 projected = camera.R * X + camera.t;
+ projected /= projected(2);
+ residuals[2*i + 0] = projected(0) - markers[i].x;
+ residuals[2*i + 1] = projected(1) - markers[i].y;
+ }
+ return residuals;
+ }
+ const vector<Marker> &markers;
+ const EuclideanReconstruction &reconstruction;
+};
+
+} // namespace
+
+bool EuclideanIntersect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction) {
+ if (markers.size() < 2) {
+ return false;
+ }
+
+ // Compute projective camera matrices for the cameras the intersection is
+ // going to use.
+ Mat3 K = Mat3::Identity();
+ vector<Mat34> cameras;
+ Mat34 P;
+ for (int i = 0; i < markers.size(); ++i) {
+ EuclideanCamera *camera = reconstruction->CameraForImage(markers[i].image);
+ P_From_KRt(K, camera->R, camera->t, &P);
+ cameras.push_back(P);
+ }
+
+ // Stack the 2D coordinates together as required by NViewTriangulate.
+ Mat2X points(2, markers.size());
+ for (int i = 0; i < markers.size(); ++i) {
+ points(0, i) = markers[i].x;
+ points(1, i) = markers[i].y;
+ }
+
+ Vec4 Xp;
+ LG << "Intersecting with " << markers.size() << " markers.";
+ NViewTriangulateAlgebraic(points, cameras, &Xp);
+
+ // Get the Euclidean version of the homogeneous point.
+ Xp /= Xp(3);
+ Vec3 X = Xp.head<3>();
+
+ typedef LevenbergMarquardt<EuclideanIntersectCostFunction> Solver;
+
+ EuclideanIntersectCostFunction triangulate_cost(markers, *reconstruction);
+ Solver::SolverParameters params;
+ Solver solver(triangulate_cost);
+
+ Solver::Results results = solver.minimize(params, &X);
+
+ // Try projecting the point; make sure it's in front of everyone.
+ for (int i = 0; i < cameras.size(); ++i) {
+ const EuclideanCamera &camera =
+ *reconstruction->CameraForImage(markers[i].image);
+ Vec3 x = camera.R * X + camera.t;
+ if (x(2) < 0) {
+ LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
+ << ": " << x.transpose();
+ }
+ }
+
+ Vec3 point = X.head<3>();
+ reconstruction->InsertPoint(markers[0].track, point);
+
+ // TODO(keir): Add proper error checking.
+ return true;
+}
+
+namespace {
+
+struct ProjectiveIntersectCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec4 XMatrixType;
+
+ ProjectiveIntersectCostFunction(
+ const vector<Marker> &markers,
+ const ProjectiveReconstruction &reconstruction)
+ : markers(markers), reconstruction(reconstruction) {}
+
+ Vec operator()(const Vec4 &X) const {
+ Vec residuals(2 * markers.size());
+ residuals.setZero();
+ for (int i = 0; i < markers.size(); ++i) {
+ const ProjectiveCamera &camera =
+ *reconstruction.CameraForImage(markers[i].image);
+ Vec3 projected = camera.P * X;
+ projected /= projected(2);
+ residuals[2*i + 0] = projected(0) - markers[i].x;
+ residuals[2*i + 1] = projected(1) - markers[i].y;
+ }
+ return residuals;
+ }
+ const vector<Marker> &markers;
+ const ProjectiveReconstruction &reconstruction;
+};
+
+} // namespace
+
+bool ProjectiveIntersect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction) {
+ if (markers.size() < 2) {
+ return false;
+ }
+
+ // Get the cameras to use for the intersection.
+ vector<Mat34> cameras;
+ for (int i = 0; i < markers.size(); ++i) {
+ ProjectiveCamera *camera = reconstruction->CameraForImage(markers[i].image);
+ cameras.push_back(camera->P);
+ }
+
+ // Stack the 2D coordinates together as required by NViewTriangulate.
+ Mat2X points(2, markers.size());
+ for (int i = 0; i < markers.size(); ++i) {
+ points(0, i) = markers[i].x;
+ points(1, i) = markers[i].y;
+ }
+
+ Vec4 X;
+ LG << "Intersecting with " << markers.size() << " markers.";
+ NViewTriangulateAlgebraic(points, cameras, &X);
+ X /= X(3);
+
+ typedef LevenbergMarquardt<ProjectiveIntersectCostFunction> Solver;
+
+ ProjectiveIntersectCostFunction triangulate_cost(markers, *reconstruction);
+ Solver::SolverParameters params;
+ Solver solver(triangulate_cost);
+
+ Solver::Results results = solver.minimize(params, &X);
+ (void) results; // TODO(keir): Ensure results are good.
+
+ // Try projecting the point; make sure it's in front of everyone.
+ for (int i = 0; i < cameras.size(); ++i) {
+ const ProjectiveCamera &camera =
+ *reconstruction->CameraForImage(markers[i].image);
+ Vec3 x = camera.P * X;
+ if (x(2) < 0) {
+ LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
+ << ": " << x.transpose();
+ }
+ }
+
+ reconstruction->InsertPoint(markers[0].track, X);
+
+ // TODO(keir): Add proper error checking.
+ return true;
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/intersect.h b/extern/libmv/libmv/simple_pipeline/intersect.h
new file mode 100644
index 00000000000..edbf4a0335b
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/intersect.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_INTERSECT_H
+#define LIBMV_SIMPLE_PIPELINE_INTERSECT_H
+
+#include "libmv/base/vector.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+
+namespace libmv {
+
+/*!
+ Estimate the 3D coordinates of a track by intersecting rays from images.
+
+ This takes a set of markers, where each marker is for the same track but
+ different images, and reconstructs the 3D position of that track. Each of
+ the frames for which there is a marker for that track must have a
+ corresponding reconstructed camera in \a *reconstruction.
+
+ \a markers should contain all \link Marker markers \endlink belonging to
+ tracks visible in all frames.
+ \a reconstruction should contain the cameras for all frames.
+ The new \link Point points \endlink will be inserted in \a reconstruction.
+
+ \note This assumes a calibrated reconstruction, e.g. the markers are
+ already corrected for camera intrinsics and radial distortion.
+ \note This assumes an outlier-free set of markers.
+
+ \sa EuclideanResect
+*/
+bool EuclideanIntersect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction);
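+
+// A minimal calling sketch (illustrative only): gather all markers of one
+// track whose images already have reconstructed cameras, then:
+//
+//   vector<Marker> markers = tracks.MarkersForTrack(track);
+//   EuclideanIntersect(markers, &reconstruction);  // Inserts the 3D point.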
+
+/*!
+ Estimate the homogeneous coordinates of a track by intersecting rays.
+
+ This takes a set of markers, where each marker is for the same track but
+ different images, and reconstructs the homogeneous 3D position of that
+ track. Each of the frames for which there is a marker for that track must
+ have a corresponding reconstructed camera in \a *reconstruction.
+
+ \a markers should contain all \link Marker markers \endlink belonging to
+ tracks visible in all frames.
+ \a reconstruction should contain the cameras for all frames.
+ The new \link Point points \endlink will be inserted in \a reconstruction.
+
+ \note This assumes that radial distortion is already corrected for, but
+ does not assume that e.g. focal length and principal point are
+ accounted for.
+ \note This assumes an outlier-free set of markers.
+
+ \sa ProjectiveResect
+*/
+bool ProjectiveIntersect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_INTERSECT_H
diff --git a/extern/libmv/libmv/simple_pipeline/pipeline.cc b/extern/libmv/libmv/simple_pipeline/pipeline.cc
new file mode 100644
index 00000000000..818c24cb5e7
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/pipeline.cc
@@ -0,0 +1,317 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <cstdio>
+
+#include "libmv/logging/logging.h"
+#include "libmv/simple_pipeline/bundle.h"
+#include "libmv/simple_pipeline/intersect.h"
+#include "libmv/simple_pipeline/resect.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+#ifdef _MSC_VER
+# define snprintf _snprintf
+#endif
+
+namespace libmv {
+namespace {
+
+// These are "strategy" classes which make it possible to use the same code for
+// both projective and euclidean reconstruction.
+// FIXME(MatthiasF): OOP would achieve the same goal while avoiding
+// template bloat and making interface changes much easier.
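+// (InternalCompleteReconstruction below is instantiated once per strategy,
+// e.g. InternalCompleteReconstruction<EuclideanPipelineRoutines>.)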
+struct EuclideanPipelineRoutines {
+ typedef EuclideanReconstruction Reconstruction;
+ typedef EuclideanCamera Camera;
+ typedef EuclideanPoint Point;
+
+ static void Bundle(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction) {
+ EuclideanBundle(tracks, reconstruction);
+ }
+
+ static bool Resect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction, bool final_pass) {
+ return EuclideanResect(markers, reconstruction, final_pass);
+ }
+
+ static bool Intersect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction) {
+ return EuclideanIntersect(markers, reconstruction);
+ }
+
+ static Marker ProjectMarker(const EuclideanPoint &point,
+ const EuclideanCamera &camera,
+ const CameraIntrinsics &intrinsics) {
+ Vec3 projected = camera.R * point.X + camera.t;
+ projected /= projected(2);
+
+ Marker reprojected_marker;
+ intrinsics.ApplyIntrinsics(projected(0),
+ projected(1),
+ &reprojected_marker.x,
+ &reprojected_marker.y);
+
+ reprojected_marker.image = camera.image;
+ reprojected_marker.track = point.track;
+ return reprojected_marker;
+ }
+};
+
+struct ProjectivePipelineRoutines {
+ typedef ProjectiveReconstruction Reconstruction;
+ typedef ProjectiveCamera Camera;
+ typedef ProjectivePoint Point;
+
+ static void Bundle(const Tracks &tracks,
+ ProjectiveReconstruction *reconstruction) {
+ ProjectiveBundle(tracks, reconstruction);
+ }
+
+ static bool Resect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction, bool final_pass) {
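+ // final_pass is only meaningful for the Euclidean pipeline's resection
+ // fallback; projective resection ignores it.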
+ return ProjectiveResect(markers, reconstruction);
+ }
+
+ static bool Intersect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction) {
+ return ProjectiveIntersect(markers, reconstruction);
+ }
+
+ static Marker ProjectMarker(const ProjectivePoint &point,
+ const ProjectiveCamera &camera,
+ const CameraIntrinsics &intrinsics) {
+ Vec3 projected = camera.P * point.X;
+ projected /= projected(2);
+
+ Marker reprojected_marker;
+ intrinsics.ApplyIntrinsics(projected(0),
+ projected(1),
+ &reprojected_marker.x,
+ &reprojected_marker.y);
+
+ reprojected_marker.image = camera.image;
+ reprojected_marker.track = point.track;
+ return reprojected_marker;
+ }
+};
+
+} // namespace
+
+template<typename PipelineRoutines>
+void InternalCompleteReconstruction(
+ const Tracks &tracks,
+ typename PipelineRoutines::Reconstruction *reconstruction) {
+ int max_track = tracks.MaxTrack();
+ int max_image = tracks.MaxImage();
+ int num_resects = -1;
+ int num_intersects = -1;
+ LG << "Max track: " << max_track;
+ LG << "Max image: " << max_image;
+ LG << "Number of markers: " << tracks.NumMarkers();
+ while (num_resects != 0 || num_intersects != 0) {
+ // Do all possible intersections.
+ num_intersects = 0;
+ for (int track = 0; track <= max_track; ++track) {
+ if (reconstruction->PointForTrack(track)) {
+ LG << "Skipping point: " << track;
+ continue;
+ }
+ vector<Marker> all_markers = tracks.MarkersForTrack(track);
+ LG << "Got " << all_markers.size() << " markers for track " << track;
+
+ vector<Marker> reconstructed_markers;
+ for (int i = 0; i < all_markers.size(); ++i) {
+ if (reconstruction->CameraForImage(all_markers[i].image)) {
+ reconstructed_markers.push_back(all_markers[i]);
+ }
+ }
+ LG << "Got " << reconstructed_markers.size()
+ << " reconstructed markers for track " << track;
+ if (reconstructed_markers.size() >= 2) {
+ PipelineRoutines::Intersect(reconstructed_markers, reconstruction);
+ num_intersects++;
+ LG << "Ran Intersect() for track " << track;
+ }
+ }
+ if (num_intersects) {
+ PipelineRoutines::Bundle(tracks, reconstruction);
+ LG << "Ran Bundle() after intersections.";
+ }
+ LG << "Did " << num_intersects << " intersects.";
+
+ // Do all possible resections.
+ num_resects = 0;
+ for (int image = 0; image <= max_image; ++image) {
+ if (reconstruction->CameraForImage(image)) {
+ LG << "Skipping frame: " << image;
+ continue;
+ }
+ vector<Marker> all_markers = tracks.MarkersInImage(image);
+ LG << "Got " << all_markers.size() << " markers for image " << image;
+
+ vector<Marker> reconstructed_markers;
+ for (int i = 0; i < all_markers.size(); ++i) {
+ if (reconstruction->PointForTrack(all_markers[i].track)) {
+ reconstructed_markers.push_back(all_markers[i]);
+ }
+ }
+ LG << "Got " << reconstructed_markers.size()
+ << " reconstructed markers for image " << image;
+ if (reconstructed_markers.size() >= 5) {
+ if (PipelineRoutines::Resect(reconstructed_markers, reconstruction, false)) {
+ num_resects++;
+ LG << "Ran Resect() for image " << image;
+ } else {
+ LG << "Failed Resect() for image " << image;
+ }
+ }
+ }
+ if (num_resects) {
+ PipelineRoutines::Bundle(tracks, reconstruction);
+ }
+ LG << "Did " << num_resects << " resects.";
+ }
+
+ // One last pass...
+ num_resects = 0;
+ for (int image = 0; image <= max_image; ++image) {
+ if (reconstruction->CameraForImage(image)) {
+ LG << "Skipping frame: " << image;
+ continue;
+ }
+ vector<Marker> all_markers = tracks.MarkersInImage(image);
+
+ vector<Marker> reconstructed_markers;
+ for (int i = 0; i < all_markers.size(); ++i) {
+ if (reconstruction->PointForTrack(all_markers[i].track)) {
+ reconstructed_markers.push_back(all_markers[i]);
+ }
+ }
+ if (reconstructed_markers.size() >= 5) {
+ if (PipelineRoutines::Resect(reconstructed_markers, reconstruction, true)) {
+ num_resects++;
+ LG << "Ran Resect() for image " << image;
+ } else {
+ LG << "Failed Resect() for image " << image;
+ }
+ }
+ }
+ if (num_resects) {
+ PipelineRoutines::Bundle(tracks, reconstruction);
+ }
+}
+
+template<typename PipelineRoutines>
+double InternalReprojectionError(const Tracks &image_tracks,
+ const typename PipelineRoutines::Reconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics) {
+ int num_skipped = 0;
+ int num_reprojected = 0;
+ double total_error = 0.0;
+ vector<Marker> markers = image_tracks.AllMarkers();
+ for (int i = 0; i < markers.size(); ++i) {
+ const typename PipelineRoutines::Camera *camera =
+ reconstruction.CameraForImage(markers[i].image);
+ const typename PipelineRoutines::Point *point =
+ reconstruction.PointForTrack(markers[i].track);
+ if (!camera || !point) {
+ num_skipped++;
+ continue;
+ }
+ num_reprojected++;
+
+ Marker reprojected_marker =
+ PipelineRoutines::ProjectMarker(*point, *camera, intrinsics);
+ double ex = reprojected_marker.x - markers[i].x;
+ double ey = reprojected_marker.y - markers[i].y;
+
+ const int N = 100;
+ char line[N];
+ snprintf(line, N,
+ "image %-3d track %-3d "
+ "x %7.1f y %7.1f "
+ "rx %7.1f ry %7.1f "
+ "ex %7.1f ey %7.1f"
+ " e %7.1f",
+ markers[i].image,
+ markers[i].track,
+ markers[i].x,
+ markers[i].y,
+ reprojected_marker.x,
+ reprojected_marker.y,
+ ex,
+ ey,
+ sqrt(ex*ex + ey*ey));
+ // LG << line;  // Uncomment for per-marker reprojection debug output.
+ total_error += sqrt(ex*ex + ey*ey);
+ }
+ LG << "Skipped " << num_skipped << " markers.";
+ LG << "Reprojected " << num_reprojected << " markers.";
+ LG << "Total error: " << total_error;
+ LG << "Average error: " << (total_error / num_reprojected) << " [pixels].";
+ return total_error / num_reprojected;
+}
+
+double EuclideanReprojectionError(const Tracks &image_tracks,
+ const EuclideanReconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics) {
+ return InternalReprojectionError<EuclideanPipelineRoutines>(image_tracks,
+ reconstruction,
+ intrinsics);
+}
+
+double ProjectiveReprojectionError(
+ const Tracks &image_tracks,
+ const ProjectiveReconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics) {
+ return InternalReprojectionError<ProjectivePipelineRoutines>(image_tracks,
+ reconstruction,
+ intrinsics);
+}
+
+void EuclideanCompleteReconstruction(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction) {
+ InternalCompleteReconstruction<EuclideanPipelineRoutines>(tracks,
+ reconstruction);
+}
+
+void ProjectiveCompleteReconstruction(const Tracks &tracks,
+ ProjectiveReconstruction *reconstruction) {
+ InternalCompleteReconstruction<ProjectivePipelineRoutines>(tracks,
+ reconstruction);
+}
+
+void InvertIntrinsicsForTracks(const Tracks &raw_tracks,
+ const CameraIntrinsics &camera_intrinsics,
+ Tracks *calibrated_tracks) {
+ vector<Marker> markers = raw_tracks.AllMarkers();
+ for (int i = 0; i < markers.size(); ++i) {
+ camera_intrinsics.InvertIntrinsics(markers[i].x,
+ markers[i].y,
+ &(markers[i].x),
+ &(markers[i].y));
+ }
+ *calibrated_tracks = Tracks(markers);
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/pipeline.h b/extern/libmv/libmv/simple_pipeline/pipeline.h
new file mode 100644
index 00000000000..b7dfcb7993a
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/pipeline.h
@@ -0,0 +1,95 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_PIPELINE_H_
+#define LIBMV_SIMPLE_PIPELINE_PIPELINE_H_
+
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+
+namespace libmv {
+
+/*!
+ Estimate camera poses and scene 3D coordinates for all frames and tracks.
+
+ This method should be used once there is an initial reconstruction in
+ place, for example from reconstructing two frames that have a sufficient
+ baseline and enough tracks in common. This function iteratively
+ triangulates points that are visible to cameras with an estimated pose,
+ then resections (i.e. estimates the pose of) cameras that are not yet
+ estimated but can see triangulated points. This process is repeated until
+ all points and cameras are estimated. Periodically, bundle adjustment is
+ run to keep the reconstruction accurate.
+
+ \a tracks should contain markers used in the reconstruction.
+ \a reconstruction should contain at least some 3D points or some estimated
+ cameras. The minimum number of cameras is two (with no 3D points) and the
+ minimum number of 3D points (with no estimated cameras) is five.
+
+ \sa EuclideanResect, EuclideanIntersect, EuclideanBundle
+*/
+void EuclideanCompleteReconstruction(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction);
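+
+// Illustrative call sequence (a sketch; the identifiers are placeholders,
+// and seeding the reconstruction from two frames is assumed to happen by
+// other means):
+//
+//   Tracks calibrated_tracks;
+//   InvertIntrinsicsForTracks(raw_tracks, intrinsics, &calibrated_tracks);
+//   EuclideanReconstruction reconstruction;
+//   // ... insert an initial two-frame reconstruction here ...
+//   EuclideanCompleteReconstruction(calibrated_tracks, &reconstruction);
+//   double error = EuclideanReprojectionError(calibrated_tracks,
+//                                             reconstruction, intrinsics);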
+
+/*!
+ Estimate camera matrices and homogeneous 3D coordinates for all frames and
+ tracks.
+
+ This method should be used once there is an initial reconstruction in
+ place, for example from reconstructing two frames that have a sufficient
+ baseline and enough tracks in common. This function iteratively
+ triangulates points that are visible to cameras with an estimated pose,
+ then resections (i.e. estimates the pose of) cameras that are not yet
+ estimated but can see triangulated points. This process is repeated until
+ all points and cameras are estimated. Periodically, bundle adjustment is
+ run to keep the reconstruction accurate.
+
+ \a tracks should contain markers used in the reconstruction.
+ \a reconstruction should contain at least some 3D points or some estimated
+ cameras. The minimum number of cameras is two (with no 3D points) and the
+ minimum number of 3D points (with no estimated cameras) is five.
+
+ \sa ProjectiveResect, ProjectiveIntersect, ProjectiveBundle
+*/
+void ProjectiveCompleteReconstruction(const Tracks &tracks,
+ ProjectiveReconstruction *reconstruction);
+
+
+class CameraIntrinsics;
+
+// TODO(keir): Decide if we want these in the public API, and if so, what the
+// appropriate include file is.
+
+double EuclideanReprojectionError(const Tracks &image_tracks,
+ const EuclideanReconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics);
+
+double ProjectiveReprojectionError(
+ const Tracks &image_tracks,
+ const ProjectiveReconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics);
+
+void InvertIntrinsicsForTracks(const Tracks &raw_tracks,
+ const CameraIntrinsics &camera_intrinsics,
+ Tracks *calibrated_tracks);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_PIPELINE_H_
diff --git a/extern/libmv/libmv/simple_pipeline/reconstruction.cc b/extern/libmv/libmv/simple_pipeline/reconstruction.cc
new file mode 100644
index 00000000000..65e5dd27d5d
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/reconstruction.cc
@@ -0,0 +1,191 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
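+// Cameras and points are stored in dense vectors indexed by image and track
+// number; a default-constructed slot (image or track equal to -1) marks an
+// entry that has not been reconstructed yet.
+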
+EuclideanReconstruction::EuclideanReconstruction() {}
+EuclideanReconstruction::EuclideanReconstruction(
+ const EuclideanReconstruction &other) {
+ cameras_ = other.cameras_;
+ points_ = other.points_;
+}
+
+EuclideanReconstruction &EuclideanReconstruction::operator=(
+ const EuclideanReconstruction &other) {
+ if (&other != this) {
+ cameras_ = other.cameras_;
+ points_ = other.points_;
+ }
+ return *this;
+}
+
+void EuclideanReconstruction::InsertCamera(int image,
+ const Mat3 &R,
+ const Vec3 &t) {
+ LG << "InsertCamera " << image << ":\nR:\n"<< R << "\nt:\n" << t;
+ if (image >= cameras_.size()) {
+ cameras_.resize(image + 1);
+ }
+ cameras_[image].image = image;
+ cameras_[image].R = R;
+ cameras_[image].t = t;
+}
+
+void EuclideanReconstruction::InsertPoint(int track, const Vec3 &X) {
+ LG << "InsertPoint " << track << ":\n" << X;
+ if (track >= points_.size()) {
+ points_.resize(track + 1);
+ }
+ points_[track].track = track;
+ points_[track].X = X;
+}
+
+EuclideanCamera *EuclideanReconstruction::CameraForImage(int image) {
+ return const_cast<EuclideanCamera *>(
+ static_cast<const EuclideanReconstruction *>(
+ this)->CameraForImage(image));
+}
+
+const EuclideanCamera *EuclideanReconstruction::CameraForImage(
+ int image) const {
+ if (image < 0 || image >= cameras_.size()) {
+ return NULL;
+ }
+ const EuclideanCamera *camera = &cameras_[image];
+ if (camera->image == -1) {
+ return NULL;
+ }
+ return camera;
+}
+
+vector<EuclideanCamera> EuclideanReconstruction::AllCameras() const {
+ vector<EuclideanCamera> cameras;
+ for (int i = 0; i < cameras_.size(); ++i) {
+ if (cameras_[i].image != -1) {
+ cameras.push_back(cameras_[i]);
+ }
+ }
+ return cameras;
+}
+
+EuclideanPoint *EuclideanReconstruction::PointForTrack(int track) {
+ return const_cast<EuclideanPoint *>(
+ static_cast<const EuclideanReconstruction *>(this)->PointForTrack(track));
+}
+
+const EuclideanPoint *EuclideanReconstruction::PointForTrack(int track) const {
+ if (track < 0 || track >= points_.size()) {
+ return NULL;
+ }
+ const EuclideanPoint *point = &points_[track];
+ if (point->track == -1) {
+ return NULL;
+ }
+ return point;
+}
+
+vector<EuclideanPoint> EuclideanReconstruction::AllPoints() const {
+ vector<EuclideanPoint> points;
+ for (int i = 0; i < points_.size(); ++i) {
+ if (points_[i].track != -1) {
+ points.push_back(points_[i]);
+ }
+ }
+ return points;
+}
+
+void ProjectiveReconstruction::InsertCamera(int image,
+ const Mat34 &P) {
+ LG << "InsertCamera " << image << ":\nP:\n"<< P;
+ if (image >= cameras_.size()) {
+ cameras_.resize(image + 1);
+ }
+ cameras_[image].image = image;
+ cameras_[image].P = P;
+}
+
+void ProjectiveReconstruction::InsertPoint(int track, const Vec4 &X) {
+ LG << "InsertPoint " << track << ":\n" << X;
+ if (track >= points_.size()) {
+ points_.resize(track + 1);
+ }
+ points_[track].track = track;
+ points_[track].X = X;
+}
+
+ProjectiveCamera *ProjectiveReconstruction::CameraForImage(int image) {
+ return const_cast<ProjectiveCamera *>(
+ static_cast<const ProjectiveReconstruction *>(
+ this)->CameraForImage(image));
+}
+
+const ProjectiveCamera *ProjectiveReconstruction::CameraForImage(
+ int image) const {
+ if (image < 0 || image >= cameras_.size()) {
+ return NULL;
+ }
+ const ProjectiveCamera *camera = &cameras_[image];
+ if (camera->image == -1) {
+ return NULL;
+ }
+ return camera;
+}
+
+vector<ProjectiveCamera> ProjectiveReconstruction::AllCameras() const {
+ vector<ProjectiveCamera> cameras;
+ for (int i = 0; i < cameras_.size(); ++i) {
+ if (cameras_[i].image != -1) {
+ cameras.push_back(cameras_[i]);
+ }
+ }
+ return cameras;
+}
+
+ProjectivePoint *ProjectiveReconstruction::PointForTrack(int track) {
+ return const_cast<ProjectivePoint *>(
+ static_cast<const ProjectiveReconstruction *>(this)->PointForTrack(track));
+}
+
+const ProjectivePoint *ProjectiveReconstruction::PointForTrack(int track) const {
+ if (track < 0 || track >= points_.size()) {
+ return NULL;
+ }
+ const ProjectivePoint *point = &points_[track];
+ if (point->track == -1) {
+ return NULL;
+ }
+ return point;
+}
+
+vector<ProjectivePoint> ProjectiveReconstruction::AllPoints() const {
+ vector<ProjectivePoint> points;
+ for (int i = 0; i < points_.size(); ++i) {
+ if (points_[i].track != -1) {
+ points.push_back(points_[i]);
+ }
+ }
+ return points;
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/reconstruction.h b/extern/libmv/libmv/simple_pipeline/reconstruction.h
new file mode 100644
index 00000000000..947a0636476
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/reconstruction.h
@@ -0,0 +1,217 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
+#define LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
+
+#include "libmv/base/vector.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+/*!
+ A EuclideanCamera is the location and rotation of the camera viewing
+ \a image.
+
+ \a image identifies which image from \l Tracks this camera represents.
+ \a R is a 3x3 matrix representing the rotation of the camera.
+ \a t is a translation vector representing its position.
+
+ \sa EuclideanReconstruction
+*/
+struct EuclideanCamera {
+ EuclideanCamera() : image(-1) {}
+ EuclideanCamera(const EuclideanCamera &c) : image(c.image), R(c.R), t(c.t) {}
+
+ int image;
+ Mat3 R;
+ Vec3 t;
+};
+
+/*!
+ A EuclideanPoint is the 3D location of a track.
+
+ \a track identifies which track from \l Tracks this point corresponds to.
+ \a X represents the 3D position of the track.
+
+ \sa EuclideanReconstruction
+*/
+struct EuclideanPoint {
+ EuclideanPoint() : track(-1) {}
+ EuclideanPoint(const EuclideanPoint &p) : track(p.track), X(p.X) {}
+ int track;
+ Vec3 X;
+};
+
+/*!
+ The EuclideanReconstruction class stores \link EuclideanCamera cameras
+ \endlink and \link EuclideanPoint points \endlink.
+
+ The EuclideanReconstruction container is intended as the store of 3D
+ reconstruction data to be used with the MultiView API.
+
+ The container has lookups to query a \a EuclideanCamera from an \a image or
+ a \a EuclideanPoint from a \a track.
+
+ \sa EuclideanCamera, EuclideanPoint
+*/
+class EuclideanReconstruction {
+ public:
+ /// Default constructor starts with no cameras.
+ EuclideanReconstruction();
+
+ /// Copy constructor.
+ EuclideanReconstruction(const EuclideanReconstruction &other);
+
+ EuclideanReconstruction &operator=(const EuclideanReconstruction &other);
+
+ /*!
+ Insert a camera into the set. If there is already a camera for the given
+ \a image, the existing camera is replaced. If there is no camera for the
+ given \a image, a new one is added.
+
+ \a image is the key used to retrieve the cameras with the other methods
+ in this class.
+
+ \note You should use the same \a image identifier as in \l Tracks.
+ */
+ void InsertCamera(int image, const Mat3 &R, const Vec3 &t);
+
+ /*!
+ Insert a point into the reconstruction. If there is already a point for
+ the given \a track, the existing point is replaced. If there is no point
+ for the given \a track, a new one is added.
+
+ \a track is the key used to retrieve the points with the
+ other methods in this class.
+
+ \note You should use the same \a track identifier as in \l Tracks.
+ */
+ void InsertPoint(int track, const Vec3 &X);
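+
+ // Illustrative use (a sketch; R, t and X would come from e.g. resection
+ // and intersection):
+ //
+ // EuclideanReconstruction reconstruction;
+ // reconstruction.InsertCamera(image, R, t);
+ // reconstruction.InsertPoint(track, X);
+ // const EuclideanCamera *camera = reconstruction.CameraForImage(image);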
+
+ /// Returns a pointer to the camera corresponding to \a image.
+ EuclideanCamera *CameraForImage(int image);
+ const EuclideanCamera *CameraForImage(int image) const;
+
+ /// Returns all cameras.
+ vector<EuclideanCamera> AllCameras() const;
+
+ /// Returns a pointer to the point corresponding to \a track.
+ EuclideanPoint *PointForTrack(int track);
+ const EuclideanPoint *PointForTrack(int track) const;
+
+ /// Returns all points.
+ vector<EuclideanPoint> AllPoints() const;
+
+ private:
+ vector<EuclideanCamera> cameras_;
+ vector<EuclideanPoint> points_;
+};
+
+/*!
+ A ProjectiveCamera is the projection matrix for the camera of \a image.
+
+ \a image identifies which image from \l Tracks this camera represents.
+ \a P is the 3x4 projection matrix.
+
+ \sa ProjectiveReconstruction
+*/
+struct ProjectiveCamera {
+ ProjectiveCamera() : image(-1) {}
+ ProjectiveCamera(const ProjectiveCamera &c) : image(c.image), P(c.P) {}
+
+ int image;
+ Mat34 P;
+};
+
+/*!
+ A ProjectivePoint is the homogeneous 3D location of a track.
+
+ \a track identifies which track from \l Tracks this point corresponds to.
+ \a X is the homogeneous 3D position of the track.
+
+ \sa ProjectiveReconstruction
+*/
+struct ProjectivePoint {
+ ProjectivePoint() : track(-1) {}
+ ProjectivePoint(const ProjectivePoint &p) : track(p.track), X(p.X) {}
+ int track;
+ Vec4 X;
+};
+
+/*!
+ The ProjectiveReconstruction class stores \link ProjectiveCamera cameras
+ \endlink and \link ProjectivePoint points \endlink.
+
+ The ProjectiveReconstruction container is intended as the store of 3D
+ reconstruction data to be used with the MultiView API.
+
+ The container has lookups to query a \a ProjectiveCamera from an \a image or
+ a \a ProjectivePoint from a \a track.
+
+ \sa ProjectiveCamera, ProjectivePoint
+*/
+class ProjectiveReconstruction {
+ public:
+ /*!
+ Insert a camera into the set. If there is already a camera for the given
+ \a image, the existing camera is replaced. If there is no camera for the
+ given \a image, a new one is added.
+
+ \a image is the key used to retrieve the cameras with the other methods
+ in this class.
+
+ \note You should use the same \a image identifier as in \l Tracks.
+ */
+ void InsertCamera(int image, const Mat34 &P);
+
+ /*!
+ Insert a point into the reconstruction. If there is already a point for
+ the given \a track, the existing point is replaced. If there is no point
+ for the given \a track, a new one is added.
+
+ \a track is the key used to retrieve the points with the
+ other methods in this class.
+
+ \note You should use the same \a track identifier as in \l Tracks.
+ */
+ void InsertPoint(int track, const Vec4 &X);
+
+ /// Returns a pointer to the camera corresponding to \a image.
+ ProjectiveCamera *CameraForImage(int image);
+ const ProjectiveCamera *CameraForImage(int image) const;
+
+ /// Returns all cameras.
+ vector<ProjectiveCamera> AllCameras() const;
+
+ /// Returns a pointer to the point corresponding to \a track.
+ ProjectivePoint *PointForTrack(int track);
+ const ProjectivePoint *PointForTrack(int track) const;
+
+ /// Returns all points.
+ vector<ProjectivePoint> AllPoints() const;
+
+ private:
+ vector<ProjectiveCamera> cameras_;
+ vector<ProjectivePoint> points_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
diff --git a/extern/libmv/libmv/simple_pipeline/resect.cc b/extern/libmv/libmv/simple_pipeline/resect.cc
new file mode 100644
index 00000000000..6e71c3c7206
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/resect.cc
@@ -0,0 +1,271 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <cmath>
+#include <cstdio>
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/euclidean_resection.h"
+#include "libmv/multiview/resection.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+
+namespace libmv {
+namespace {
+
+Mat2X PointMatrixFromMarkers(const vector<Marker> &markers) {
+ Mat2X points(2, markers.size());
+ for (int i = 0; i < markers.size(); ++i) {
+ points(0, i) = markers[i].x;
+ points(1, i) = markers[i].y;
+ }
+ return points;
+}
+
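+// Rodrigues' rotation formula: for a unit axis w and angle theta,
+// R = I + sin(theta) [w]_x + (1 - cos(theta)) [w]_x^2,
+// where [w]_x is the cross-product (skew-symmetric) matrix of w.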
+Mat3 RotationFromEulerVector(Vec3 euler_vector) {
+ double theta = euler_vector.norm();
+ if (theta == 0.0) {
+ return Mat3::Identity();
+ }
+ Vec3 w = euler_vector / theta;
+ Mat3 w_hat = CrossProductMatrix(w);
+ return Mat3::Identity() + w_hat*sin(theta) + w_hat*w_hat*(1 - cos(theta));
+}
+
+// Uses an incremental rotation:
+//
+// x = R' * R * X + t;
+//
+// to avoid issues with the rotation representation. R' is derived from a
+// euler vector encoding the rotation in 3 parameters; the direction is the
+// axis to rotate around and the magnitude is the amount of the rotation.
+struct EuclideanResectCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec6 XMatrixType;
+
+ EuclideanResectCostFunction(const vector<Marker> &markers,
+ const EuclideanReconstruction &reconstruction,
+ const Mat3 &initial_R)
+ : markers(markers),
+ reconstruction(reconstruction),
+ initial_R(initial_R) {}
+
+ // dRt has dR (delta R) encoded as a euler vector in the first 3 parameters,
+ // followed by t in the next 3 parameters.
+ Vec operator()(const Vec6 &dRt) const {
+ // Unpack R, t from dRt.
+ Mat3 R = RotationFromEulerVector(dRt.head<3>()) * initial_R;
+ Vec3 t = dRt.tail<3>();
+
+ // Compute the reprojection error for each coordinate.
+ Vec residuals(2 * markers.size());
+ residuals.setZero();
+ for (int i = 0; i < markers.size(); ++i) {
+ const EuclideanPoint &point =
+ *reconstruction.PointForTrack(markers[i].track);
+ Vec3 projected = R * point.X + t;
+ projected /= projected(2);
+ residuals[2*i + 0] = projected(0) - markers[i].x;
+ residuals[2*i + 1] = projected(1) - markers[i].y;
+ }
+ return residuals;
+ }
+
+ const vector<Marker> &markers;
+ const EuclideanReconstruction &reconstruction;
+ const Mat3 &initial_R;
+};
+
+} // namespace
+
+bool EuclideanResect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction, bool final_pass) {
+ if (markers.size() < 5) {
+ return false;
+ }
+ Mat2X points_2d = PointMatrixFromMarkers(markers);
+ Mat3X points_3d(3, markers.size());
+ for (int i = 0; i < markers.size(); i++) {
+ points_3d.col(i) = reconstruction->PointForTrack(markers[i].track)->X;
+ }
+ LG << "Points for resect:\n" << points_2d;
+
+ Mat3 R;
+ Vec3 t;
+ if (!euclidean_resection::EuclideanResection(points_2d, points_3d, &R, &t)) {
+ LG << "Resection for image " << markers[0].image << " failed;"
+ << " trying fallback projective resection.";
+ if (!final_pass) return false;
+ // Euclidean resection failed. Fall back to projective resection, which is
+ // less reliable but better conditioned when there are many points.
+ Mat34 P;
+ Mat4X points_3d_homogeneous(4, markers.size());
+ for (int i = 0; i < markers.size(); i++) {
+ points_3d_homogeneous.col(i).head<3>() = points_3d.col(i);
+ points_3d_homogeneous(3, i) = 1.0;
+ }
+ resection::Resection(points_2d, points_3d_homogeneous, &P);
+ if ((P * points_3d_homogeneous.col(0))(2) < 0) {
+ LG << "Point behind camera; switch sign.";
+ P = -P;
+ }
+
+ Mat3 ignored;
+ KRt_From_P(P, &ignored, &R, &t);
+
+ // The R matrix should be a rotation, but don't rely on it.
+ Eigen::JacobiSVD<Mat3> svd(R, Eigen::ComputeFullU | Eigen::ComputeFullV);
+
+ LG << "Resection rotation is: " << svd.singularValues().transpose();
+ LG << "Determinant is: " << R.determinant();
+
+ // Correct to make R a rotation.
+ R = svd.matrixU() * svd.matrixV().transpose();
+
+ Vec3 xx = R * points_3d.col(0) + t;
+ if (xx(2) < 0.0) {
+ LG << "Final point is still behind camera...";
+ }
+ // XXX Need to check if error is horrible and fail here too in that case.
+ }
+
+ // Refine the result.
+ typedef LevenbergMarquardt<EuclideanResectCostFunction> Solver;
+
+ // Give the cost our initial guess for R.
+ EuclideanResectCostFunction resect_cost(markers, *reconstruction, R);
+
+ // Encode the initial parameters: start with zero delta rotation, and the
+ // guess for t obtained from resection.
+ Vec6 dRt = Vec6::Zero();
+ dRt.tail<3>() = t;
+
+ Solver solver(resect_cost);
+
+ Solver::SolverParameters params;
+ Solver::Results results = solver.minimize(params, &dRt);
+ LG << "LM found incremental rotation: " << dRt.head<3>().transpose();
+ // TODO(keir): Check results to ensure clean termination.
+
+ // Unpack the rotation and translation.
+ R = RotationFromEulerVector(dRt.head<3>()) * R;
+ t = dRt.tail<3>();
+
+ LG << "Resection for image " << markers[0].image << " got:\n"
+ << "R:\n" << R << "\nt:\n" << t;
+ reconstruction->InsertCamera(markers[0].image, R, t);
+ return true;
+}
+
+namespace {
+
+// Directly parameterize the projection matrix, P, which is a homogeneous
+// 3x4 matrix with 12 parameters. In theory P should be parameterized with
+// only 11 parameters, but in practice it works fine to let the extra degree
+// of freedom drift.
+struct ProjectiveResectCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec12 XMatrixType;
+
+ ProjectiveResectCostFunction(const vector<Marker> &markers,
+ const ProjectiveReconstruction &reconstruction)
+ : markers(markers),
+ reconstruction(reconstruction) {}
+
+ Vec operator()(const Vec12 &vector_P) const {
+ // Unpack P from vector_P.
+ Map<const Mat34> P(vector_P.data(), 3, 4);
+
+ // Compute the reprojection error for each coordinate.
+ Vec residuals(2 * markers.size());
+ residuals.setZero();
+ for (int i = 0; i < markers.size(); ++i) {
+ const ProjectivePoint &point =
+ *reconstruction.PointForTrack(markers[i].track);
+ Vec3 projected = P * point.X;
+ projected /= projected(2);
+ residuals[2*i + 0] = projected(0) - markers[i].x;
+ residuals[2*i + 1] = projected(1) - markers[i].y;
+ }
+ return residuals;
+ }
+
+ const vector<Marker> &markers;
+ const ProjectiveReconstruction &reconstruction;
+};
+
+} // namespace
+
+bool ProjectiveResect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction) {
+ if (markers.size() < 5) {
+ return false;
+ }
+
+ // Stack the homogeneous 3D points as the columns of a matrix.
+ Mat2X points_2d = PointMatrixFromMarkers(markers);
+ Mat4X points_3d_homogeneous(4, markers.size());
+ for (int i = 0; i < markers.size(); i++) {
+ points_3d_homogeneous.col(i) =
+ reconstruction->PointForTrack(markers[i].track)->X;
+ }
+ LG << "Points for resect:\n" << points_2d;
+
+ // Resection the point.
+ Mat34 P;
+ resection::Resection(points_2d, points_3d_homogeneous, &P);
+
+ // Flip the sign of P if necessary to keep the point in front of the camera.
+ if ((P * points_3d_homogeneous.col(0))(2) < 0) {
+ LG << "Point behind camera; switch sign.";
+ P = -P;
+ }
+
+ // TODO(keir): Check if error is horrible and fail in that case.
+
+ // Refine the resulting projection matrix using geometric error.
+ typedef LevenbergMarquardt<ProjectiveResectCostFunction> Solver;
+
+ ProjectiveResectCostFunction resect_cost(markers, *reconstruction);
+
+ // Pack the initial P matrix into a size-12 vector.
+ Vec12 vector_P = Map<Vec12>(P.data());
+
+ Solver solver(resect_cost);
+
+ Solver::SolverParameters params;
+ Solver::Results results = solver.minimize(params, &vector_P);
+ // TODO(keir): Check results to ensure clean termination.
+
+ // Unpack the projection matrix.
+ P = Map<Mat34>(vector_P.data(), 3, 4);
+
+ LG << "Resection for image " << markers[0].image << " got:\n"
+ << "P:\n" << P;
+ reconstruction->InsertCamera(markers[0].image, P);
+ return true;
+}
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/resect.h b/extern/libmv/libmv/simple_pipeline/resect.h
new file mode 100644
index 00000000000..f8b5b9f68ee
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/resect.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_RESECT_H
+#define LIBMV_SIMPLE_PIPELINE_RESECT_H
+
+#include "libmv/base/vector.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+
+namespace libmv {
+
+/*!
+ Estimate the Euclidean pose of a camera from 2D to 3D correspondences.
+
+ This takes a set of markers visible in one frame (the frame to resection),
+ such that the corresponding tracks are already reconstructed in 3D in the
+ reconstruction object, and solves for the position and orientation of the
+ camera for that frame.
+
+ \a markers should contain \link Marker markers \endlink belonging to tracks
+ visible in the one frame to be resectioned. Each of the tracks associated
+ with the markers must have a corresponding reconstructed 3D position in the
+ \a *reconstruction object.
+
+ \a *reconstruction should contain the 3D points associated with the tracks
+ for the markers present in \a markers.
+
+ \note This assumes a calibrated reconstruction, i.e. the markers are
+ already corrected for camera intrinsics and radial distortion.
+ \note This assumes an outlier-free set of markers.
+
+ \return True if the resection was successful, false otherwise.
+
+ \sa EuclideanIntersect, EuclideanReconstructTwoFrames
+*/
+bool EuclideanResect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction, bool final_pass);
+
+/*!
+ Estimate the projective pose of a camera from 2D to 3D correspondences.
+
+ This takes a set of markers visible in one frame (the frame to resection),
+ such that the corresponding tracks are already reconstructed in a
+ projective frame in the reconstruction object, and solves for the
+ projection matrix of the camera for that frame.
+
+ \a markers should contain \link Marker markers \endlink belonging to tracks
+ visible in the one frame to be resectioned. Each of the tracks associated
+ with the markers must have a corresponding reconstructed homogeneous 3D
+ position in the \a *reconstruction object.
+
+ \a *reconstruction should contain the homogeneous 3D points associated with
+ the tracks for the markers present in \a markers.
+
+ \note This assumes radial distortion has already been corrected, but
+ otherwise works for uncalibrated sequences.
+ \note This assumes an outlier-free set of markers.
+
+ \return True if the resection was successful, false otherwise.
+
+ \sa ProjectiveIntersect, ProjectiveReconstructTwoFrames
+*/
+bool ProjectiveResect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_RESECT_H
diff --git a/extern/libmv/libmv/simple_pipeline/tracks.cc b/extern/libmv/libmv/simple_pipeline/tracks.cc
new file mode 100644
index 00000000000..3fb8ddbe513
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/tracks.cc
@@ -0,0 +1,159 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <algorithm>
+#include <vector>
+#include <iterator>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/tracks.h"
+
+namespace libmv {
+
+Tracks::Tracks(const Tracks &other) {
+ markers_ = other.markers_;
+}
+
+Tracks::Tracks(const vector<Marker> &markers) : markers_(markers) {}
+
+void Tracks::Insert(int image, int track, double x, double y) {
+ // TODO(keir): Wow, this is quadratic for repeated insertions. Fix this by
+ // adding a smarter data structure like a set<>.
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].image == image &&
+ markers_[i].track == track) {
+ markers_[i].x = x;
+ markers_[i].y = y;
+ return;
+ }
+ }
+ Marker marker = { image, track, x, y };
+ markers_.push_back(marker);
+}
+
+vector<Marker> Tracks::AllMarkers() const {
+ return markers_;
+}
+
+vector<Marker> Tracks::MarkersInImage(int image) const {
+ vector<Marker> markers;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (image == markers_[i].image) {
+ markers.push_back(markers_[i]);
+ }
+ }
+ return markers;
+}
+
+vector<Marker> Tracks::MarkersForTrack(int track) const {
+ vector<Marker> markers;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (track == markers_[i].track) {
+ markers.push_back(markers_[i]);
+ }
+ }
+ return markers;
+}
+
+vector<Marker> Tracks::MarkersForTracksInBothImages(int image1, int image2) const {
+ std::vector<int> image1_tracks;
+ std::vector<int> image2_tracks;
+
+ for (int i = 0; i < markers_.size(); ++i) {
+ int image = markers_[i].image;
+ if (image == image1) {
+ image1_tracks.push_back(markers_[i].track);
+ } else if (image == image2) {
+ image2_tracks.push_back(markers_[i].track);
+ }
+ }
+
+ std::sort(image1_tracks.begin(), image1_tracks.end());
+ std::sort(image2_tracks.begin(), image2_tracks.end());
+
+ std::vector<int> intersection;
+ std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
+ image2_tracks.begin(), image2_tracks.end(),
+ std::back_inserter(intersection));
+
+ vector<Marker> markers;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if ((markers_[i].image == image1 || markers_[i].image == image2) &&
+ std::binary_search(intersection.begin(), intersection.end(),
+ markers_[i].track)) {
+ markers.push_back(markers_[i]);
+ }
+ }
+ return markers;
+}
+
+Marker Tracks::MarkerInImageForTrack(int image, int track) const {
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].image == image && markers_[i].track == track) {
+ return markers_[i];
+ }
+ }
+ Marker null = { -1, -1, -1, -1 };
+ return null;
+}
+
+void Tracks::RemoveMarkersForTrack(int track) {
+ int size = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].track != track) {
+ markers_[size++] = markers_[i];
+ }
+ }
+ markers_.resize(size);
+}
+
+void Tracks::RemoveMarker(int image, int track) {
+ int size = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].image != image || markers_[i].track != track) {
+ markers_[size++] = markers_[i];
+ }
+ }
+ markers_.resize(size);
+}
+
+int Tracks::MaxImage() const {
+ // TODO(MatthiasF): maintain a max_image_ member (updated on Insert)
+ int max_image = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ max_image = std::max(markers_[i].image, max_image);
+ }
+ return max_image;
+}
+
+int Tracks::MaxTrack() const {
+ // TODO(MatthiasF): maintain a max_track_ member (updated on Insert)
+ int max_track = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ max_track = std::max(markers_[i].track, max_track);
+ }
+ return max_track;
+}
+
+int Tracks::NumMarkers() const {
+ return markers_.size();
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/tracks.h b/extern/libmv/libmv/simple_pipeline/tracks.h
new file mode 100644
index 00000000000..739c3c4f243
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/tracks.h
@@ -0,0 +1,119 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_TRACKS_H_
+#define LIBMV_SIMPLE_PIPELINE_TRACKS_H_
+
+#include "libmv/base/vector.h"
+
+namespace libmv {
+
+/*!
+ A Marker is the 2D location of a tracked point in an image.
+
+ \a x, \a y is the position of the marker, in pixels from the top left
+ corner of the image identified by \a image. All markers corresponding to
+ the same target form a track identified by a common \a track number.
+
+ \note Markers are typically aggregated with the help of the \l Tracks class.
+
+ \sa Tracks
+*/
+struct Marker {
+ int image;
+ int track;
+ double x, y;
+};
+
+/*!
+ The Tracks class stores \link Marker reconstruction markers \endlink.
+
+ The Tracks container is intended as the store of correspondences between
+ images, which must get created before any 3D reconstruction can take place.
+
+ The container has several fast lookups for queries typically needed for
+ structure from motion algorithms, such as MarkersForTracksInBothImages().
+
+ \sa Marker
+*/
+class Tracks {
+ public:
+ Tracks() {}
+
+ /// Copy constructor for a tracks object.
+ Tracks(const Tracks &other);
+
+ /// Construct a new tracks object using the given markers to start.
+ Tracks(const vector<Marker> &markers);
+
+ /*!
+ Inserts a marker into the set. If there is already a marker for the given
+ \a image and \a track, the existing marker is replaced. If there is no
+ marker for the given \a image and \a track, a new one is added.
+
+ \a image and \a track are the keys used to retrieve the markers with the
+ other methods in this class.
+
+ \note To get an identifier for a new track, use \l MaxTrack() + 1.
+ */
+ void Insert(int image, int track, double x, double y);
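+
+ // Illustrative use (a sketch): building a track seen in two images.
+ //
+ // Tracks tracks;
+ // tracks.Insert(/*image=*/0, /*track=*/0, 12.5, 34.0);
+ // tracks.Insert(/*image=*/1, /*track=*/0, 13.1, 33.7);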
+
+ /// Returns all the markers.
+ vector<Marker> AllMarkers() const;
+
+ /// Returns all the markers belonging to a track.
+ vector<Marker> MarkersForTrack(int track) const;
+
+ /// Returns all the markers visible in \a image.
+ vector<Marker> MarkersInImage(int image) const;
+
+ /*!
+ Returns the markers in \a image1 and \a image2 which have a common track.
+
+ This is not the same as the union of the markers in \a image1 and \a
+ image2; each marker is for a track that appears in both images.
+ */
+ vector<Marker> MarkersForTracksInBothImages(int image1, int image2) const;
+
+ /// Returns the marker in \a image belonging to \a track.
+ Marker MarkerInImageForTrack(int image, int track) const;
+
+ /// Removes all the markers belonging to \a track.
+ void RemoveMarkersForTrack(int track);
+
+ /// Removes the marker in \a image belonging to \a track.
+ void RemoveMarker(int image, int track);
+
+ /// Returns the maximum image identifier used.
+ int MaxImage() const;
+
+ /// Returns the maximum track identifier used.
+ int MaxTrack() const;
+
+ /// Returns the number of markers.
+ int NumMarkers() const;
+
+ private:
+ vector<Marker> markers_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_TRACKS_H_
diff --git a/extern/libmv/libmv/tracking/klt_region_tracker.cc b/extern/libmv/libmv/tracking/klt_region_tracker.cc
new file mode 100644
index 00000000000..299077be155
--- /dev/null
+++ b/extern/libmv/libmv/tracking/klt_region_tracker.cc
@@ -0,0 +1,132 @@
+// Copyright (c) 2007, 2008, 2009, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/logging/logging.h"
+#include "libmv/tracking/klt_region_tracker.h"
+#include "libmv/image/image.h"
+#include "libmv/image/convolve.h"
+#include "libmv/image/sample.h"
+
+namespace libmv {
+
+// Compute the gradient matrix, denoted Z, and the error vector e. See
+// "Good Features to Track" (Shi and Tomasi, 1994).
+//
+// TODO(keir): The calls to SampleLinear() do boundary checking that should
+// instead happen outside the loop. Since this is the innermost loop, the extra
+// bounds checking hurts performance.
+static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
+ const Array3Df &image_and_gradient2,
+ double x1, double y1,
+ double x2, double y2,
+ int half_width,
+ float *gxx,
+ float *gxy,
+ float *gyy,
+ float *ex,
+ float *ey) {
+ *gxx = *gxy = *gyy = 0;
+ *ex = *ey = 0;
+ for (int r = -half_width; r <= half_width; ++r) {
+ for (int c = -half_width; c <= half_width; ++c) {
+ float xx1 = x1 + c;
+ float yy1 = y1 + r;
+ float xx2 = x2 + c;
+ float yy2 = y2 + r;
+ float I = SampleLinear(image_and_gradient1, yy1, xx1, 0);
+ float J = SampleLinear(image_and_gradient2, yy2, xx2, 0);
+ float gx = SampleLinear(image_and_gradient2, yy2, xx2, 1);
+ float gy = SampleLinear(image_and_gradient2, yy2, xx2, 2);
+ *gxx += gx * gx;
+ *gxy += gx * gy;
+ *gyy += gy * gy;
+ *ex += (I - J) * gx;
+ *ey += (I - J) * gy;
+ }
+ }
+}
+
+// Solve the tracking equation
+//
+// [gxx gxy] [dx] = [ex]
+// [gxy gyy] [dy] = [ey]
+//
+// for dx and dy. Borrowed from Stan Birchfield's KLT implementation.
+static bool SolveTrackingEquation(float gxx, float gxy, float gyy,
+ float ex, float ey,
+ float min_determinant,
+ float *dx, float *dy) {
+ float det = gxx * gyy - gxy * gxy;
+ if (det < min_determinant) {
+ *dx = 0;
+ *dy = 0;
+ return false;
+ }
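+ // Invert the 2x2 system in closed form (Cramer's rule).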
+ *dx = (gyy * ex - gxy * ey) / det;
+ *dy = (gxx * ey - gxy * ex) / det;
+ return true;
+}
+
+bool KltRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+ Array3Df image_and_gradient1;
+ Array3Df image_and_gradient2;
+ BlurredImageAndDerivativesChannels(image1, sigma, &image_and_gradient1);
+ BlurredImageAndDerivativesChannels(image2, sigma, &image_and_gradient2);
+
+ int i;
+ float dx = 0, dy = 0;
+ for (i = 0; i < max_iterations; ++i) {
+ // Compute gradient matrix and error vector.
+ float gxx, gxy, gyy, ex, ey;
+ ComputeTrackingEquation(image_and_gradient1,
+ image_and_gradient2,
+ x1, y1,
+ *x2, *y2,
+ half_window_size,
+ &gxx, &gxy, &gyy, &ex, &ey);
+
+ // Solve the linear system for the best update to x2 and y2.
+ if (!SolveTrackingEquation(gxx, gxy, gyy, ex, ey, min_determinant,
+ &dx, &dy)) {
+ // The determinant, which indicates the trackiness of the point, is too
+ // small, so fail out.
+ LG << "Determinant too small; failing tracking.";
+ return false;
+ }
+
+ // Update the position with the solved displacement.
+ *x2 += dx;
+ *y2 += dy;
+
+ // If the update is small, then we probably found the target.
+ if (dx * dx + dy * dy < min_update_squared_distance) {
+ LG << "Successful track in " << i << " iterations.";
+ return true;
+ }
+ }
+ // Getting here means we hit max iterations, so tracking failed.
+ LG << "Too many iterations.";
+ return false;
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/tracking/klt_region_tracker.h b/extern/libmv/libmv/tracking/klt_region_tracker.h
new file mode 100644
index 00000000000..2b2d8a9a49d
--- /dev/null
+++ b/extern/libmv/libmv/tracking/klt_region_tracker.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2007, 2008, 2009, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_REGION_TRACKING_KLT_REGION_TRACKER_H_
+#define LIBMV_REGION_TRACKING_KLT_REGION_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+struct KltRegionTracker : public RegionTracker {
+ KltRegionTracker()
+ : half_window_size(4),
+ max_iterations(16),
+ min_determinant(1e-6),
+ min_update_squared_distance(1e-6),
+ sigma(0.9) {}
+
+ virtual ~KltRegionTracker() {}
+
+ // Tracker interface.
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+
+ // No point in creating getters or setters.
+ int half_window_size;
+ int max_iterations;
+ double min_determinant;
+ double min_update_squared_distance;
+ double sigma;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_REGION_TRACKING_KLT_REGION_TRACKER_H_
diff --git a/extern/libmv/libmv/tracking/pyramid_region_tracker.cc b/extern/libmv/libmv/tracking/pyramid_region_tracker.cc
new file mode 100644
index 00000000000..30e2839c5b1
--- /dev/null
+++ b/extern/libmv/libmv/tracking/pyramid_region_tracker.cc
@@ -0,0 +1,78 @@
+// Copyright (c) 2007, 2008, 2009, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <vector>
+
+#include "libmv/image/convolve.h"
+#include "libmv/image/image.h"
+#include "libmv/image/sample.h"
+#include "libmv/tracking/pyramid_region_tracker.h"
+
+namespace libmv {
+
+static void MakePyramid(const FloatImage &image, int num_levels,
+ std::vector<FloatImage> *pyramid) {
+ pyramid->resize(num_levels);
+ (*pyramid)[0] = image;
+ for (int i = 1; i < num_levels; ++i) {
+ DownsampleChannelsBy2((*pyramid)[i - 1], &(*pyramid)[i]);
+ }
+}
+
+bool PyramidRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+  // Shrink the guessed x and y location to match the coarsest level + 1
+  // (which then gets corrected in the loop).
+ *x2 /= pow(2., num_levels_);
+ *y2 /= pow(2., num_levels_);
+
+ // Create all the levels of the pyramid, since tracking has to happen from
+ // the coarsest to finest levels, which means holding on to all levels of the
+  // pyramid at once.
+ std::vector<FloatImage> pyramid1(num_levels_);
+ std::vector<FloatImage> pyramid2(num_levels_);
+ MakePyramid(image1, num_levels_, &pyramid1);
+ MakePyramid(image2, num_levels_, &pyramid2);
+
+ for (int i = num_levels_ - 1; i >= 0; --i) {
+ // Position in the first image at pyramid level i.
+ double xx = x1 / pow(2., i);
+ double yy = y1 / pow(2., i);
+
+ // Guess the new tracked position is where the last level tracked to.
+ *x2 *= 2;
+ *y2 *= 2;
+
+ // Track the point on this level with the base tracker.
+ bool succeeded = tracker_->Track(pyramid1[i], pyramid2[i], xx, yy, x2, y2);
+
+ if (i == 0 && !succeeded) {
+      // Only fail on the highest-resolution level, because a failure on a
+      // coarse level does not imply failure at a finer one (consider
+      // out-of-bounds conditions).
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace libmv
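
The coordinate bookkeeping above is the subtle part, so here is the same
rescaling in isolation (a standalone illustration, not libmv code): a
full-resolution guess of (200, 120) with three levels is divided by 2^3
before the loop, then doubled once per level on the way back down.

#include <cmath>
#include <cstdio>

int main() {
  double x2 = 200, y2 = 120;        // full-resolution guess
  const int num_levels = 3;
  x2 /= std::pow(2.0, num_levels);  // (25, 15): one halving below the coarsest level
  y2 /= std::pow(2.0, num_levels);
  for (int i = num_levels - 1; i >= 0; --i) {
    x2 *= 2;                        // undo one halving before tracking level i
    y2 *= 2;
    std::printf("level %d starts from (%g, %g)\n", i, x2, y2);
  }
  return 0;  // prints (50, 30), then (100, 60), then (200, 120)
}
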
diff --git a/extern/libmv/libmv/tracking/pyramid_region_tracker.h b/extern/libmv/libmv/tracking/pyramid_region_tracker.h
new file mode 100644
index 00000000000..1f9675469f4
--- /dev/null
+++ b/extern/libmv/libmv/tracking/pyramid_region_tracker.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_CORRESPONDENCE_PYRAMID_TRACKER_H_
+#define LIBMV_CORRESPONDENCE_PYRAMID_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/base/scoped_ptr.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+class PyramidRegionTracker : public RegionTracker {
+ public:
+ PyramidRegionTracker(RegionTracker *tracker, int num_levels)
+ : tracker_(tracker), num_levels_(num_levels) {}
+
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+ private:
+ scoped_ptr<RegionTracker> tracker_;
+ int num_levels_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_CORRESPONDENCE_PYRAMID_TRACKER_H_
diff --git a/extern/libmv/libmv/tracking/region_tracker.h b/extern/libmv/libmv/tracking/region_tracker.h
new file mode 100644
index 00000000000..4f7574df1a3
--- /dev/null
+++ b/extern/libmv/libmv/tracking/region_tracker.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_TRACKING_TRACKER_H_
+#define LIBMV_TRACKING_TRACKER_H_
+
+#include "libmv/image/image.h"
+
+namespace libmv {
+
+class RegionTracker {
+ public:
+ RegionTracker() {}
+ virtual ~RegionTracker() {}
+
+ /*!
+ Track a point from \a image1 to \a image2.
+
+ \a x2, \a y2 should start out as a best guess for the position in \a
+ image2. If no guess is available, (\a x1, \a y1) is a good start. Returns
+    true on success, false otherwise.
+ */
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const = 0;
+};
+
+} // namespace libmv
+
+#endif  // LIBMV_TRACKING_TRACKER_H_
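
Since RegionTracker is the extension point for every tracker in this series,
a do-nothing subclass (hypothetical, useful for tests or wiring experiments)
shows the minimum an implementation must provide:

#include "libmv/tracking/region_tracker.h"

namespace libmv {

// Hypothetical stand-in: accepts the caller's guess unchanged.
struct IdentityRegionTracker : public RegionTracker {
  virtual bool Track(const FloatImage & /*image1*/,
                     const FloatImage & /*image2*/,
                     double /*x1*/, double /*y1*/,
                     double * /*x2*/, double * /*y2*/) const {
    return true;  // (*x2, *y2) already holds the guess; report success
  }
};

}  // namespace libmv
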
diff --git a/extern/libmv/libmv/tracking/retrack_region_tracker.cc b/extern/libmv/libmv/tracking/retrack_region_tracker.cc
new file mode 100644
index 00000000000..b3230b1b173
--- /dev/null
+++ b/extern/libmv/libmv/tracking/retrack_region_tracker.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <cmath>
+#include <vector>
+
+#include "libmv/tracking/retrack_region_tracker.h"
+
+namespace libmv {
+
+bool RetrackRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+ // Track forward, getting x2 and y2.
+ if (!tracker_->Track(image1, image2, x1, y1, x2, y2)) {
+ return false;
+ }
+  // Now track x2 and y2 backward, to get xx1 and yy1, which should match
+  // x1 and y1 if the track is good.
+  double xx1 = *x2, yy1 = *y2;
+ if (!tracker_->Track(image2, image1, *x2, *y2, &xx1, &yy1)) {
+ return false;
+ }
+ double dx = xx1 - x1;
+ double dy = yy1 - y1;
+ return sqrt(dx * dx + dy * dy) < tolerance_;
+}
+
+} // namespace libmv
diff --git a/extern/libmv/libmv/tracking/retrack_region_tracker.h b/extern/libmv/libmv/tracking/retrack_region_tracker.h
new file mode 100644
index 00000000000..ab05f320834
--- /dev/null
+++ b/extern/libmv/libmv/tracking/retrack_region_tracker.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_TRACKING_RETRACK_REGION_TRACKER_H_
+#define LIBMV_TRACKING_RETRACK_REGION_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/base/scoped_ptr.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+// A region tracker that tries tracking backwards and forwards, rejecting a
+// track that doesn't track backwards to the starting point.
+class RetrackRegionTracker : public RegionTracker {
+ public:
+ RetrackRegionTracker(RegionTracker *tracker, double tolerance)
+ : tracker_(tracker), tolerance_(tolerance) {}
+
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+ private:
+ scoped_ptr<RegionTracker> tracker_;
+ double tolerance_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_TRACKING_RETRACK_REGION_TRACKER_H_
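
The three trackers above compose naturally, since each wrapper takes
ownership of its inner tracker through scoped_ptr. A sketch (the level count
and tolerance are illustrative values, not libmv defaults):

#include "libmv/tracking/klt_region_tracker.h"
#include "libmv/tracking/pyramid_region_tracker.h"
#include "libmv/tracking/retrack_region_tracker.h"

// Sketch: a KLT tracker run coarse-to-fine over 3 pyramid levels, wrapped
// in a forward-backward consistency check with a 0.2 pixel tolerance.
libmv::RegionTracker *MakeRobustTracker() {
  libmv::RegionTracker *klt = new libmv::KltRegionTracker;
  libmv::RegionTracker *pyramid = new libmv::PyramidRegionTracker(klt, 3);
  return new libmv::RetrackRegionTracker(pyramid, 0.2);
}
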
diff --git a/extern/libmv/libmv/tracking/sad.cc b/extern/libmv/libmv/tracking/sad.cc
new file mode 100644
index 00000000000..9b446bb4c35
--- /dev/null
+++ b/extern/libmv/libmv/tracking/sad.cc
@@ -0,0 +1,174 @@
+/****************************************************************************
+**
+** Copyright (c) 2011 libmv authors.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to
+** deal in the Software without restriction, including without limitation the
+** rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+** sell copies of the Software, and to permit persons to whom the Software is
+** furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+** IN THE SOFTWARE.
+**
+****************************************************************************/
+
+#include "libmv/tracking/sad.h"
+#include <stdlib.h>
+#include <math.h>
+
+namespace libmv {
+
+void LaplaceFilter(ubyte* src, ubyte* dst, int width, int height, int strength) {
+ for(int y=1; y<height-1; y++) for(int x=1; x<width-1; x++) {
+ const ubyte* s = &src[y*width+x];
+    int l = 128 +
+        s[-width-1] + s[-width] + s[-width+1] +
+        s[-1]       - 8*s[0]    + s[1]        +
+        s[ width-1] + s[ width] + s[ width+1] ;
+ int d = ((256-strength)*s[0] + strength*l) / 256;
+ if(d < 0) d=0;
+ if(d > 255) d=255;
+ dst[y*width+x] = d;
+ }
+}
+
+struct vec2 {
+ float x,y;
+ inline vec2(float x, float y):x(x),y(y){}
+};
+inline vec2 operator*(mat32 m, vec2 v) {
+ return vec2(v.x*m(0,0)+v.y*m(0,1)+m(0,2),v.x*m(1,0)+v.y*m(1,1)+m(1,2));
+}
+
+//! fixed point bilinear sample with precision k
+template <int k> inline int sample(const ubyte* image,int stride, int x, int y, int u, int v) {
+ const ubyte* s = &image[y*stride+x];
+ return ((s[ 0] * (k-u) + s[ 1] * u) * (k-v)
+ + (s[stride] * (k-u) + s[stride+1] * u) * ( v) ) / (k*k);
+}
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+int lround(float x) { return _mm_cvtss_si32(_mm_set_ss(x)); }
+#elif defined(_MSC_VER)
+int lround(float x) { return int(x + (x >= 0 ? 0.5f : -0.5f)); }
+#endif
+
+//TODO(MatthiasF): SSE optimization
+void SamplePattern(ubyte* image, int stride, mat32 warp, ubyte* pattern, int size) {
+ const int k = 256;
+ for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) {
+ vec2 p = warp*vec2(j-size/2,i-size/2);
+ int fx = lround(p.x*k), fy = lround(p.y*k);
+ int ix = fx/k, iy = fy/k;
+ int u = fx%k, v = fy%k;
+ pattern[i*size+j] = sample<k>(image,stride,ix,iy,u,v);
+ }
+}
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+static uint SAD(const ubyte* pattern, const ubyte* image, int stride, int size) {
+ __m128i a = _mm_setzero_si128();
+ for(int i = 0; i < size; i++) {
+ for(int j = 0; j < size/16; j++) {
+ a = _mm_adds_epu16(a, _mm_sad_epu8( _mm_loadu_si128((__m128i*)(pattern+i*size+j*16)),
+ _mm_loadu_si128((__m128i*)(image+i*stride+j*16))));
+ }
+ }
+ return _mm_extract_epi16(a,0) + _mm_extract_epi16(a,4);
+}
+#else
+static uint SAD(const ubyte* pattern, const ubyte* image, int stride, int size) {
+ uint sad=0;
+ for(int i = 0; i < size; i++) {
+ for(int j = 0; j < size; j++) {
+ sad += abs((int)pattern[i*size+j] - image[i*stride+j]);
+ }
+ }
+ return sad;
+}
+#endif
+
+float sq(float x) { return x*x; }
+float Track(ubyte* reference, ubyte* warped, int size, ubyte* image, int stride, int w, int h, mat32* warp, float areaPenalty, float conditionPenalty) {
+ mat32 m=*warp;
+ uint min=-1;
+
+  // Exhaustive search over integer pixel translations.
+ int ix = m(0,2), iy = m(1,2);
+ for(int y = size/2; y < h-size/2; y++) {
+ for(int x = size/2; x < w-size/2; x++) {
+ m(0,2) = x, m(1,2) = y;
+ uint sad = SAD(warped,&image[(y-size/2)*stride+(x-size/2)],stride,size);
+ // TODO: using chroma could help disambiguate some cases
+ if(sad < min) {
+ min = sad;
+ ix = x, iy = y;
+ }
+ }
+ }
+ m(0,2) = ix, m(1,2) = iy;
+  min=-1; // Reset the score, since the direct warped search can match too well (but on the wrong pattern).
+
+ // 6D coordinate descent to find affine transform
+ ubyte* match = new ubyte[size*size];
+ float step = 0.5;
+ for(int p = 0; p < 8; p++) { //foreach precision level
+ for(int i = 0; i < 2; i++) { // iterate twice per precision level
+ //TODO: other sweep pattern might converge better
+ for(int d=0; d < 6; d++) { // iterate dimension sequentially (cyclic descent)
+ for(float e = -step; e <= step; e+=step) { //solve subproblem (evaluate only along one coordinate)
+ mat32 t = m;
+ t.data[d] += e;
+ //TODO: better performance would also allow a more exhaustive search
+ SamplePattern(image,stride,t,match,size);
+ uint sad = SAD(reference,match,size,size);
+ // regularization: keep constant area and good condition
+ float area = t(0,0)*t(1,1)-t(0,1)*t(1,0);
+ float x = sq(t(0,0))+sq(t(0,1)), y = sq(t(1,0))+sq(t(1,1));
+ float condition = x>y ? x/y : y/x;
+ sad += size*size*( areaPenalty*sq(area-1) + conditionPenalty*sq(condition-1) );
+ if(sad < min) {
+ min = sad;
+ m = t;
+ }
+ }
+ }
+ }
+ step /= 2;
+ }
+ *warp = m;
+
+ // Compute Pearson product-moment correlation coefficient
+  int sX=0,sY=0,sXX=0,sYY=0,sXY=0; // signed, so a negative covariance cannot wrap around
+ SamplePattern(image,stride,m,match,size);
+  // (No need to re-score here; the correlation below measures the final match.)
+ for(int i = 0; i < size; i++) {
+ for(int j = 0; j < size; j++) {
+ int x = reference[i*size+j];
+ int y = match[i*size+j];
+ sX += x;
+ sY += y;
+ sXX += x*x;
+ sYY += y*y;
+ sXY += x*y;
+ }
+ }
+ delete[] match;
+ const int N = size*size;
+ sX /= N, sY /= N, sXX /= N, sYY /= N, sXY /= N;
+ return (sXY-sX*sY)/sqrt(double((sXX-sX*sX)*(sYY-sY*sY)));
+}
+
+} // namespace libmv
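
Written out, the value Track() returns is the sample Pearson coefficient,
reconstructed here from the per-pixel sums in the code (X the reference
pattern, Y the matched pattern, bars denoting means over the size x size
window; note the code takes the means with integer division, so the result
is a close approximation):

r = \frac{\overline{XY} - \bar{X}\,\bar{Y}}
         {\sqrt{\left(\overline{X^2} - \bar{X}^2\right)
                \left(\overline{Y^2} - \bar{Y}^2\right)}}
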
diff --git a/extern/libmv/libmv/tracking/sad.h b/extern/libmv/libmv/tracking/sad.h
new file mode 100644
index 00000000000..9fe323b74c4
--- /dev/null
+++ b/extern/libmv/libmv/tracking/sad.h
@@ -0,0 +1,109 @@
+/****************************************************************************
+**
+** Copyright (c) 2011 libmv authors.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to
+** deal in the Software without restriction, including without limitation the
+** rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+** sell copies of the Software, and to permit persons to whom the Software is
+** furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+** IN THE SOFTWARE.
+**
+****************************************************************************/
+
+#ifndef LIBMV_TRACKING_SAD_H_
+#define LIBMV_TRACKING_SAD_H_
+
+#ifdef __cplusplus
+namespace libmv {
+#endif
+
+typedef unsigned char ubyte;
+typedef unsigned int uint;
+
+/*!
+ Convolve \a src into \a dst with the discrete Laplacian operator.
+
+ \a src and \a dst should be \a width x \a height images.
+ \a strength is an interpolation coefficient (0-256) between the original image and the Laplacian.
+
+ \note Make sure the search region is filtered with the same strength as the pattern.
+*/
+void LaplaceFilter(ubyte* src, ubyte* dst, int width, int height, int strength);
+
+/// Affine transformation matrix in column major order.
+struct mat32 {
+ float data[3*2];
+#ifdef __cplusplus
+ inline mat32(int d=1) { for(int i=0;i<3*2;i++) data[i]=0; if(d!=0) for(int i=0;i<2;i++) m(i,i)=d; }
+ inline float m(int i, int j) const { return data[j*2+i]; }
+ inline float& m(int i, int j) { return data[j*2+i]; }
+ inline float operator()(int i, int j) const { return m(i,j); }
+ inline float& operator()(int i, int j) { return m(i,j); }
+ inline operator bool() const { for (int i=0; i<3*2; i++) if(data[i]!=0) return true; return false; }
+#endif
+};
+
+/*!
+ Sample \a pattern from \a image.
+
+ \a warp is the transformation to apply to \a image when sampling the \a pattern.
+*/
+void SamplePattern(ubyte* image, int stride, mat32 warp, ubyte* pattern, int size);
+
+/*!
+ Track \a pattern in \a image.
+
+ This template matcher computes the
+ \link http://en.wikipedia.org/wiki/Sum_of_absolute_differences Sum of Absolute Differences (SAD) \endlink
+ for each integer pixel position in the search region and then iteratively
+ refines the subpixel position using a square search.
+ A similar method is used for motion estimation in video encoders.
+
+ \a reference is the pattern to track.
+ \a warped is a warped version of \a reference, used for the fast integer search without resampling.
+ It is best to extract an already-warped pattern directly from the previous frame.
+ The \a size of the patterns should be aligned to 16.
+ \a image is a reference to the region to search.
+ \a stride is the size in bytes of one line of \a image.
+
+ On input, \a warp is the predicted affine transformation (e.g. from the previous frame).
+ On return, \a warp is the affine transformation which best matches the reference \a pattern.
+
+ \a areaPenalty and conditionPenalty control the regularization and need to be tweaked depending on the motion.
+ Setting them to 0 will allow any transformation (including unrealistic distortions and scaling).
+ Good values are between 0 and 32; 16 is a realistic default.
+ \a areaPenalty controls scaling (decrease it to allow pull/zoom, increase it to allow only 2D rotation).
+ A large \a conditionPenalty avoids a large ratio between the largest and smallest axes.
+ It needs to be decreased for non-2D rotation (when the pattern appears to scale along one axis).
+
+ \return Pearson product-moment correlation coefficient between reference and matched pattern.
+ This measure of the linear dependence between the patterns
+ ranges from −1 (negative correlation) to 1 (positive correlation).
+ A value of 0 implies that there is no linear correlation between the variables.
+
+ \note To track affine features:
+  - Sample the reference pattern using the estimated (e.g. previous frame) warp.
+  - Feed the returned \a warp back in as the estimate for the next frame.
+ \note \a stride allows you to reference your search region instead of copying it.
+ \note For a 16x speedup, compile this tracker with SSE2 support.
+*/
+float Track(ubyte* reference, ubyte* warped, int size, ubyte* image, int stride, int width, int height, mat32* warp,
+ float areaPenalty, float conditionPenalty);
+
+#ifdef __cplusplus
+} // namespace libmv
+#endif
+
+#endif // LIBMV_TRACKING_SAD_H_
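
A hedged usage sketch for the interface documented above: match a 16x16
pattern from one frame inside the next. The frame buffers are assumed to be
8-bit grayscale with no row padding (stride == width), already Laplace
filtered with the same strength, and TrackPattern is an illustrative helper
name, not libmv API.

#include "libmv/tracking/sad.h"

float TrackPattern(libmv::ubyte *frame1, libmv::ubyte *frame2,
                   int width, int height, libmv::mat32 *warp) {
  const int size = 16;  // per the docs, size should be a multiple of 16
  libmv::ubyte reference[size * size];
  libmv::ubyte warped[size * size];
  // Extract the reference pattern (and its pre-warped copy for the integer
  // search) from the previous frame at the current warp estimate.
  libmv::SamplePattern(frame1, width, *warp, reference, size);
  libmv::SamplePattern(frame1, width, *warp, warped, size);
  // Search all of frame2; warp is refined in place and the Pearson
  // correlation of the best match is returned.
  return libmv::Track(reference, warped, size, frame2, width, width, height,
                      warp, /*areaPenalty=*/16, /*conditionPenalty=*/16);
}
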
diff --git a/extern/libmv/libmv/tracking/trklt_region_tracker.cc b/extern/libmv/libmv/tracking/trklt_region_tracker.cc
new file mode 100644
index 00000000000..65aab37bc85
--- /dev/null
+++ b/extern/libmv/libmv/tracking/trklt_region_tracker.cc
@@ -0,0 +1,141 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/trklt_region_tracker.h"
+
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/image/image.h"
+#include "libmv/image/convolve.h"
+#include "libmv/image/sample.h"
+
+namespace libmv {
+
+// Computes U and e from the Ud = e equation (number 14) from the paper.
+static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
+ const Array3Df &image_and_gradient2,
+ double x1, double y1,
+ double x2, double y2,
+ int half_width,
+ double lambda,
+ Mat2f *U,
+ Vec2f *e) {
+ Mat2f A, B, C, D;
+ A = B = C = D = Mat2f::Zero();
+
+ Vec2f R, S, V, W;
+ R = S = V = W = Vec2f::Zero();
+
+ for (int r = -half_width; r <= half_width; ++r) {
+ for (int c = -half_width; c <= half_width; ++c) {
+ float xx1 = x1 + c;
+ float yy1 = y1 + r;
+ float xx2 = x2 + c;
+ float yy2 = y2 + r;
+
+ float I = SampleLinear(image_and_gradient1, yy1, xx1, 0);
+ float J = SampleLinear(image_and_gradient2, yy2, xx2, 0);
+
+ Vec2f gI, gJ;
+ gI << SampleLinear(image_and_gradient1, yy1, xx1, 1),
+ SampleLinear(image_and_gradient1, yy1, xx1, 2);
+ gJ << SampleLinear(image_and_gradient2, yy2, xx2, 1),
+ SampleLinear(image_and_gradient2, yy2, xx2, 2);
+
+ // Equation 15 from the paper.
+ A += gI * gI.transpose();
+ B += gI * gJ.transpose();
+ C += gJ * gJ.transpose();
+ R += I * gI;
+ S += J * gI;
+ V += I * gJ;
+ W += J * gJ;
+ }
+ }
+
+ // In the paper they show a D matrix, but it is just B transpose, so use that
+ // instead of explicitly computing D.
+ Mat2f Di = B.transpose().inverse();
+
+ // Equation 14 from the paper.
+ *U = A*Di*C + lambda*Di*C - 0.5*B;
+ *e = (A + lambda*Mat2f::Identity())*Di*(V - W) + 0.5*(S - R);
+}
+
+static bool SolveTrackingEquation(const Mat2f &U,
+ const Vec2f &e,
+ float min_determinant,
+ Vec2f *d) {
+ float det = U.determinant();
+ if (det < min_determinant) {
+ d->setZero();
+ return false;
+ }
+ *d = U.lu().solve(e);
+ return true;
+}
+
+bool TrkltRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+ Array3Df image_and_gradient1;
+ Array3Df image_and_gradient2;
+ BlurredImageAndDerivativesChannels(image1, sigma, &image_and_gradient1);
+ BlurredImageAndDerivativesChannels(image2, sigma, &image_and_gradient2);
+
+ int i;
+ Vec2f d = Vec2f::Zero();
+ for (i = 0; i < max_iterations; ++i) {
+ // Compute gradient matrix and error vector.
+ Mat2f U;
+ Vec2f e;
+ ComputeTrackingEquation(image_and_gradient1,
+ image_and_gradient2,
+ x1, y1,
+ *x2, *y2,
+ half_window_size,
+ lambda,
+ &U, &e);
+
+ // Solve the linear system for the best update to x2 and y2.
+ if (!SolveTrackingEquation(U, e, min_determinant, &d)) {
+ // The determinant, which indicates the trackiness of the point, is too
+ // small, so fail out.
+ LG << "Determinant too small; failing tracking.";
+ return false;
+ }
+
+ // Update the position with the solved displacement.
+ *x2 += d[0];
+ *y2 += d[1];
+
+ // If the update is small, then we probably found the target.
+ if (d.squaredNorm() < min_update_squared_distance) {
+ LG << "Successful track in " << i << " iterations.";
+ return true;
+ }
+ }
+ // Getting here means we hit max iterations, so tracking failed.
+ LG << "Too many iterations.";
+ return false;
+}
+
+} // namespace libmv
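
For reference, the system assembled by ComputeTrackingEquation above, read
directly from the code (sums taken over the tracking window, with D = B^T
as the comment notes):

A = \sum \nabla I\,\nabla I^{\top},\quad
B = \sum \nabla I\,\nabla J^{\top},\quad
C = \sum \nabla J\,\nabla J^{\top}

R = \sum I\,\nabla I,\quad S = \sum J\,\nabla I,\quad
V = \sum I\,\nabla J,\quad W = \sum J\,\nabla J

U = (A + \lambda I_2)(B^{\top})^{-1} C - \tfrac{1}{2}B,\qquad
e = (A + \lambda I_2)(B^{\top})^{-1}(V - W) + \tfrac{1}{2}(S - R)

and each iteration solves U d = e for the update d to (x2, y2).
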
diff --git a/extern/libmv/libmv/tracking/trklt_region_tracker.h b/extern/libmv/libmv/tracking/trklt_region_tracker.h
new file mode 100644
index 00000000000..d5052395c55
--- /dev/null
+++ b/extern/libmv/libmv/tracking/trklt_region_tracker.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_REGION_TRACKING_TRKLT_REGION_TRACKER_H_
+#define LIBMV_REGION_TRACKING_TRKLT_REGION_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+// An improved KLT algorithm that enforces that the tracking is time-reversible
+// [1]. This is not the same as the "symmetric" KLT that is sometimes used.
+// Anecdotally, this tracks much more consistently than vanilla KLT.
+//
+// [1] H. Wu, R. Chellappa, A. Sankaranarayanan, and S. Kevin Zhou. Robust
+// visual tracking using the time-reversibility constraint. International
+// Conference on Computer Vision (ICCV), Rio de Janeiro, October 2007.
+//
+struct TrkltRegionTracker : public RegionTracker {
+ TrkltRegionTracker()
+ : half_window_size(4),
+ max_iterations(16),
+ min_determinant(1e-6),
+ min_update_squared_distance(1e-6),
+ sigma(0.9),
+ lambda(0.05) {}
+
+ virtual ~TrkltRegionTracker() {}
+
+ // Tracker interface.
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+
+ // No point in creating getters or setters.
+ int half_window_size;
+ int max_iterations;
+ double min_determinant;
+ double min_update_squared_distance;
+ double sigma;
+ double lambda;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_REGION_TRACKING_TRKLT_REGION_TRACKER_H_
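
As with the KLT tracker, the knobs are plain fields; lambda is the extra one
here, weighting the time-reversibility regularization. A usage sketch with
illustrative values (MakeTrkltTracker is a hypothetical helper):

#include "libmv/tracking/trklt_region_tracker.h"

// Sketch: a TRKLT tracker with a larger window and stronger regularization.
libmv::RegionTracker *MakeTrkltTracker() {
  libmv::TrkltRegionTracker *tracker = new libmv::TrkltRegionTracker;
  tracker->half_window_size = 6;  // 13x13 window instead of the default 9x9
  tracker->lambda = 0.1;          // twice the default 0.05
  return tracker;
}
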
diff --git a/extern/libmv/mkfiles.sh b/extern/libmv/mkfiles.sh
new file mode 100755
index 00000000000..6618f2849ea
--- /dev/null
+++ b/extern/libmv/mkfiles.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+find ./libmv/ -type f | sed -r 's/^\.\///' > files.txt
+find ./third_party/ -type f | sed -r 's/^\.\///' >> files.txt
diff --git a/extern/libmv/patches/bundle_tweaks.patch b/extern/libmv/patches/bundle_tweaks.patch
new file mode 100644
index 00000000000..f7b06b0a2dd
--- /dev/null
+++ b/extern/libmv/patches/bundle_tweaks.patch
@@ -0,0 +1,122 @@
+diff --git a/src/libmv/logging/logging.h b/src/libmv/logging/logging.h
+index 067da52..af86c4b 100644
+--- a/src/libmv/logging/logging.h
++++ b/src/libmv/logging/logging.h
+@@ -21,7 +21,7 @@
+ #ifndef LIBMV_LOGGING_LOGGING_H
+ #define LIBMV_LOGGING_LOGGING_H
+
+-#include "third_party/glog/src/glog/logging.h"
++#include "glog/logging.h"
+
+ #define LG LOG(INFO)
+ #define V0 LOG(INFO)
+diff --git a/src/third_party/glog/src/glog/logging.h b/src/third_party/glog/src/glog/logging.h
+index 57615ef..a58d478 100644
+--- a/src/third_party/glog/src/glog/logging.h
++++ b/src/third_party/glog/src/glog/logging.h
+@@ -33,6 +33,7 @@
+ // Pretty much everybody needs to #include this file so that they can
+ // log various happenings.
+ //
++
+ #ifndef _LOGGING_H_
+ #define _LOGGING_H_
+
+diff --git a/src/third_party/glog/src/logging.cc b/src/third_party/glog/src/logging.cc
+index 868898f..1bb3867 100644
+--- a/src/third_party/glog/src/logging.cc
++++ b/src/third_party/glog/src/logging.cc
+@@ -58,8 +58,8 @@
+ #include <errno.h> // for errno
+ #include <sstream>
+ #include "base/commandlineflags.h" // to get the program name
+-#include "glog/logging.h"
+-#include "glog/raw_logging.h"
++#include <glog/logging.h>
++#include <glog/raw_logging.h>
+ #include "base/googleinit.h"
+
+ #ifdef HAVE_STACKTRACE
+@@ -1232,7 +1232,9 @@ void LogMessage::RecordCrashReason(
+ }
+
+ static void logging_fail() {
+-#if defined(_DEBUG) && defined(_MSC_VER)
++// #if defined(_DEBUG) && defined(_MSC_VER)
++// doesn't work for my laptop (sergey)
++#if 0
+ // When debugging on windows, avoid the obnoxious dialog and make
+ // it possible to continue past a LOG(FATAL) in the debugger
+ _asm int 3
+diff --git a/src/third_party/glog/src/raw_logging.cc b/src/third_party/glog/src/raw_logging.cc
+index 50c6a71..b179a1e 100644
+--- a/src/third_party/glog/src/raw_logging.cc
++++ b/src/third_party/glog/src/raw_logging.cc
+@@ -42,8 +42,8 @@
+ #include <fcntl.h> // for open()
+ #include <time.h>
+ #include "config.h"
+-#include "glog/logging.h" // To pick up flag settings etc.
+-#include "glog/raw_logging.h"
++#include <glog/logging.h> // To pick up flag settings etc.
++#include <glog/raw_logging.h>
+ #include "base/commandlineflags.h"
+
+ #ifdef HAVE_STACKTRACE
+diff --git a/src/third_party/glog/src/utilities.h b/src/third_party/glog/src/utilities.h
+index ee54f94..2d4e99e 100644
+--- a/src/third_party/glog/src/utilities.h
++++ b/src/third_party/glog/src/utilities.h
+@@ -79,7 +79,7 @@
+ #endif
+
+ #include "config.h"
+-#include "glog/logging.h"
++#include <glog/logging.h>
+
+ // There are three different ways we can try to get the stack trace:
+ //
+diff --git a/src/third_party/glog/src/vlog_is_on.cc b/src/third_party/glog/src/vlog_is_on.cc
+index ee0e412..ed88514 100644
+--- a/src/third_party/glog/src/vlog_is_on.cc
++++ b/src/third_party/glog/src/vlog_is_on.cc
+@@ -40,8 +40,8 @@
+ #include <cstdio>
+ #include <string>
+ #include "base/commandlineflags.h"
+-#include "glog/logging.h"
+-#include "glog/raw_logging.h"
++#include <glog/logging.h>
++#include <glog/raw_logging.h>
+ #include "base/googleinit.h"
+
+ // glog doesn't have annotation
+diff --git a/src/third_party/glog/src/windows/config.h b/src/third_party/glog/src/windows/config.h
+index 114762e..682a1b9 100755
+--- a/src/third_party/glog/src/windows/config.h
++++ b/src/third_party/glog/src/windows/config.h
+@@ -19,7 +19,7 @@
+ #undef HAVE_LIBUNWIND_H
+
+ /* define if you have google gflags library */
+-#undef HAVE_LIB_GFLAGS
++#define HAVE_LIB_GFLAGS 1
+
+ /* define if you have libunwind */
+ #undef HAVE_LIB_UNWIND
+diff --git a/src/third_party/glog/src/windows/glog/logging.h b/src/third_party/glog/src/windows/glog/logging.h
+index 7a6df74..de51586 100755
+--- a/src/third_party/glog/src/windows/glog/logging.h
++++ b/src/third_party/glog/src/windows/glog/logging.h
+@@ -82,8 +82,8 @@
+ #include <inttypes.h> // a third place for uint16_t or u_int16_t
+ #endif
+
+-#if 0
+-#include <gflags/gflags.h>
++#if 1
++#include "third_party/gflags/gflags.h"
+ #endif
+
+ namespace google {
diff --git a/extern/libmv/patches/config_mac.patch b/extern/libmv/patches/config_mac.patch
new file mode 100644
index 00000000000..5a880155bfa
--- /dev/null
+++ b/extern/libmv/patches/config_mac.patch
@@ -0,0 +1,13 @@
+diff --git a/src/third_party/glog/src/config_mac.h b/src/third_party/glog/src/config_mac.h
+index a45575b..5f953d1 100644
+--- a/src/third_party/glog/src/config_mac.h
++++ b/src/third_party/glog/src/config_mac.h
+@@ -131,7 +131,7 @@
+ #define PACKAGE_VERSION "0.3.1"
+
+ /* How to access the PC from a struct ucontext */
+-#define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip
++#undef PC_FROM_UCONTEXT
+
+ /* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
diff --git a/extern/libmv/patches/fast.patch b/extern/libmv/patches/fast.patch
new file mode 100644
index 00000000000..8e0aeb7e721
--- /dev/null
+++ b/extern/libmv/patches/fast.patch
@@ -0,0 +1,24 @@
+diff --git a/src/third_party/fast/fast.h b/src/third_party/fast/fast.h
+index 2b3825a..06fa90e 100644
+--- a/src/third_party/fast/fast.h
++++ b/src/third_party/fast/fast.h
+@@ -1,6 +1,10 @@
+ #ifndef FAST_H
+ #define FAST_H
+
++#ifdef __cplusplus
++extern "C" {
++#endif
++
+ typedef struct { int x, y; } xy;
+ typedef unsigned char byte;
+
+@@ -28,4 +32,8 @@ xy* fast12_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b
+ xy* nonmax_suppression(const xy* corners, const int* scores, int num_corners, int* ret_num_nonmax);
+
+
++#ifdef __cplusplus
++}
++#endif
++
+ #endif
diff --git a/extern/libmv/patches/function_derivative.patch b/extern/libmv/patches/function_derivative.patch
new file mode 100644
index 00000000000..be7ccfc911a
--- /dev/null
+++ b/extern/libmv/patches/function_derivative.patch
@@ -0,0 +1,21 @@
+diff --git a/src/libmv/numeric/function_derivative.h b/src/libmv/numeric/function_derivative.h
+index 0075d23..d7bc437 100644
+--- a/src/libmv/numeric/function_derivative.h
++++ b/src/libmv/numeric/function_derivative.h
+@@ -24,6 +24,7 @@
+ #include <cmath>
+
+ #include "libmv/numeric/numeric.h"
++#include "libmv/logging/logging.h"
+
+ namespace libmv {
+
+@@ -97,7 +98,7 @@ bool CheckJacobian(const Function &f, const typename Function::XMatrixType &x) {
+
+ typename NumericJacobian<Function>::JMatrixType J_numeric = j_numeric(x);
+ typename NumericJacobian<Function>::JMatrixType J_analytic = j_analytic(x);
+- //LG << J_numeric - J_analytic;
++ LG << J_numeric - J_analytic;
+ return true;
+ }
+
diff --git a/extern/libmv/patches/high_distortion_crash_fix.patch b/extern/libmv/patches/high_distortion_crash_fix.patch
new file mode 100644
index 00000000000..54ab66fa27c
--- /dev/null
+++ b/extern/libmv/patches/high_distortion_crash_fix.patch
@@ -0,0 +1,21 @@
+diff --git a/src/libmv/simple_pipeline/camera_intrinsics.cc b/src/libmv/simple_pipeline/camera_intrinsics.cc
+index 4e88e1f..f9888ff 100644
+--- a/src/libmv/simple_pipeline/camera_intrinsics.cc
++++ b/src/libmv/simple_pipeline/camera_intrinsics.cc
+@@ -160,9 +160,13 @@ void CameraIntrinsics::ComputeLookupGrid(Offset* grid, int width, int height) {
+ if( iy < 0 ) { iy = 0, fy = 0; }
+ if( ix >= width-2 ) ix = width-2;
+ if( iy >= height-2 ) iy = height-2;
+- //assert( ix-x > -128 && ix-x < 128 && iy-y > -128 && iy-y < 128 );
+- Offset offset = { ix-x, iy-y, fx, fy };
+- grid[y*width+x] = offset;
++ if ( ix-x > -128 && ix-x < 128 && iy-y > -128 && iy-y < 128 ) {
++ Offset offset = { ix-x, iy-y, fx, fy };
++ grid[y*width+x] = offset;
++ } else {
++ Offset offset = { 0, 0, 0, 0 };
++ grid[y*width+x] = offset;
++ }
+ }
+ }
+ }
diff --git a/extern/libmv/patches/levenberg_marquardt.patch b/extern/libmv/patches/levenberg_marquardt.patch
new file mode 100644
index 00000000000..49ef82d73d2
--- /dev/null
+++ b/extern/libmv/patches/levenberg_marquardt.patch
@@ -0,0 +1,71 @@
+diff --git a/src/libmv/numeric/levenberg_marquardt.h b/src/libmv/numeric/levenberg_marquardt.h
+index 6a54f66..4473b72 100644
+--- a/src/libmv/numeric/levenberg_marquardt.h
++++ b/src/libmv/numeric/levenberg_marquardt.h
+@@ -33,6 +33,7 @@
+
+ #include "libmv/numeric/numeric.h"
+ #include "libmv/numeric/function_derivative.h"
++#include "libmv/logging/logging.h"
+
+ namespace libmv {
+
+@@ -123,26 +124,40 @@ class LevenbergMarquardt {
+ Parameters dx, x_new;
+ int i;
+ for (i = 0; results.status == RUNNING && i < params.max_iterations; ++i) {
+- if (dx.norm() <= params.relative_step_threshold * x.norm()) {
++ VLOG(1) << "iteration: " << i;
++ VLOG(1) << "||f(x)||: " << f_(x).norm();
++ VLOG(1) << "max(g): " << g.array().abs().maxCoeff();
++ VLOG(1) << "u: " << u;
++ VLOG(1) << "v: " << v;
++
++ AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
++ Solver solver(A_augmented);
++ dx = solver.solve(g);
++ bool solved = (A_augmented * dx).isApprox(g);
++ if (!solved) {
++ LOG(ERROR) << "Failed to solve";
++ }
++ if (solved && dx.norm() <= params.relative_step_threshold * x.norm()) {
+ results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
+ break;
+- }
+- x_new = x + dx;
+- // Rho is the ratio of the actual reduction in error to the reduction
+- // in error that would be obtained if the problem was linear.
+- // See [1] for details.
+- Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
+- / dx.dot(u*dx + g));
+- if (rho > 0) {
+- // Accept the Gauss-Newton step because the linear model fits well.
+- x = x_new;
+- results.status = Update(x, params, &J, &A, &error, &g);
+- Scalar tmp = Scalar(2*rho-1);
+- u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
+- v = 2;
+- continue;
+- }
+-
++ }
++ if (solved) {
++ x_new = x + dx;
++ // Rho is the ratio of the actual reduction in error to the reduction
++ // in error that would be obtained if the problem was linear.
++ // See [1] for details.
++ Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
++ / dx.dot(u*dx + g));
++ if (rho > 0) {
++ // Accept the Gauss-Newton step because the linear model fits well.
++ x = x_new;
++ results.status = Update(x, params, &J, &A, &error, &g);
++ Scalar tmp = Scalar(2*rho-1);
++ u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
++ v = 2;
++ continue;
++ }
++ }
+ // Reject the update because either the normal equations failed to solve
+ // or the local linear model was not good (rho < 0). Instead, increase u
+ // to move closer to gradient descent.
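
The patched solver follows the standard Levenberg-Marquardt damping
schedule; in the notation of the code above (A the normal-equations matrix,
g the gradient, and the damping u written here as mu):

(A + \mu I)\,\Delta x = g,\qquad
\rho = \frac{\lVert f(x)\rVert^{2} - \lVert f(x + \Delta x)\rVert^{2}}
            {\Delta x^{\top}(\mu\,\Delta x + g)}

If \rho > 0 the step is accepted and the damping shrinks,
\mu \leftarrow \mu \cdot \max(1/3,\; 1 - (2\rho - 1)^{3}), v \leftarrow 2;
otherwise \mu grows, pushing the next step toward gradient descent.
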
diff --git a/extern/libmv/patches/mingw.patch b/extern/libmv/patches/mingw.patch
new file mode 100644
index 00000000000..0b08a483bea
--- /dev/null
+++ b/extern/libmv/patches/mingw.patch
@@ -0,0 +1,13 @@
+diff --git a/src/libmv/numeric/numeric.h b/src/libmv/numeric/numeric.h
+index f39d126..21e0f06 100644
+--- a/src/libmv/numeric/numeric.h
++++ b/src/libmv/numeric/numeric.h
+@@ -40,7 +40,7 @@
+ }
+ #endif //_WIN32 || __APPLE__
+
+-#if _WIN32
++#if (defined(WIN32) || defined(WIN64)) && !defined(__MINGW32__)
+ inline long lround(double d) {
+ return (long)(d>0 ? d+0.5 : ceil(d-0.5));
+ }
diff --git a/extern/libmv/patches/msvc2010.patch b/extern/libmv/patches/msvc2010.patch
new file mode 100644
index 00000000000..c090b070628
--- /dev/null
+++ b/extern/libmv/patches/msvc2010.patch
@@ -0,0 +1,12 @@
+diff --git a/src/libmv/simple_pipeline/tracks.cc b/src/libmv/simple_pipeline/tracks.cc
+index 0e2a1b6..3fb8ddb 100644
+--- a/src/libmv/simple_pipeline/tracks.cc
++++ b/src/libmv/simple_pipeline/tracks.cc
+@@ -20,6 +20,7 @@
+
+ #include <algorithm>
+ #include <vector>
++#include <iterator>
+
+ #include "libmv/numeric/numeric.h"
+ #include "libmv/simple_pipeline/tracks.h"
diff --git a/extern/libmv/patches/scaled_distortion.patch b/extern/libmv/patches/scaled_distortion.patch
new file mode 100644
index 00000000000..2da832931d1
--- /dev/null
+++ b/extern/libmv/patches/scaled_distortion.patch
@@ -0,0 +1,261 @@
+diff --git a/src/libmv/simple_pipeline/camera_intrinsics.cc b/src/libmv/simple_pipeline/camera_intrinsics.cc
+index f9888ff..110a16d 100644
+--- a/src/libmv/simple_pipeline/camera_intrinsics.cc
++++ b/src/libmv/simple_pipeline/camera_intrinsics.cc
+@@ -23,7 +23,32 @@
+
+ namespace libmv {
+
+-struct Offset { signed char ix,iy; unsigned char fx,fy; };
++struct Offset {
++ signed char ix, iy;
++ unsigned char fx,fy;
++};
++
++struct Grid {
++ struct Offset *offset;
++ int width, height;
++};
++
++static struct Grid *copyGrid(struct Grid *from)
++{
++ struct Grid *to = NULL;
++
++ if (from) {
++ to = new Grid;
++
++ to->width = from->width;
++ to->height = from->height;
++
++ to->offset = new Offset[to->width*to->height];
++ memcpy(to->offset, from->offset, sizeof(struct Offset)*to->width*to->height);
++ }
++
++ return to;
++}
+
+ CameraIntrinsics::CameraIntrinsics()
+ : K_(Mat3::Identity()),
+@@ -37,9 +62,22 @@ CameraIntrinsics::CameraIntrinsics()
+ distort_(0),
+ undistort_(0) {}
+
++CameraIntrinsics::CameraIntrinsics(const CameraIntrinsics &from)
++ : K_(from.K_),
++ image_width_(from.image_width_),
++ image_height_(from.image_height_),
++ k1_(from.k1_),
++ k2_(from.k2_),
++ k3_(from.k3_),
++ p1_(from.p1_),
++ p2_(from.p2_)
++{
++ distort_ = copyGrid(from.distort_);
++ undistort_ = copyGrid(from.undistort_);
++}
++
+ CameraIntrinsics::~CameraIntrinsics() {
+- if(distort_) delete[] distort_;
+- if(undistort_) delete[] undistort_;
++ FreeLookupGrid();
+ }
+
+ /// Set the entire calibration matrix at once.
+@@ -146,11 +184,17 @@ void CameraIntrinsics::InvertIntrinsics(double image_x,
+
+ // TODO(MatthiasF): downsample lookup
+ template<typename WarpFunction>
+-void CameraIntrinsics::ComputeLookupGrid(Offset* grid, int width, int height) {
++void CameraIntrinsics::ComputeLookupGrid(Grid* grid, int width, int height) {
++ double aspx = (double)width / image_width_;
++ double aspy = (double)height / image_height_;
++
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
++ double src_x = x / aspx, src_y = y / aspy;
+ double warp_x, warp_y;
+- WarpFunction(this,x,y,&warp_x,&warp_y);
++ WarpFunction(this,src_x,src_y,&warp_x,&warp_y);
++ warp_x = warp_x*aspx;
++ warp_y = warp_y*aspy;
+ int ix = int(warp_x), iy = int(warp_y);
+ int fx = round((warp_x-ix)*256), fy = round((warp_y-iy)*256);
+ if(fx == 256) { fx=0; ix++; }
+@@ -162,10 +206,10 @@ void CameraIntrinsics::ComputeLookupGrid(Offset* grid, int width, int height) {
+ if( iy >= height-2 ) iy = height-2;
+ if ( ix-x > -128 && ix-x < 128 && iy-y > -128 && iy-y < 128 ) {
+ Offset offset = { ix-x, iy-y, fx, fy };
+- grid[y*width+x] = offset;
++ grid->offset[y*width+x] = offset;
+ } else {
+ Offset offset = { 0, 0, 0, 0 };
+- grid[y*width+x] = offset;
++ grid->offset[y*width+x] = offset;
+ }
+ }
+ }
+@@ -173,11 +217,11 @@ void CameraIntrinsics::ComputeLookupGrid(Offset* grid, int width, int height) {
+
+ // TODO(MatthiasF): cubic B-Spline image sampling, bilinear lookup
+ template<typename T,int N>
+-static void Warp(const Offset* grid, const T* src, T* dst,
++static void Warp(const Grid* grid, const T* src, T* dst,
+ int width, int height) {
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+- Offset offset = grid[y*width+x];
++ Offset offset = grid->offset[y*width+x];
+ const T* s = &src[((y+offset.iy)*width+(x+offset.ix))*N];
+ for (int i = 0; i < N; i++) {
+ dst[(y*width+x)*N+i] = ((s[ i] * (256-offset.fx) + s[ N+i] * offset.fx) * (256-offset.fy)
+@@ -188,8 +232,17 @@ static void Warp(const Offset* grid, const T* src, T* dst,
+ }
+
+ void CameraIntrinsics::FreeLookupGrid() {
+- if(distort_) delete distort_, distort_=0;
+- if(undistort_) delete undistort_, undistort_=0;
++ if(distort_) {
++ delete distort_->offset;
++ delete distort_;
++ distort_ = NULL;
++ }
++
++ if(undistort_) {
++ delete undistort_->offset;
++ delete undistort_;
++ undistort_ = NULL;
++ }
+ }
+
+ // FIXME: C++ templates limitations makes thing complicated, but maybe there is a simpler method.
+@@ -211,11 +264,50 @@ struct InvertIntrinsicsFunction {
+ }
+ };
+
+-void CameraIntrinsics::Distort(const float* src, float* dst, int width, int height, int channels) {
+- if(!distort_) {
+- distort_ = new Offset[width*height];
+- ComputeLookupGrid<InvertIntrinsicsFunction>(distort_,width,height);
++void CameraIntrinsics::CheckDistortLookupGrid(int width, int height)
++{
++ if(distort_) {
++ if(distort_->width != width || distort_->height != height) {
++ delete [] distort_->offset;
++ distort_->offset = NULL;
++ }
++ } else {
++ distort_ = new Grid;
++ distort_->offset = NULL;
++ }
++
++ if(!distort_->offset) {
++ distort_->offset = new Offset[width*height];
++ ComputeLookupGrid<InvertIntrinsicsFunction>(distort_,width,height);
+ }
++
++ distort_->width = width;
++ distort_->height = height;
++}
++
++void CameraIntrinsics::CheckUndistortLookupGrid(int width, int height)
++{
++ if(undistort_) {
++ if(undistort_->width != width || undistort_->height != height) {
++ delete [] undistort_->offset;
++ undistort_->offset = NULL;
++ }
++ } else {
++ undistort_ = new Grid;
++ undistort_->offset = NULL;
++ }
++
++ if(!undistort_->offset) {
++ undistort_->offset = new Offset[width*height];
++ ComputeLookupGrid<ApplyIntrinsicsFunction>(undistort_,width,height);
++ }
++
++ undistort_->width = width;
++ undistort_->height = height;
++}
++
++void CameraIntrinsics::Distort(const float* src, float* dst, int width, int height, int channels) {
++ CheckDistortLookupGrid(width, height);
+ if(channels==1) Warp<float,1>(distort_,src,dst,width,height);
+ else if(channels==2) Warp<float,2>(distort_,src,dst,width,height);
+ else if(channels==3) Warp<float,3>(distort_,src,dst,width,height);
+@@ -224,10 +316,7 @@ void CameraIntrinsics::Distort(const float* src, float* dst, int width, int heig
+ }
+
+ void CameraIntrinsics::Distort(const unsigned char* src, unsigned char* dst, int width, int height, int channels) {
+- if(!distort_) {
+- distort_ = new Offset[width*height];
+- ComputeLookupGrid<InvertIntrinsicsFunction>(distort_,width,height);
+- }
++ CheckDistortLookupGrid(width, height);
+ if(channels==1) Warp<unsigned char,1>(distort_,src,dst,width,height);
+ else if(channels==2) Warp<unsigned char,2>(distort_,src,dst,width,height);
+ else if(channels==3) Warp<unsigned char,3>(distort_,src,dst,width,height);
+@@ -236,10 +325,7 @@ void CameraIntrinsics::Distort(const unsigned char* src, unsigned char* dst, int
+ }
+
+ void CameraIntrinsics::Undistort(const float* src, float* dst, int width, int height, int channels) {
+- if(!undistort_) {
+- undistort_ = new Offset[width*height];
+- ComputeLookupGrid<ApplyIntrinsicsFunction>(undistort_,width,height);
+- }
++ CheckUndistortLookupGrid(width, height);
+ if(channels==1) Warp<float,1>(undistort_,src,dst,width,height);
+ else if(channels==2) Warp<float,2>(undistort_,src,dst,width,height);
+ else if(channels==3) Warp<float,3>(undistort_,src,dst,width,height);
+@@ -248,10 +334,7 @@ void CameraIntrinsics::Undistort(const float* src, float* dst, int width, int he
+ }
+
+ void CameraIntrinsics::Undistort(const unsigned char* src, unsigned char* dst, int width, int height, int channels) {
+- if(!undistort_) {
+- undistort_ = new Offset[width*height];
+- ComputeLookupGrid<ApplyIntrinsicsFunction>(undistort_,width,height);
+- }
++ CheckUndistortLookupGrid(width, height);
+ if(channels==1) Warp<unsigned char,1>(undistort_,src,dst,width,height);
+ else if(channels==2) Warp<unsigned char,2>(undistort_,src,dst,width,height);
+ else if(channels==3) Warp<unsigned char,3>(undistort_,src,dst,width,height);
+diff --git a/src/libmv/simple_pipeline/camera_intrinsics.h b/src/libmv/simple_pipeline/camera_intrinsics.h
+index 29bc8a1..f525571 100644
+--- a/src/libmv/simple_pipeline/camera_intrinsics.h
++++ b/src/libmv/simple_pipeline/camera_intrinsics.h
+@@ -26,11 +26,12 @@ typedef Eigen::Matrix<double, 3, 3> Mat3;
+
+ namespace libmv {
+
+-struct Offset;
++struct Grid;
+
+ class CameraIntrinsics {
+ public:
+ CameraIntrinsics();
++ CameraIntrinsics(const CameraIntrinsics &from);
+ ~CameraIntrinsics();
+
+ const Mat3 &K() const { return K_; }
+@@ -123,7 +124,9 @@ class CameraIntrinsics {
+ int width, int height, int channels);
+
+ private:
+- template<typename WarpFunction> void ComputeLookupGrid(Offset* grid, int width, int height);
++ template<typename WarpFunction> void ComputeLookupGrid(struct Grid* grid, int width, int height);
++ void CheckUndistortLookupGrid(int width, int height);
++ void CheckDistortLookupGrid(int width, int height);
+ void FreeLookupGrid();
+
+ // The traditional intrinsics matrix from x = K[R|t]X.
+@@ -140,8 +143,8 @@ class CameraIntrinsics {
+ // independent of image size.
+ double k1_, k2_, k3_, p1_, p2_;
+
+- Offset* distort_;
+- Offset* undistort_;
++ struct Grid *distort_;
++ struct Grid *undistort_;
+ };
+
+ } // namespace libmv
diff --git a/extern/libmv/patches/series b/extern/libmv/patches/series
new file mode 100644
index 00000000000..8f785a659cf
--- /dev/null
+++ b/extern/libmv/patches/series
@@ -0,0 +1,11 @@
+v3d_verbosity.patch
+snrptinf_fix.patch
+bundle_tweaks.patch
+fast.patch
+config_mac.patch
+levenberg_marquardt.patch
+function_derivative.patch
+high_distortion_crash_fix.patch
+mingw.patch
+msvc2010.patch
+scaled_distortion.patch
diff --git a/extern/libmv/patches/snrptinf_fix.patch b/extern/libmv/patches/snrptinf_fix.patch
new file mode 100644
index 00000000000..e886a671de0
--- /dev/null
+++ b/extern/libmv/patches/snrptinf_fix.patch
@@ -0,0 +1,15 @@
+diff --git a/src/libmv/simple_pipeline/pipeline.cc b/src/libmv/simple_pipeline/pipeline.cc
+index 652d70c..25cd2c2 100644
+--- a/src/libmv/simple_pipeline/pipeline.cc
++++ b/src/libmv/simple_pipeline/pipeline.cc
+@@ -28,6 +28,10 @@
+ #include "libmv/simple_pipeline/tracks.h"
+ #include "libmv/simple_pipeline/camera_intrinsics.h"
+
++#ifdef _MSC_VER
++# define snprintf _snprintf
++#endif
++
+ namespace libmv {
+
+ void CompleteReconstruction(const Tracks &tracks,
diff --git a/extern/libmv/patches/v3d_verbosity.patch b/extern/libmv/patches/v3d_verbosity.patch
new file mode 100644
index 00000000000..a54f3dc44be
--- /dev/null
+++ b/extern/libmv/patches/v3d_verbosity.patch
@@ -0,0 +1,12 @@
+diff --git a/src/libmv/simple_pipeline/bundle.cc b/src/libmv/simple_pipeline/bundle.cc
+index 310660d..f819603 100644
+--- a/src/libmv/simple_pipeline/bundle.cc
++++ b/src/libmv/simple_pipeline/bundle.cc
+@@ -141,7 +141,6 @@ void Bundle(const Tracks &tracks, Reconstruction *reconstruction) {
+ v3d_distortion.p2 = 0;
+
+ // Finally, run the bundle adjustment.
+- V3D::optimizerVerbosenessLevel = 1;
+ double const inlierThreshold = 500000.0;
+ V3D::CommonInternalsMetricBundleOptimizer opt(V3D::FULL_BUNDLE_METRIC,
+ inlierThreshold,
diff --git a/extern/libmv/third_party/fast/LICENSE b/extern/libmv/third_party/fast/LICENSE
new file mode 100644
index 00000000000..f347008d6ef
--- /dev/null
+++ b/extern/libmv/third_party/fast/LICENSE
@@ -0,0 +1,30 @@
+Copyright (c) 2006, 2008 Edward Rosten
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+
+ *Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ *Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ *Neither the name of the University of Cambridge nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/extern/libmv/third_party/fast/README b/extern/libmv/third_party/fast/README
new file mode 100644
index 00000000000..77017078d08
--- /dev/null
+++ b/extern/libmv/third_party/fast/README
@@ -0,0 +1,31 @@
+FAST feature detectors in C Version 2.0
+---------------------------------------
+
+The files are valid C and C++ code, have no special requirements for
+compiling, and do not depend on any libraries. Just compile them along with
+the rest of your project.
+
+To use the functions, #include "fast.h"
+
+The corner detectors have the following prototype (where X is 9, 10, 11 or 12):
+
+xy* fastX_detect_nonmax(const unsigned char * data, int xsize, int ysize, int stride, int threshold, int* numcorners)
+
+Where xy is the following simple struct typedef:
+
+typedef struct
+{
+ int x, y;
+} xy;
+
+The image is passed in as a block of data and dimensions, and the list of
+corners is returned as an array of xy structs, and an integer (numcorners)
+with the number of corners returned. The data can be deallocated with free().
+Nonmaximal suppression is performed on the corners. Note that the stride
+is the number of bytes between rows. If your image has no padding, then this
+is the same as xsize.
+
+The detection, scoring and nonmaximal suppression are available as individual
+functions. To see how to use the individual functions, see fast.c.
+
+
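
A short sketch of the call described above (written in C++ for consistency
with the rest of libmv; the image buffer and threshold value are
assumptions):

#include <cstdio>
#include <cstdlib>
#include "fast.h"

// Sketch: detect FAST-9 corners with nonmaximal suppression on a tightly
// packed 8-bit grayscale image (stride == xsize, as noted above).
void DetectCorners(const byte *image, int xsize, int ysize) {
  int num_corners = 0;
  xy *corners = fast9_detect_nonmax(image, xsize, ysize, /*stride=*/xsize,
                                    /*threshold=*/20, &num_corners);
  for (int i = 0; i < num_corners; i++)
    std::printf("corner %d at (%d, %d)\n", i, corners[i].x, corners[i].y);
  std::free(corners);  // results are deallocated with free(), per the README
}
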
diff --git a/extern/libmv/third_party/fast/README.libmv b/extern/libmv/third_party/fast/README.libmv
new file mode 100644
index 00000000000..2110976dd14
--- /dev/null
+++ b/extern/libmv/third_party/fast/README.libmv
@@ -0,0 +1,9 @@
+Project: FAST (FAST Corner Detection)
+URL: http://mi.eng.cam.ac.uk/~er258/work/fast-C-src/
+License: BSD
+Upstream version: 2.1, released 12-Jan-2009
+
+Local modifications:
+- Created CMakeLists.txt for CMake build.
+- Update CMakeLists to be sure that the library is compatible with C++ linkage.
+- Update CMakeLists to not include fast.h to compile fast library with VS2005.
diff --git a/extern/libmv/third_party/fast/fast.c b/extern/libmv/third_party/fast/fast.c
new file mode 100644
index 00000000000..c675f0af883
--- /dev/null
+++ b/extern/libmv/third_party/fast/fast.c
@@ -0,0 +1,71 @@
+#include <stdlib.h>
+#include "fast.h"
+
+
+xy* fast9_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ xy* corners;
+ int num_corners;
+ int* scores;
+ xy* nonmax;
+
+ corners = fast9_detect(im, xsize, ysize, stride, b, &num_corners);
+ scores = fast9_score(im, stride, corners, num_corners, b);
+ nonmax = nonmax_suppression(corners, scores, num_corners, ret_num_corners);
+
+ free(corners);
+ free(scores);
+
+ return nonmax;
+}
+
+xy* fast10_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ xy* corners;
+ int num_corners;
+ int* scores;
+ xy* nonmax;
+
+ corners = fast10_detect(im, xsize, ysize, stride, b, &num_corners);
+ scores = fast10_score(im, stride, corners, num_corners, b);
+ nonmax = nonmax_suppression(corners, scores, num_corners, ret_num_corners);
+
+ free(corners);
+ free(scores);
+
+ return nonmax;
+}
+
+xy* fast11_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ xy* corners;
+ int num_corners;
+ int* scores;
+ xy* nonmax;
+
+ corners = fast11_detect(im, xsize, ysize, stride, b, &num_corners);
+ scores = fast11_score(im, stride, corners, num_corners, b);
+ nonmax = nonmax_suppression(corners, scores, num_corners, ret_num_corners);
+
+ free(corners);
+ free(scores);
+
+ return nonmax;
+}
+
+xy* fast12_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ xy* corners;
+ int num_corners;
+ int* scores;
+ xy* nonmax;
+
+ corners = fast12_detect(im, xsize, ysize, stride, b, &num_corners);
+ scores = fast12_score(im, stride, corners, num_corners, b);
+ nonmax = nonmax_suppression(corners, scores, num_corners, ret_num_corners);
+
+ free(corners);
+ free(scores);
+
+ return nonmax;
+}
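The four wrappers above differ only in which detect/score pair they call. As a hedged illustration (not part of the upstream sources; detect_nonmax_generic is a hypothetical name), the shared detect -> score -> suppress pipeline could be factored through function pointers:

#include <stdlib.h>
#include "fast.h"

typedef xy*  (*detect_fn)(const byte*, int, int, int, int, int*);
typedef int* (*score_fn)(const byte*, int, xy*, int, int);

/* Hypothetical helper illustrating the pipeline the four wrappers repeat:
   detect candidate corners, score them, then apply nonmaximal suppression. */
static xy* detect_nonmax_generic(detect_fn detect, score_fn score,
                                 const byte* im, int xsize, int ysize,
                                 int stride, int b, int* ret_num_corners)
{
    int num_corners;
    xy* corners = detect(im, xsize, ysize, stride, b, &num_corners);
    int* scores = score(im, stride, corners, num_corners, b);
    xy* nonmax = nonmax_suppression(corners, scores, num_corners,
                                    ret_num_corners);
    free(corners);
    free(scores);
    return nonmax;
}

With such a helper, fast9_detect_nonmax would reduce to detect_nonmax_generic(fast9_detect, fast9_score, im, xsize, ysize, stride, b, ret_num_corners); the upstream code keeps the four explicit copies instead.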
diff --git a/extern/libmv/third_party/fast/fast.h b/extern/libmv/third_party/fast/fast.h
new file mode 100644
index 00000000000..06fa90ec98c
--- /dev/null
+++ b/extern/libmv/third_party/fast/fast.h
@@ -0,0 +1,39 @@
+#ifndef FAST_H
+#define FAST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct { int x, y; } xy;
+typedef unsigned char byte;
+
+int fast9_corner_score(const byte* p, const int pixel[], int bstart);
+int fast10_corner_score(const byte* p, const int pixel[], int bstart);
+int fast11_corner_score(const byte* p, const int pixel[], int bstart);
+int fast12_corner_score(const byte* p, const int pixel[], int bstart);
+
+xy* fast9_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+xy* fast10_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+xy* fast11_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+xy* fast12_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+
+int* fast9_score(const byte* i, int stride, xy* corners, int num_corners, int b);
+int* fast10_score(const byte* i, int stride, xy* corners, int num_corners, int b);
+int* fast11_score(const byte* i, int stride, xy* corners, int num_corners, int b);
+int* fast12_score(const byte* i, int stride, xy* corners, int num_corners, int b);
+
+
+xy* fast9_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+xy* fast10_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+xy* fast11_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+xy* fast12_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+
+xy* nonmax_suppression(const xy* corners, const int* scores, int num_corners, int* ret_num_nonmax);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/extern/libmv/third_party/fast/fast_10.c b/extern/libmv/third_party/fast/fast_10.c
new file mode 100644
index 00000000000..3af63869478
--- /dev/null
+++ b/extern/libmv/third_party/fast/fast_10.c
@@ -0,0 +1,4666 @@
+/*This is mechanically generated code*/
+#include <stdlib.h>
+
+typedef struct { int x, y; } xy;
+typedef unsigned char byte;
+
+int fast10_corner_score(const byte* p, const int pixel[], int bstart)
+{
+ int bmin = bstart;
+ int bmax = 255;
+ int b = (bmax + bmin)/2;
+
+ /*Compute the score using binary search*/
+ for(;;)
+ {
+ int cb = *p + b;
+ int c_b= *p - b;
+
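+    /* Invariant of the bisection: the candidate passes the segment test
+       at threshold bmin but fails it at bmax. Each pass of the decision
+       tree below tests threshold b and tightens one bound; the loop exits
+       when the bounds are adjacent and returns bmin, the largest
+       threshold at which the pixel is still detected as a corner. */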
+
+ if( p[pixel[0]] > cb)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[15]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[0]] < c_b)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[15]] < c_b)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[1]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[1]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+
+ is_a_corner:
+ bmin=b;
+ goto end_if;
+
+ is_not_a_corner:
+ bmax=b;
+ goto end_if;
+
+ end_if:
+
+ if(bmin == bmax - 1 || bmin == bmax)
+ return bmin;
+ b = (bmin + bmax) / 2;
+ }
+}
+
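+/* Precompute the offsets of the 16 pixels on the radius-3 Bresenham
+   circle around a candidate pixel, so the detector and scorer can index
+   the ring with a single addition per sample. */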
+static void make_offsets(int pixel[], int row_stride)
+{
+ pixel[0] = 0 + row_stride * 3;
+ pixel[1] = 1 + row_stride * 3;
+ pixel[2] = 2 + row_stride * 2;
+ pixel[3] = 3 + row_stride * 1;
+ pixel[4] = 3 + row_stride * 0;
+ pixel[5] = 3 + row_stride * -1;
+ pixel[6] = 2 + row_stride * -2;
+ pixel[7] = 1 + row_stride * -3;
+ pixel[8] = 0 + row_stride * -3;
+ pixel[9] = -1 + row_stride * -3;
+ pixel[10] = -2 + row_stride * -2;
+ pixel[11] = -3 + row_stride * -1;
+ pixel[12] = -3 + row_stride * 0;
+ pixel[13] = -3 + row_stride * 1;
+ pixel[14] = -2 + row_stride * 2;
+ pixel[15] = -1 + row_stride * 3;
+}
+
+
+
+int* fast10_score(const byte* i, int stride, xy* corners, int num_corners, int b)
+{
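+    /* For each detected corner, compute its score: the largest threshold
+       at which the segment test still passes (see fast10_corner_score). */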
+ int* scores = (int*)malloc(sizeof(int)* num_corners);
+ int n;
+
+ int pixel[16];
+ make_offsets(pixel, stride);
+
+ for(n=0; n < num_corners; n++)
+ scores[n] = fast10_corner_score(i + corners[n].y*stride + corners[n].x, pixel, b);
+
+ return scores;
+}
+
+
+xy* fast10_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ int num_corners=0;
+ xy* ret_corners;
+ int rsize=512;
+ int pixel[16];
+ int x, y;
+
+ ret_corners = (xy*)malloc(sizeof(xy)*rsize);
+ make_offsets(pixel, stride);
+
+ for(y=3; y < ysize - 3; y++)
+ for(x=3; x < xsize - 3; x++)
+ {
+ const byte* p = im + y*stride + x;
+
+ int cb = *p + b;
+ int c_b= *p - b;
+ if(p[pixel[0]] > cb)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[15]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[0]] < c_b)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[15]] < c_b)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[1]] > cb)
+ {}
+ else
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[1]] < c_b)
+ {}
+ else
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ if(num_corners == rsize)
+ {
+ rsize*=2;
+ ret_corners = (xy*)realloc(ret_corners, sizeof(xy)*rsize);
+ }
+
+ ret_corners[num_corners].x = x;
+ ret_corners[num_corners].y = y;
+ num_corners++;
+ }
+
+ *ret_num_corners = num_corners;
+ return ret_corners;
+
+}
+
+
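The unrolled decision tree that ends here is the mechanically generated form of FAST's segment test: a candidate pixel is kept as a corner when some run of N contiguous pixels on the 16-pixel ring is entirely brighter than cb (centre plus threshold) or entirely darker than c_b (centre minus threshold), where N is the variant's arc length (the file added below uses 11). Each accepted pixel is appended to ret_corners, which is doubled with realloc whenever it fills. A minimal, un-unrolled sketch of the same test follows; segment_test_generic is a hypothetical helper, not part of this library, and it assumes the file's own `byte` typedef.

/* Hypothetical, readable equivalent of the generated tree: pass if some
 * run of at least `arc_len` contiguous ring pixels (wrapping modulo 16)
 * is entirely brighter than cb or entirely darker than c_b. */
static int segment_test_generic(const byte* p, const int pixel[16],
                                int cb, int c_b, int arc_len)
{
    int start;
    for (start = 0; start < 16; start++) {
        int len = 0;
        while (len < arc_len && p[pixel[(start + len) % 16]] > cb)
            len++;
        if (len == arc_len)
            return 1;   /* found a bright arc */
        len = 0;
        while (len < arc_len && p[pixel[(start + len) % 16]] < c_b)
            len++;
        if (len == arc_len)
            return 1;   /* found a dark arc */
    }
    return 0;
}

The generated tree trades this loop for a fixed comparison order, so the common non-corner case is rejected after only a handful of loads.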
diff --git a/extern/libmv/third_party/fast/fast_11.c b/extern/libmv/third_party/fast/fast_11.c
new file mode 100644
index 00000000000..b4af4309521
--- /dev/null
+++ b/extern/libmv/third_party/fast/fast_11.c
@@ -0,0 +1,3910 @@
+/*This is mechanically generated code*/
+#include <stdlib.h>
+
+typedef struct { int x, y; } xy;
+typedef unsigned char byte;
+
+int fast11_corner_score(const byte* p, const int pixel[], int bstart)
+{
+ int bmin = bstart;
+ int bmax = 255;
+ int b = (bmax + bmin)/2;
+
+ /*Compute the score using binary search*/
+ for(;;)
+ {
+ int cb = *p + b;
+ int c_b= *p - b;
+
+
+ if( p[pixel[0]] > cb)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[15]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[0]] < c_b)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[15]] < c_b)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[1]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[1]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
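+/* Minimal usage sketch (illustrative only; "img", "width", "height" and
+   the threshold 20 are caller-side assumptions, not part of this file):
+
+       int n;
+       xy* corners = fast11_detect(img, width, height, width, 20, &n);
+       int* scores = fast11_score(img, width, corners, n, 20);
+       ... use the scores, e.g. for non-maximal suppression ...
+       free(scores);
+       free(corners);
+*/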
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+
+ is_a_corner:
+ bmin=b;
+ goto end_if;
+
+ is_not_a_corner:
+ bmax=b;
+ goto end_if;
+
+ end_if:
+
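+ /* Invariant of the search: the corner test passes at threshold bmin and
+    fails at bmax, so the loop converges on the largest b at which p is
+    still a corner. */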
+ if(bmin == bmax - 1 || bmin == bmax)
+ return bmin;
+ b = (bmin + bmax) / 2;
+ }
+}
+
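+/* Precomputes the offsets of the 16 pixels on the Bresenham circle of
+   radius 3 around a candidate pixel, for the given image row stride. */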
+static void make_offsets(int pixel[], int row_stride)
+{
+ pixel[0] = 0 + row_stride * 3;
+ pixel[1] = 1 + row_stride * 3;
+ pixel[2] = 2 + row_stride * 2;
+ pixel[3] = 3 + row_stride * 1;
+ pixel[4] = 3 + row_stride * 0;
+ pixel[5] = 3 + row_stride * -1;
+ pixel[6] = 2 + row_stride * -2;
+ pixel[7] = 1 + row_stride * -3;
+ pixel[8] = 0 + row_stride * -3;
+ pixel[9] = -1 + row_stride * -3;
+ pixel[10] = -2 + row_stride * -2;
+ pixel[11] = -3 + row_stride * -1;
+ pixel[12] = -3 + row_stride * 0;
+ pixel[13] = -3 + row_stride * 1;
+ pixel[14] = -2 + row_stride * 2;
+ pixel[15] = -1 + row_stride * 3;
+}
+
+
+
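+/* Computes a score for each detected corner: the largest threshold b at
+   which fast11_corner_score still accepts it. The returned array is
+   malloc'd and owned by the caller. */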
+int* fast11_score(const byte* i, int stride, xy* corners, int num_corners, int b)
+{
+ int* scores = (int*)malloc(sizeof(int)* num_corners);
+ int n;
+
+ int pixel[16];
+ make_offsets(pixel, stride);
+
+ for(n=0; n < num_corners; n++)
+ scores[n] = fast11_corner_score(i + corners[n].y*stride + corners[n].x, pixel, b);
+
+ return scores;
+}
+
+
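+/* Scans the image, skipping a 3-pixel border (the test circle has radius
+   3), and collects every pixel with a contiguous arc of at least 11
+   circle pixels all brighter than *p + b or all darker than *p - b. */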
+xy* fast11_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ int num_corners=0;
+ xy* ret_corners;
+ int rsize=512;
+ int pixel[16];
+ int x, y;
+
+ ret_corners = (xy*)malloc(sizeof(xy)*rsize);
+ make_offsets(pixel, stride);
+
+ for(y=3; y < ysize - 3; y++)
+ for(x=3; x < xsize - 3; x++)
+ {
+ const byte* p = im + y*stride + x;
+
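+ /* Bright/dark thresholds for the segment test: a circle pixel counts
+    as "brighter" above cb and as "darker" below c_b. */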
+ int cb = *p + b;
+ int c_b= *p - b;
+ if(p[pixel[0]] > cb)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[15]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[0]] < c_b)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[15]] < c_b)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[1]] > cb)
+ {}
+ else
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[1]] < c_b)
+ {}
+ else
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
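+ /* The candidate passed the segment test: append it, doubling the
+    result array whenever it fills up. */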
+ if(num_corners == rsize)
+ {
+ rsize*=2;
+ ret_corners = (xy*)realloc(ret_corners, sizeof(xy)*rsize);
+ }
+
+ ret_corners[num_corners].x = x;
+ ret_corners[num_corners].y = y;
+ num_corners++;
+ }
+
+ *ret_num_corners = num_corners;
+ return ret_corners;
+
+}
+
+
diff --git a/extern/libmv/third_party/fast/fast_12.c b/extern/libmv/third_party/fast/fast_12.c
new file mode 100644
index 00000000000..f73f68dd043
--- /dev/null
+++ b/extern/libmv/third_party/fast/fast_12.c
@@ -0,0 +1,3134 @@
+/*This is mechanically generated code*/
+#include <stdlib.h>
+
+typedef struct { int x, y; } xy;
+typedef unsigned char byte;
+
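+/* Same binary-search scoring as fast11_corner_score, except the segment
+   test now requires a contiguous arc of at least 12 of the 16 circle
+   pixels to be brighter than *p + b or darker than *p - b. */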
+int fast12_corner_score(const byte* p, const int pixel[], int bstart)
+{
+ int bmin = bstart;
+ int bmax = 255;
+ int b = (bmax + bmin)/2;
+
+ /*Compute the score using binary search*/
+ for(;;)
+ {
+ int cb = *p + b;
+ int c_b= *p - b;
+
+
+ if( p[pixel[0]] > cb)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[15]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[0]] < c_b)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[15]] < c_b)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[1]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[1]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+
+ is_a_corner:
+ bmin=b;
+ goto end_if;
+
+ is_not_a_corner:
+ bmax=b;
+ goto end_if;
+
+ end_if:
+
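+  /* The search interval [bmin, bmax] maintains the invariant that the
+     point passes the segment test at bmin and fails it at bmax; once
+     the endpoints meet or become adjacent, bmin is the largest
+     threshold at which the point is still detected, i.e. its score. */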
+ if(bmin == bmax - 1 || bmin == bmax)
+ return bmin;
+ b = (bmin + bmax) / 2;
+ }
+}
+
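+/* Fill pixel[] with the offsets of the 16 pixels forming a Bresenham
+   circle of radius 3 around a candidate pixel, given the image row
+   stride in bytes. Index 0 lies three rows from the centre and the
+   remaining indices step once around the ring. */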
+static void make_offsets(int pixel[], int row_stride)
+{
+ pixel[0] = 0 + row_stride * 3;
+ pixel[1] = 1 + row_stride * 3;
+ pixel[2] = 2 + row_stride * 2;
+ pixel[3] = 3 + row_stride * 1;
+ pixel[4] = 3 + row_stride * 0;
+ pixel[5] = 3 + row_stride * -1;
+ pixel[6] = 2 + row_stride * -2;
+ pixel[7] = 1 + row_stride * -3;
+ pixel[8] = 0 + row_stride * -3;
+ pixel[9] = -1 + row_stride * -3;
+ pixel[10] = -2 + row_stride * -2;
+ pixel[11] = -3 + row_stride * -1;
+ pixel[12] = -3 + row_stride * 0;
+ pixel[13] = -3 + row_stride * 1;
+ pixel[14] = -2 + row_stride * 2;
+ pixel[15] = -1 + row_stride * 3;
+}
+
+
+
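+/* Compute a score for each detected corner: the largest threshold b at
+   which the point still passes the FAST-12 segment test, found by
+   binary search in fast12_corner_score(). The caller owns the returned
+   array and must free() it. */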
+int* fast12_score(const byte* i, int stride, xy* corners, int num_corners, int b)
+{
+ int* scores = (int*)malloc(sizeof(int)* num_corners);
+ int n;
+
+ int pixel[16];
+ make_offsets(pixel, stride);
+
+ for(n=0; n < num_corners; n++)
+ scores[n] = fast12_corner_score(i + corners[n].y*stride + corners[n].x, pixel, b);
+
+ return scores;
+}
+
+
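+/* Scan the image, skipping a 3-pixel border (the test circle has
+   radius 3), and collect every pixel whose circle contains 12
+   contiguous pixels that are all brighter than centre+b or all darker
+   than centre-b. *ret_num_corners receives the count; the caller must
+   free() the returned array.
+
+   Minimal usage sketch (assuming an 8-bit greyscale image `img` of
+   width w and height h whose rows are w bytes apart):
+
+     int n;
+     xy* corners = fast12_detect(img, w, h, w, 20, &n);
+     int* scores = fast12_score(img, w, corners, n, 20);
+     free(scores);
+     free(corners);
+*/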
+xy* fast12_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ int num_corners=0;
+ xy* ret_corners;
+ int rsize=512;
+ int pixel[16];
+ int x, y;
+
+ ret_corners = (xy*)malloc(sizeof(xy)*rsize);
+ make_offsets(pixel, stride);
+
+ for(y=3; y < ysize - 3; y++)
+ for(x=3; x < xsize - 3; x++)
+ {
+ const byte* p = im + y*stride + x;
+
+ int cb = *p + b;
+ int c_b= *p - b;
+ if(p[pixel[0]] > cb)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[15]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[0]] < c_b)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[15]] < c_b)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[1]] > cb)
+ {}
+ else
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[1]] < c_b)
+ {}
+ else
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
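+      /* Reaching this point means the pixel passed the segment test:
+         grow the result array geometrically when it is full, then
+         record the corner position. */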
+ if(num_corners == rsize)
+ {
+ rsize*=2;
+ ret_corners = (xy*)realloc(ret_corners, sizeof(xy)*rsize);
+ }
+
+ ret_corners[num_corners].x = x;
+ ret_corners[num_corners].y = y;
+ num_corners++;
+ }
+
+ *ret_num_corners = num_corners;
+ return ret_corners;
+
+}
+
+
diff --git a/extern/libmv/third_party/fast/fast_9.c b/extern/libmv/third_party/fast/fast_9.c
new file mode 100644
index 00000000000..6d33daeffbb
--- /dev/null
+++ b/extern/libmv/third_party/fast/fast_9.c
@@ -0,0 +1,5910 @@
+/*This is mechanically generated code*/
+#include <stdlib.h>
+
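+/* Integer pixel coordinates of a detected corner, and the 8-bit pixel
+   type the detector operates on. */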
+typedef struct { int x, y; } xy;
+typedef unsigned char byte;
+
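+/* Binary-search the largest threshold in [bstart, 255] at which p
+   still passes the FAST-9 segment test; the result serves as the
+   corner's score. */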
+int fast9_corner_score(const byte* p, const int pixel[], int bstart)
+{
+ int bmin = bstart;
+ int bmax = 255;
+ int b = (bmax + bmin)/2;
+
+ /*Compute the score using binary search*/
+ for(;;)
+ {
+ int cb = *p + b;
+ int c_b= *p - b;
+
+
+ if( p[pixel[0]] > cb)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[15]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[0]] < c_b)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[15]] < c_b)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[1]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[1]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+
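+ /* Jump targets for the generated decision tree above: a hit or miss at
+  * the current threshold b tightens the bisection bounds accordingly. */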
+ is_a_corner:
+ bmin=b;
+ goto end_if;
+
+ is_not_a_corner:
+ bmax=b;
+ goto end_if;
+
+ end_if:
+
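+ /* The bisection has converged once the bracket [bmin, bmax] cannot
+  * shrink further; bmin is then the largest tested threshold at which
+  * the pixel still passes the corner test, i.e. its score. */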
+ if(bmin == bmax - 1 || bmin == bmax)
+ return bmin;
+ b = (bmin + bmax) / 2;
+ }
+}
+
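+/* Precompute the 16 offsets of a radius-3 Bresenham circle around a
+ * pixel; row_stride folds each (dx, dy) ring position into a single
+ * pointer offset so the ring can be sampled with one indexed load. */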
+static void make_offsets(int pixel[], int row_stride)
+{
+ pixel[0] = 0 + row_stride * 3;
+ pixel[1] = 1 + row_stride * 3;
+ pixel[2] = 2 + row_stride * 2;
+ pixel[3] = 3 + row_stride * 1;
+ pixel[4] = 3 + row_stride * 0;
+ pixel[5] = 3 + row_stride * -1;
+ pixel[6] = 2 + row_stride * -2;
+ pixel[7] = 1 + row_stride * -3;
+ pixel[8] = 0 + row_stride * -3;
+ pixel[9] = -1 + row_stride * -3;
+ pixel[10] = -2 + row_stride * -2;
+ pixel[11] = -3 + row_stride * -1;
+ pixel[12] = -3 + row_stride * 0;
+ pixel[13] = -3 + row_stride * 1;
+ pixel[14] = -2 + row_stride * 2;
+ pixel[15] = -1 + row_stride * 3;
+}
+
+
+
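+/* Score each detected corner via fast9_corner_score(), i.e. the largest
+ * threshold b at which the point is still accepted as a corner.  The
+ * returned array is malloc()'d and must be freed by the caller. */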
+int* fast9_score(const byte* i, int stride, xy* corners, int num_corners, int b)
+{
+ int* scores = (int*)malloc(sizeof(int)* num_corners);
+ int n;
+
+ int pixel[16];
+ make_offsets(pixel, stride);
+
+ for(n=0; n < num_corners; n++)
+ scores[n] = fast9_corner_score(i + corners[n].y*stride + corners[n].x, pixel, b);
+
+ return scores;
+}
+
+
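+/* Scan the image, excluding a 3-pixel border (the test ring has radius
+ * 3), and run the unrolled FAST-9 segment test at every pixel: a corner
+ * requires a contiguous arc of at least 9 ring pixels that are all
+ * brighter than *p + b or all darker than *p - b.  In the generated
+ * tree below an empty block {} accepts the pixel as a corner and
+ * continue rejects it. */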
+xy* fast9_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ int num_corners=0;
+ xy* ret_corners;
+ int rsize=512;
+ int pixel[16];
+ int x, y;
+
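+ /* Initial guess for the corner-list capacity. */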
+ ret_corners = (xy*)malloc(sizeof(xy)*rsize);
+ make_offsets(pixel, stride);
+
+ for(y=3; y < ysize - 3; y++)
+ for(x=3; x < xsize - 3; x++)
+ {
+ const byte* p = im + y*stride + x;
+
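+ /* Brighter/darker thresholds around the current centre pixel. */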
+ int cb = *p + b;
+ int c_b= *p - b;
+ if(p[pixel[0]] > cb)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[15]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[0]] < c_b)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[15]] < c_b)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[1]] > cb)
+ {}
+ else
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[1]] < c_b)
+ {}
+ else
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ if(num_corners == rsize)
+ {
+ rsize*=2;
+ ret_corners = (xy*)realloc(ret_corners, sizeof(xy)*rsize);
+ }
+ ret_corners[num_corners].x = x;
+ ret_corners[num_corners].y = y;
+ num_corners++;
+
+ }
+
+ *ret_num_corners = num_corners;
+ return ret_corners;
+
+}
+
+
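The unrolled if/else cascade above is machine-generated from the FAST-9
decision tree (Rosten & Drummond): a pixel passes when at least 9 contiguous
pixels on the 16-pixel Bresenham circle are all brighter than the centre plus
the threshold (> cb) or all darker than the centre minus it (< c_b). A minimal,
unoptimised sketch of the same test, assuming the same pixel[16] offset table;
fast9_is_corner is a hypothetical name, not part of this diff:

    /* Hedged sketch of the segment test that the generated tree implements. */
    static int fast9_is_corner(const unsigned char* p, const int pixel[16], int b)
    {
        const int cb  = *p + b;   /* "brighter" threshold */
        const int c_b = *p - b;   /* "darker" threshold   */
        int start;
        for (start = 0; start < 16; start++) {
            int brighter = 1, darker = 1, k;
            for (k = start; k < start + 9; k++) {   /* 9 contiguous pixels */
                const int v = p[pixel[k % 16]];
                if (v <= cb)  brighter = 0;         /* not all brighter */
                if (v >= c_b) darker = 0;           /* not all darker   */
            }
            if (brighter || darker)
                return 1;
        }
        return 0;
    }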
diff --git a/extern/libmv/third_party/fast/nonmax.c b/extern/libmv/third_party/fast/nonmax.c
new file mode 100644
index 00000000000..6ed0f580906
--- /dev/null
+++ b/extern/libmv/third_party/fast/nonmax.c
@@ -0,0 +1,117 @@
+#include <stdlib.h>
+#include "fast.h"
+
+
+#define Compare(X, Y) ((X)>=(Y))
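+/* Note: Compare uses >=, so a neighbour whose score equals the current
+   corner's suppresses it; two adjacent equal-score corners therefore
+   suppress each other. */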
+
+xy* nonmax_suppression(const xy* corners, const int* scores, int num_corners, int* ret_num_nonmax)
+{
+ int num_nonmax=0;
+ int last_row;
+ int* row_start;
+ int i, j;
+ xy* ret_nonmax;
+ const int sz = (int)num_corners;
+
+    /* point_above points (roughly) to the pixel above the one of interest, if
+       there is a feature there. */
+ int point_above = 0;
+ int point_below = 0;
+
+
+ if(num_corners < 1)
+ {
+ *ret_num_nonmax = 0;
+ return 0;
+ }
+
+ ret_nonmax = (xy*)malloc(num_corners * sizeof(xy));
+
+ /* Find where each row begins
+ (the corners are output in raster scan order). A beginning of -1 signifies
+ that there are no corners on that row. */
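+    /* Example: corners on rows {0,0,2} give row_start = {0,-1,2} (row 1 empty). */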
+ last_row = corners[num_corners-1].y;
+ row_start = (int*)malloc((last_row+1)*sizeof(int));
+
+ for(i=0; i < last_row+1; i++)
+ row_start[i] = -1;
+
+ {
+ int prev_row = -1;
+ for(i=0; i< num_corners; i++)
+ if(corners[i].y != prev_row)
+ {
+ row_start[corners[i].y] = i;
+ prev_row = corners[i].y;
+ }
+ }
+
+
+
+ for(i=0; i < sz; i++)
+ {
+ int score = scores[i];
+ xy pos = corners[i];
+
+ /*Check left */
+ if(i > 0)
+ if(corners[i-1].x == pos.x-1 && corners[i-1].y == pos.y && Compare(scores[i-1], score))
+ continue;
+
+ /*Check right*/
+ if(i < (sz - 1))
+ if(corners[i+1].x == pos.x+1 && corners[i+1].y == pos.y && Compare(scores[i+1], score))
+ continue;
+
+ /*Check above (if there is a valid row above)*/
+ if(pos.y != 0 && row_start[pos.y - 1] != -1)
+ {
+ /*Make sure that current point_above is one
+ row above.*/
+ if(corners[point_above].y < pos.y - 1)
+ point_above = row_start[pos.y-1];
+
+ /*Make point_above point to the first of the pixels above the current point,
+ if it exists.*/
+ for(; corners[point_above].y < pos.y && corners[point_above].x < pos.x - 1; point_above++)
+ {}
+
+
+ for(j=point_above; corners[j].y < pos.y && corners[j].x <= pos.x + 1; j++)
+ {
+ int x = corners[j].x;
+ if( (x == pos.x - 1 || x ==pos.x || x == pos.x+1) && Compare(scores[j], score))
+ goto cont;
+ }
+
+ }
+
+ /*Check below (if there is anything below)*/
+  if(pos.y != last_row && row_start[pos.y + 1] != -1 && point_below < sz) /* only if there is a row below */
+ {
+ if(corners[point_below].y < pos.y + 1)
+ point_below = row_start[pos.y+1];
+
+    /* Make point_below point to one of the pixels below the current point, if it
+       exists. */
+ for(; point_below < sz && corners[point_below].y == pos.y+1 && corners[point_below].x < pos.x - 1; point_below++)
+ {}
+
+ for(j=point_below; j < sz && corners[j].y == pos.y+1 && corners[j].x <= pos.x + 1; j++)
+ {
+ int x = corners[j].x;
+ if( (x == pos.x - 1 || x ==pos.x || x == pos.x+1) && Compare(scores[j],score))
+ goto cont;
+ }
+ }
+
+ ret_nonmax[num_nonmax++] = corners[i];
+ cont:
+ ;
+ }
+
+ free(row_start);
+ *ret_num_nonmax = num_nonmax;
+ return ret_nonmax;
+}
+
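A hedged usage sketch chaining detection, scoring, and the suppression above.
Only nonmax_suppression() appears in this diff; fast9_detect() and
fast9_score() are assumed to be the upstream FAST library entry points
declared in fast.h:

    #include <stdlib.h>
    #include "fast.h"

    xy* detect_nonmax_sketch(const unsigned char* im, int w, int h, int stride,
                             int threshold, int* ret_num)
    {
        int num_corners = 0;
        /* Detect candidate corners, score them, then keep local maxima. */
        xy* corners = fast9_detect(im, w, h, stride, threshold, &num_corners);
        int* scores = fast9_score(im, stride, corners, num_corners, threshold);
        xy* kept = nonmax_suppression(corners, scores, num_corners, ret_num);
        free(corners);   /* nonmax_suppression returns a fresh allocation */
        free(scores);
        return kept;     /* caller frees */
    }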
diff --git a/extern/libmv/third_party/gflags/README.libmv b/extern/libmv/third_party/gflags/README.libmv
new file mode 100644
index 00000000000..f2bdef6563e
--- /dev/null
+++ b/extern/libmv/third_party/gflags/README.libmv
@@ -0,0 +1,14 @@
+Project: Google Flags
+URL: http://code.google.com/p/google-gflags/
+License: New BSD
+Upstream version: 1.5
+Local modifications:
+
+- Flattened the tree and only included files needed for libmv. This involved
+ changing some of the includes to point to the current directory instead of a
+ nested gflags directory.
+
+- Added a poor-man's version of upstream's port.cc/h to make gflags compile on
+ windows. This isn't sufficient but is a stopgap for now.
+
+ TODO(keir): Import and use gflags for Windows from upstream.
diff --git a/extern/libmv/third_party/gflags/config.h b/extern/libmv/third_party/gflags/config.h
new file mode 100644
index 00000000000..ca2c1276c44
--- /dev/null
+++ b/extern/libmv/third_party/gflags/config.h
@@ -0,0 +1,110 @@
+/* src/config.h. Generated from config.h.in by configure. */
+/* src/config.h.in. Generated from configure.ac by autoheader. */
+
+/* Always the empty-string on non-windows systems. On windows, should be
+ "__declspec(dllexport)". This way, when we compile the dll, we export our
+ functions/classes. It's safe to define this here because config.h is only
+ used internally, to compile the DLL, and every DLL source file #includes
+ "config.h" before anything else. */
+#define GFLAGS_DLL_DECL /**/
+
+/* Namespace for Google classes */
+#define GOOGLE_NAMESPACE ::google
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <fnmatch.h> header file. */
+#undef HAVE_FNMATCH_H
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* define if the compiler implements namespaces */
+#define HAVE_NAMESPACES 1
+
+/* Define if you have POSIX threads libraries and header files. */
+#define HAVE_PTHREAD 1
+
+/* Define to 1 if you have the `putenv' function. */
+#define HAVE_PUTENV 1
+
+/* Define to 1 if you have the `setenv' function. */
+#define HAVE_SETENV 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoll' function. */
+#define HAVE_STRTOLL 1
+
+/* Define to 1 if you have the `strtoq' function. */
+#define HAVE_STRTOQ 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* define if your compiler has __attribute__ */
+#define HAVE___ATTRIBUTE__ 1
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define LT_OBJDIR ".libs/"
+
+/* Name of package */
+#define PACKAGE "gflags"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "opensource@google.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "gflags"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "gflags 1.5"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "gflags"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "1.5"
+
+/* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
+/* #undef PTHREAD_CREATE_JOINABLE */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* the namespace where STL code like vector<> is defined */
+#define STL_NAMESPACE std
+
+/* Version number of package */
+#define VERSION "1.5"
+
+/* Stops putting the code inside the Google namespace */
+#define _END_GOOGLE_NAMESPACE_ }
+
+/* Puts following code inside the Google namespace */
+#define _START_GOOGLE_NAMESPACE_ namespace google {
diff --git a/extern/libmv/third_party/gflags/gflags.cc b/extern/libmv/third_party/gflags/gflags.cc
new file mode 100644
index 00000000000..34fe95dac59
--- /dev/null
+++ b/extern/libmv/third_party/gflags/gflags.cc
@@ -0,0 +1,1971 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Ray Sidney
+// Revamped and reorganized by Craig Silverstein
+//
+// This file contains the implementation of all our command line flags
+// stuff. Here's how everything fits together:
+//
+// * FlagRegistry owns CommandLineFlags owns FlagValue.
+// * FlagSaver holds a FlagRegistry (saves it at construct time,
+// restores it at destroy time).
+// * CommandLineFlagParser lives outside that hierarchy, but works on
+// CommandLineFlags (modifying the FlagValues).
+// * Free functions like SetCommandLineOption() work via one of the
+// above (such as CommandLineFlagParser).
+//
+// In more detail:
+//
+// -- The main classes that hold flag data:
+//
+// FlagValue holds the current value of a flag. It's
+// pseudo-templatized: every operation on a FlagValue is typed. It
+// also deals with storage-lifetime issues (so flag values don't go
+// away in a destructor), which is why we need a whole class to hold a
+// variable's value.
+//
+// CommandLineFlag is all the information about a single command-line
+// flag. It has a FlagValue for the flag's current value, but also
+// the flag's name, type, etc.
+//
+// FlagRegistry is a collection of CommandLineFlags. There's the
+// global registry, which is where flags defined via DEFINE_foo()
+// live. But it's possible to define your own flag, manually, in a
+// different registry you create. (In practice, multiple registries
+// are used only by FlagSaver).
+//
+// A given FlagValue is owned by exactly one CommandLineFlag. A given
+// CommandLineFlag is owned by exactly one FlagRegistry. FlagRegistry
+// has a lock; any operation that writes to a FlagValue or
+// CommandLineFlag owned by that registry must acquire the
+// FlagRegistry lock before doing so.
+//
+// --- Some other classes and free functions:
+//
+// CommandLineFlagInfo is a client-exposed version of CommandLineFlag.
+// Once it's instantiated, it has no dependencies or relationships
+// with any other part of this file.
+//
+// FlagRegisterer is the helper class used by the DEFINE_* macros to
+// allow work to be done at global initialization time.
+//
+// CommandLineFlagParser is the class that reads from the commandline
+// and instantiates flag values based on that. It needs to poke into
+// the innards of the FlagValue->CommandLineFlag->FlagRegistry class
+// hierarchy to do that. It's careful to acquire the FlagRegistry
+// lock before doing any writing or other non-const actions.
+//
+// GetCommandLineOption is just a hook into registry routines to
+// retrieve a flag based on its name. SetCommandLineOption, on the
+// other hand, hooks into CommandLineFlagParser. Other API functions
+// are, similarly, mostly hooks into the functionality described above.
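+//
+// A minimal usage sketch for orientation (hedged: DEFINE_string and
+// ParseCommandLineFlags are public gflags entry points, but this snippet is
+// illustrative, not part of the library):
+//
+//   DEFINE_string(languages, "english,french", "comma-separated languages");
+//
+//   int main(int argc, char** argv) {
+//     google::ParseCommandLineFlags(&argc, &argv, true);
+//     // FLAGS_languages now reflects any --languages=... argument.
+//   }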
+
+#include "config.h"
+// This comes first to ensure we define __STDC_FORMAT_MACROS in time.
+#ifdef HAVE_INTTYPES_H
+#ifndef __STDC_FORMAT_MACROS
+# define __STDC_FORMAT_MACROS 1 // gcc requires this to get PRId64, etc.
+#endif
+#include <inttypes.h>
+#endif // HAVE_INTTYPES_H
+#include <stdio.h> // for snprintf
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdarg.h> // For va_list and related operations
+#include <string.h>
+#include <assert.h>
+#ifdef HAVE_FNMATCH_H
+#include <fnmatch.h>
+#endif // HAVE_FNMATCH_H
+#include <string>
+#include <map>
+#include <vector>
+#include <utility> // for pair<>
+#include <algorithm>
+#include "gflags.h"
+#include "mutex.h"
+
+#ifndef PATH_SEPARATOR
+#define PATH_SEPARATOR '/'
+#endif
+
+// Work properly if either strtoll or strtoq is on this system
+#ifdef HAVE_STRTOLL
+# define strtoint64 strtoll
+# define strtouint64 strtoull
+#elif HAVE_STRTOQ
+# define strtoint64 strtoq
+# define strtouint64 strtouq
+#else
+// Neither strtoll nor strtoq are defined. I hope strtol works!
+# define strtoint64 strtol
+# define strtouint64 strtoul
+#endif
+
+// If we have inttypes.h, it will have defined PRId32/etc for us. If
+// not, take our best guess.
+#ifndef PRId32
+# define PRId32 "d"
+#endif
+#ifndef PRId64
+# define PRId64 "lld"
+#endif
+#ifndef PRIu64
+# define PRIu64 "llu"
+#endif
+
+// Windows is missing random bits like strcasecmp, strtoll, strtoull, and
+// snprintf in the usual locations. Put them somewhere sensible.
+//
+// TODO(keir): Get the upstream Windows port and use that instead.
+#ifdef _MSC_VER
+# define snprintf _snprintf
+# undef strtoint64
+# define strtoint64 _strtoi64
+# undef strtouint64
+# define strtouint64 _strtoui64
+# define strcasecmp _stricmp
+#endif
+
+typedef signed char int8;
+typedef unsigned char uint8;
+
+// Special flags, type 1: the 'recursive' flags. They set another flag's val.
+DEFINE_string(flagfile, "",
+ "load flags from file");
+DEFINE_string(fromenv, "",
+ "set flags from the environment"
+ " [use 'export FLAGS_flag1=value']");
+DEFINE_string(tryfromenv, "",
+ "set flags from the environment if present");
+
+// Special flags, type 2: the 'parsing' flags. They modify how we parse.
+DEFINE_string(undefok, "",
+ "comma-separated list of flag names that it is okay to specify "
+ "on the command line even if the program does not define a flag "
+ "with that name. IMPORTANT: flags in this list that have "
+ "arguments MUST use the flag=value format");
+
+_START_GOOGLE_NAMESPACE_
+
+using std::map;
+using std::pair;
+using std::sort;
+using std::string;
+using std::vector;
+
+// The help message indicating that the commandline flag has been
+// 'stripped'. It will not show up when doing "-help" and its
+// variants. The flag is stripped if STRIP_FLAG_HELP is set to 1
+// before including gflags/gflags.h.
+
+// This is used by this file, and also in commandlineflags_reporting.cc
+const char kStrippedFlagHelp[] = "\001\002\003\004 (unknown) \004\003\002\001";
+
+// This is used by the unittest to test error-exit code
+void GFLAGS_DLL_DECL (*commandlineflags_exitfunc)(int) = &exit; // from stdlib.h
+
+namespace {
+
+// There are also 'reporting' flags, in commandlineflags_reporting.cc.
+
+static const char kError[] = "ERROR: ";
+
+// Indicates that undefined options are to be ignored.
+// Enables deferred processing of flags in dynamically loaded libraries.
+static bool allow_command_line_reparsing = false;
+
+static bool logging_is_probably_set_up = false;
+
+// This is a 'prototype' validate-function. 'Real' validate
+// functions, take a flag-value as an argument: ValidateFn(bool) or
+// ValidateFn(uint64). However, for easier storage, we strip off this
+// argument and then restore it when actually calling the function on
+// a flag value.
+typedef bool (*ValidateFnProto)();
+
+// Whether we should die when reporting an error.
+enum DieWhenReporting { DIE, DO_NOT_DIE };
+
+// Report Error and exit if requested.
+static void ReportError(DieWhenReporting should_die, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ vfprintf(stderr, format, ap);
+ va_end(ap);
+ if (should_die == DIE)
+ commandlineflags_exitfunc(1); // almost certainly exit()
+}
+
+
+// --------------------------------------------------------------------
+// FlagValue
+// This represents the value a single flag might have. The major
+// functionality is to convert from a string to an object of a
+// given type, and back. Thread-compatible.
+// --------------------------------------------------------------------
+
+class CommandLineFlag;
+class FlagValue {
+ public:
+ FlagValue(void* valbuf, const char* type, bool transfer_ownership_of_value);
+ ~FlagValue();
+
+ bool ParseFrom(const char* spec);
+ string ToString() const;
+
+ private:
+ friend class CommandLineFlag; // for many things, including Validate()
+ friend class GOOGLE_NAMESPACE::FlagSaverImpl; // calls New()
+ friend class FlagRegistry; // checks value_buffer_ for flags_by_ptr_ map
+ template <typename T> friend T GetFromEnv(const char*, const char*, T);
+ friend bool TryParseLocked(const CommandLineFlag*, FlagValue*,
+ const char*, string*); // for New(), CopyFrom()
+
+ enum ValueType {
+ FV_BOOL = 0,
+ FV_INT32 = 1,
+ FV_INT64 = 2,
+ FV_UINT64 = 3,
+ FV_DOUBLE = 4,
+ FV_STRING = 5,
+ FV_MAX_INDEX = 5,
+ };
+ const char* TypeName() const;
+ bool Equal(const FlagValue& x) const;
+ FlagValue* New() const; // creates a new one with default value
+ void CopyFrom(const FlagValue& x);
+ int ValueSize() const;
+
+ // Calls the given validate-fn on value_buffer_, and returns
+ // whatever it returns. But first casts validate_fn_proto to a
+ // function that takes our value as an argument (eg void
+ // (*validate_fn)(bool) for a bool flag).
+ bool Validate(const char* flagname, ValidateFnProto validate_fn_proto) const;
+
+ void* value_buffer_; // points to the buffer holding our data
+ int8 type_; // how to interpret value_
+ bool owns_value_; // whether to free value on destruct
+
+ FlagValue(const FlagValue&); // no copying!
+ void operator=(const FlagValue&);
+};
+
+
+// This could be a templated method of FlagValue, but doing so adds to the
+// size of the .o. Since there's no type-safety here anyway, a macro is ok.
+#define VALUE_AS(type) *reinterpret_cast<type*>(value_buffer_)
+#define OTHER_VALUE_AS(fv, type) *reinterpret_cast<type*>(fv.value_buffer_)
+#define SET_VALUE_AS(type, value) VALUE_AS(type) = (value)
+
+FlagValue::FlagValue(void* valbuf, const char* type,
+ bool transfer_ownership_of_value)
+ : value_buffer_(valbuf),
+ owns_value_(transfer_ownership_of_value) {
+ for (type_ = 0; type_ <= FV_MAX_INDEX; ++type_) {
+ if (!strcmp(type, TypeName())) {
+ break;
+ }
+ }
+ assert(type_ <= FV_MAX_INDEX); // Unknown typename
+}
+
+FlagValue::~FlagValue() {
+ if (!owns_value_) {
+ return;
+ }
+ switch (type_) {
+ case FV_BOOL: delete reinterpret_cast<bool*>(value_buffer_); break;
+ case FV_INT32: delete reinterpret_cast<int32*>(value_buffer_); break;
+ case FV_INT64: delete reinterpret_cast<int64*>(value_buffer_); break;
+ case FV_UINT64: delete reinterpret_cast<uint64*>(value_buffer_); break;
+ case FV_DOUBLE: delete reinterpret_cast<double*>(value_buffer_); break;
+ case FV_STRING: delete reinterpret_cast<string*>(value_buffer_); break;
+ }
+}
+
+bool FlagValue::ParseFrom(const char* value) {
+ if (type_ == FV_BOOL) {
+ const char* kTrue[] = { "1", "t", "true", "y", "yes" };
+ const char* kFalse[] = { "0", "f", "false", "n", "no" };
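+    // Note: kTrue and kFalse must stay the same length; one loop walks both.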
+ for (size_t i = 0; i < sizeof(kTrue)/sizeof(*kTrue); ++i) {
+ if (strcasecmp(value, kTrue[i]) == 0) {
+ SET_VALUE_AS(bool, true);
+ return true;
+ } else if (strcasecmp(value, kFalse[i]) == 0) {
+ SET_VALUE_AS(bool, false);
+ return true;
+ }
+ }
+ return false; // didn't match a legal input
+
+ } else if (type_ == FV_STRING) {
+ SET_VALUE_AS(string, value);
+ return true;
+ }
+
+ // OK, it's likely to be numeric, and we'll be using a strtoXXX method.
+ if (value[0] == '\0') // empty-string is only allowed for string type.
+ return false;
+ char* end;
+ // Leading 0x puts us in base 16. But leading 0 does not put us in base 8!
+ // It caused too many bugs when we had that behavior.
+ int base = 10; // by default
+ if (value[0] == '0' && (value[1] == 'x' || value[1] == 'X'))
+ base = 16;
+ errno = 0;
+
+ switch (type_) {
+ case FV_INT32: {
+ const int64 r = strtoint64(value, &end, base);
+ if (errno || end != value + strlen(value)) return false; // bad parse
+ if (static_cast<int32>(r) != r) // worked, but number out of range
+ return false;
+ SET_VALUE_AS(int32, static_cast<int32>(r));
+ return true;
+ }
+ case FV_INT64: {
+ const int64 r = strtoint64(value, &end, base);
+ if (errno || end != value + strlen(value)) return false; // bad parse
+ SET_VALUE_AS(int64, r);
+ return true;
+ }
+ case FV_UINT64: {
+ while (*value == ' ') value++;
+ if (*value == '-') return false; // negative number
+ const uint64 r = strtouint64(value, &end, base);
+ if (errno || end != value + strlen(value)) return false; // bad parse
+ SET_VALUE_AS(uint64, r);
+ return true;
+ }
+ case FV_DOUBLE: {
+ const double r = strtod(value, &end);
+ if (errno || end != value + strlen(value)) return false; // bad parse
+ SET_VALUE_AS(double, r);
+ return true;
+ }
+ default: {
+ assert(false); // unknown type
+ return false;
+ }
+ }
+}
+
+string FlagValue::ToString() const {
+ char intbuf[64]; // enough to hold even the biggest number
+ switch (type_) {
+ case FV_BOOL:
+ return VALUE_AS(bool) ? "true" : "false";
+ case FV_INT32:
+ snprintf(intbuf, sizeof(intbuf), "%"PRId32, VALUE_AS(int32));
+ return intbuf;
+ case FV_INT64:
+ snprintf(intbuf, sizeof(intbuf), "%"PRId64, VALUE_AS(int64));
+ return intbuf;
+ case FV_UINT64:
+ snprintf(intbuf, sizeof(intbuf), "%"PRIu64, VALUE_AS(uint64));
+ return intbuf;
+ case FV_DOUBLE:
+ snprintf(intbuf, sizeof(intbuf), "%.17g", VALUE_AS(double));
+ return intbuf;
+ case FV_STRING:
+ return VALUE_AS(string);
+ default:
+ assert(false);
+ return ""; // unknown type
+ }
+}
+
+bool FlagValue::Validate(const char* flagname,
+ ValidateFnProto validate_fn_proto) const {
+ switch (type_) {
+ case FV_BOOL:
+ return reinterpret_cast<bool (*)(const char*, bool)>(
+ validate_fn_proto)(flagname, VALUE_AS(bool));
+ case FV_INT32:
+ return reinterpret_cast<bool (*)(const char*, int32)>(
+ validate_fn_proto)(flagname, VALUE_AS(int32));
+ case FV_INT64:
+ return reinterpret_cast<bool (*)(const char*, int64)>(
+ validate_fn_proto)(flagname, VALUE_AS(int64));
+ case FV_UINT64:
+ return reinterpret_cast<bool (*)(const char*, uint64)>(
+ validate_fn_proto)(flagname, VALUE_AS(uint64));
+ case FV_DOUBLE:
+ return reinterpret_cast<bool (*)(const char*, double)>(
+ validate_fn_proto)(flagname, VALUE_AS(double));
+ case FV_STRING:
+ return reinterpret_cast<bool (*)(const char*, const string&)>(
+ validate_fn_proto)(flagname, VALUE_AS(string));
+ default:
+ assert(false); // unknown type
+ return false;
+ }
+}
+
+const char* FlagValue::TypeName() const {
+ static const char types[] =
+ "bool\0xx"
+ "int32\0x"
+ "int64\0x"
+ "uint64\0"
+ "double\0"
+ "string";
+ if (type_ > FV_MAX_INDEX) {
+ assert(false);
+ return "";
+ }
+  // Directly index into the 'types' string above; each type name occupies
+  // exactly 7 bytes (padded so the names are evenly spaced).
+ return &types[type_ * 7];
+}
+
+bool FlagValue::Equal(const FlagValue& x) const {
+ if (type_ != x.type_)
+ return false;
+ switch (type_) {
+ case FV_BOOL: return VALUE_AS(bool) == OTHER_VALUE_AS(x, bool);
+ case FV_INT32: return VALUE_AS(int32) == OTHER_VALUE_AS(x, int32);
+ case FV_INT64: return VALUE_AS(int64) == OTHER_VALUE_AS(x, int64);
+ case FV_UINT64: return VALUE_AS(uint64) == OTHER_VALUE_AS(x, uint64);
+ case FV_DOUBLE: return VALUE_AS(double) == OTHER_VALUE_AS(x, double);
+ case FV_STRING: return VALUE_AS(string) == OTHER_VALUE_AS(x, string);
+ default: assert(false); return false; // unknown type
+ }
+}
+
+FlagValue* FlagValue::New() const {
+ const char *type = TypeName();
+ switch (type_) {
+ case FV_BOOL: return new FlagValue(new bool(false), type, true);
+ case FV_INT32: return new FlagValue(new int32(0), type, true);
+ case FV_INT64: return new FlagValue(new int64(0), type, true);
+ case FV_UINT64: return new FlagValue(new uint64(0), type, true);
+ case FV_DOUBLE: return new FlagValue(new double(0.0), type, true);
+ case FV_STRING: return new FlagValue(new string, type, true);
+ default: assert(false); return NULL; // unknown type
+ }
+}
+
+void FlagValue::CopyFrom(const FlagValue& x) {
+ assert(type_ == x.type_);
+ switch (type_) {
+ case FV_BOOL: SET_VALUE_AS(bool, OTHER_VALUE_AS(x, bool)); break;
+ case FV_INT32: SET_VALUE_AS(int32, OTHER_VALUE_AS(x, int32)); break;
+ case FV_INT64: SET_VALUE_AS(int64, OTHER_VALUE_AS(x, int64)); break;
+ case FV_UINT64: SET_VALUE_AS(uint64, OTHER_VALUE_AS(x, uint64)); break;
+ case FV_DOUBLE: SET_VALUE_AS(double, OTHER_VALUE_AS(x, double)); break;
+ case FV_STRING: SET_VALUE_AS(string, OTHER_VALUE_AS(x, string)); break;
+ default: assert(false); // unknown type
+ }
+}
+
+int FlagValue::ValueSize() const {
+ if (type_ > FV_MAX_INDEX) {
+ assert(false); // unknown type
+ return 0;
+ }
+ static const uint8 valuesize[] = {
+ sizeof(bool),
+ sizeof(int32),
+ sizeof(int64),
+ sizeof(uint64),
+ sizeof(double),
+ sizeof(string),
+ };
+ return valuesize[type_];
+}
+
+// --------------------------------------------------------------------
+// CommandLineFlag
+// This represents a single flag, including its name, description,
+// default value, and current value. Mostly this serves as a
+// struct, though it also knows how to register itself.
+//    Every CommandLineFlag is owned by exactly one
+//    FlagRegistry. If you wish to modify fields in this class, you
+// should acquire the FlagRegistry lock for the registry that owns
+// this flag.
+// --------------------------------------------------------------------
+
+class CommandLineFlag {
+ public:
+ // Note: we take over memory-ownership of current_val and default_val.
+ CommandLineFlag(const char* name, const char* help, const char* filename,
+ FlagValue* current_val, FlagValue* default_val);
+ ~CommandLineFlag();
+
+ const char* name() const { return name_; }
+ const char* help() const { return help_; }
+ const char* filename() const { return file_; }
+ const char* CleanFileName() const; // nixes irrelevant prefix such as homedir
+ string current_value() const { return current_->ToString(); }
+ string default_value() const { return defvalue_->ToString(); }
+ const char* type_name() const { return defvalue_->TypeName(); }
+ ValidateFnProto validate_function() const { return validate_fn_proto_; }
+
+ void FillCommandLineFlagInfo(struct CommandLineFlagInfo* result);
+
+ // If validate_fn_proto_ is non-NULL, calls it on value, returns result.
+ bool Validate(const FlagValue& value) const;
+ bool ValidateCurrent() const { return Validate(*current_); }
+
+ private:
+ // for SetFlagLocked() and setting flags_by_ptr_
+ friend class FlagRegistry;
+ friend class GOOGLE_NAMESPACE::FlagSaverImpl; // for cloning the values
+ // set validate_fn
+ friend bool AddFlagValidator(const void*, ValidateFnProto);
+
+ // This copies all the non-const members: modified, processed, defvalue, etc.
+ void CopyFrom(const CommandLineFlag& src);
+
+ void UpdateModifiedBit();
+
+ const char* const name_; // Flag name
+ const char* const help_; // Help message
+ const char* const file_; // Which file did this come from?
+ bool modified_; // Set after default assignment?
+ FlagValue* defvalue_; // Default value for flag
+ FlagValue* current_; // Current value for flag
+ // This is a casted, 'generic' version of validate_fn, which actually
+ // takes a flag-value as an arg (void (*validate_fn)(bool), say).
+ // When we pass this to current_->Validate(), it will cast it back to
+ // the proper type. This may be NULL to mean we have no validate_fn.
+ ValidateFnProto validate_fn_proto_;
+
+ CommandLineFlag(const CommandLineFlag&); // no copying!
+ void operator=(const CommandLineFlag&);
+};
+
+CommandLineFlag::CommandLineFlag(const char* name, const char* help,
+ const char* filename,
+ FlagValue* current_val, FlagValue* default_val)
+ : name_(name), help_(help), file_(filename), modified_(false),
+ defvalue_(default_val), current_(current_val), validate_fn_proto_(NULL) {
+}
+
+CommandLineFlag::~CommandLineFlag() {
+ delete current_;
+ delete defvalue_;
+}
+
+const char* CommandLineFlag::CleanFileName() const {
+ // Compute top-level directory & file that this appears in
+ // search full path backwards.
+ // Stop going backwards at kRootDir; and skip by the first slash.
+ static const char kRootDir[] = ""; // can set this to root directory,
+ // e.g. "myproject"
+
+ if (sizeof(kRootDir)-1 == 0) // no prefix to strip
+ return filename();
+
+ const char* clean_name = filename() + strlen(filename()) - 1;
+ while ( clean_name > filename() ) {
+ if (*clean_name == PATH_SEPARATOR) {
+ if (strncmp(clean_name, kRootDir, sizeof(kRootDir)-1) == 0) {
+ // ".../myproject/base/logging.cc" ==> "base/logging.cc"
+ clean_name += sizeof(kRootDir)-1; // past "/myproject/"
+ break;
+ }
+ }
+ --clean_name;
+ }
+ while ( *clean_name == PATH_SEPARATOR ) ++clean_name; // Skip any slashes
+ return clean_name;
+}
+
+void CommandLineFlag::FillCommandLineFlagInfo(
+ CommandLineFlagInfo* result) {
+ result->name = name();
+ result->type = type_name();
+ result->description = help();
+ result->current_value = current_value();
+ result->default_value = default_value();
+ result->filename = CleanFileName();
+ UpdateModifiedBit();
+ result->is_default = !modified_;
+ result->has_validator_fn = validate_function() != NULL;
+}
+
+void CommandLineFlag::UpdateModifiedBit() {
+ // Update the "modified" bit in case somebody bypassed the
+ // Flags API and wrote directly through the FLAGS_name variable.
+ if (!modified_ && !current_->Equal(*defvalue_)) {
+ modified_ = true;
+ }
+}
+
+void CommandLineFlag::CopyFrom(const CommandLineFlag& src) {
+ // Note we only copy the non-const members; others are fixed at construct time
+ if (modified_ != src.modified_) modified_ = src.modified_;
+ if (!current_->Equal(*src.current_)) current_->CopyFrom(*src.current_);
+ if (!defvalue_->Equal(*src.defvalue_)) defvalue_->CopyFrom(*src.defvalue_);
+ if (validate_fn_proto_ != src.validate_fn_proto_)
+ validate_fn_proto_ = src.validate_fn_proto_;
+}
+
+bool CommandLineFlag::Validate(const FlagValue& value) const {
+ if (validate_function() == NULL)
+ return true;
+ else
+ return value.Validate(name(), validate_function());
+}
+
+
+// --------------------------------------------------------------------
+// FlagRegistry
+// A FlagRegistry singleton object holds all flag objects indexed
+// by their names so that if you know a flag's name (as a C
+// string), you can access or set it. If the function is named
+// FooLocked(), you must own the registry lock before calling
+// the function; otherwise, you should *not* hold the lock, and
+// the function will acquire it itself if needed.
+// --------------------------------------------------------------------
+
+struct StringCmp { // Used by the FlagRegistry map class to compare char*'s
+ bool operator() (const char* s1, const char* s2) const {
+ return (strcmp(s1, s2) < 0);
+ }
+};
+
+class FlagRegistry {
+ public:
+ FlagRegistry() { }
+ ~FlagRegistry() {
+ for (FlagMap::iterator p = flags_.begin(), e = flags_.end(); p != e; ++p) {
+ CommandLineFlag* flag = p->second;
+ delete flag;
+ }
+ }
+
+ static void DeleteGlobalRegistry() {
+ delete global_registry_;
+ global_registry_ = NULL;
+ }
+
+ void Lock() { lock_.Lock(); }
+ void Unlock() { lock_.Unlock(); }
+
+ // Store a flag in this registry. Takes ownership of the given pointer.
+ void RegisterFlag(CommandLineFlag* flag);
+
+ // Returns the flag object for the specified name, or NULL if not found.
+ CommandLineFlag* FindFlagLocked(const char* name);
+
+ // Returns the flag object whose current-value is stored at flag_ptr.
+ // That is, for whom current_->value_buffer_ == flag_ptr
+ CommandLineFlag* FindFlagViaPtrLocked(const void* flag_ptr);
+
+ // A fancier form of FindFlag that works correctly if name is of the
+ // form flag=value. In that case, we set key to point to flag, and
+ // modify v to point to the value (if present), and return the flag
+ // with the given name. If the flag does not exist, returns NULL
+ // and sets error_message.
+ CommandLineFlag* SplitArgumentLocked(const char* argument,
+ string* key, const char** v,
+ string* error_message);
+
+ // Set the value of a flag. If the flag was successfully set to
+ // value, set msg to indicate the new flag-value, and return true.
+ // Otherwise, set msg to indicate the error, leave flag unchanged,
+ // and return false. msg can be NULL.
+ bool SetFlagLocked(CommandLineFlag* flag, const char* value,
+ FlagSettingMode set_mode, string* msg);
+
+ static FlagRegistry* GlobalRegistry(); // returns a singleton registry
+
+ private:
+ friend class GOOGLE_NAMESPACE::FlagSaverImpl; // reads all the flags in order to copy them
+ friend class CommandLineFlagParser; // for ValidateAllFlags
+ friend void GOOGLE_NAMESPACE::GetAllFlags(vector<CommandLineFlagInfo>*);
+
+ // The map from name to flag, for FindFlagLocked().
+ typedef map<const char*, CommandLineFlag*, StringCmp> FlagMap;
+ typedef FlagMap::iterator FlagIterator;
+ typedef FlagMap::const_iterator FlagConstIterator;
+ FlagMap flags_;
+
+  // The map from current-value pointer to flag, for FindFlagViaPtrLocked().
+ typedef map<const void*, CommandLineFlag*> FlagPtrMap;
+ FlagPtrMap flags_by_ptr_;
+
+ Mutex lock_;
+
+ static FlagRegistry* global_registry_; // a singleton registry
+ static Mutex global_registry_lock_; // guards creation of global_registry_
+
+ // Disallow
+ FlagRegistry(const FlagRegistry&);
+ FlagRegistry& operator=(const FlagRegistry&);
+};
+
+FlagRegistry* FlagRegistry::global_registry_ = NULL;
+Mutex FlagRegistry::global_registry_lock_(Mutex::LINKER_INITIALIZED);
+
+FlagRegistry* FlagRegistry::GlobalRegistry() {
+ MutexLock acquire_lock(&global_registry_lock_);
+ if (!global_registry_) {
+ global_registry_ = new FlagRegistry;
+ }
+ return global_registry_;
+}
+
+void FlagRegistry::RegisterFlag(CommandLineFlag* flag) {
+ Lock();
+ pair<FlagIterator, bool> ins =
+ flags_.insert(pair<const char*, CommandLineFlag*>(flag->name(), flag));
+ if (ins.second == false) { // means the name was already in the map
+ if (strcmp(ins.first->second->filename(), flag->filename()) != 0) {
+ ReportError(DIE, "ERROR: flag '%s' was defined more than once "
+ "(in files '%s' and '%s').\n",
+ flag->name(),
+ ins.first->second->filename(),
+ flag->filename());
+ } else {
+ ReportError(DIE, "ERROR: something wrong with flag '%s' in file '%s'. "
+ "One possibility: file '%s' is being linked both statically "
+ "and dynamically into this executable.\n",
+ flag->name(),
+ flag->filename(), flag->filename());
+ }
+ }
+ // Also add to the flags_by_ptr_ map.
+ flags_by_ptr_[flag->current_->value_buffer_] = flag;
+ Unlock();
+}
+
+CommandLineFlag* FlagRegistry::FindFlagLocked(const char* name) {
+ FlagConstIterator i = flags_.find(name);
+ if (i == flags_.end()) {
+ return NULL;
+ } else {
+ return i->second;
+ }
+}
+
+CommandLineFlag* FlagRegistry::FindFlagViaPtrLocked(const void* flag_ptr) {
+ FlagPtrMap::const_iterator i = flags_by_ptr_.find(flag_ptr);
+ if (i == flags_by_ptr_.end()) {
+ return NULL;
+ } else {
+ return i->second;
+ }
+}
+
+CommandLineFlag* FlagRegistry::SplitArgumentLocked(const char* arg,
+ string* key,
+ const char** v,
+ string* error_message) {
+ // Find the flag object for this option
+ const char* flag_name;
+ const char* value = strchr(arg, '=');
+ if (value == NULL) {
+ key->assign(arg);
+ *v = NULL;
+ } else {
+ // Strip out the "=value" portion from arg
+ key->assign(arg, value-arg);
+ *v = ++value; // advance past the '='
+ }
+ flag_name = key->c_str();
+
+ CommandLineFlag* flag = FindFlagLocked(flag_name);
+
+ if (flag == NULL) {
+ // If we can't find the flag-name, then we should return an error.
+ // The one exception is if 1) the flag-name is 'nox', 2) there
+ // exists a flag named 'x', and 3) 'x' is a boolean flag.
+ // In that case, we want to return flag 'x'.
+ if (!(flag_name[0] == 'n' && flag_name[1] == 'o')) {
+ // flag-name is not 'nox', so we're not in the exception case.
+ *error_message = (string(kError) +
+ "unknown command line flag '" + *key + "'\n");
+ return NULL;
+ }
+ flag = FindFlagLocked(flag_name+2);
+ if (flag == NULL) {
+ // No flag named 'x' exists, so we're not in the exception case.
+ *error_message = (string(kError) +
+ "unknown command line flag '" + *key + "'\n");
+ return NULL;
+ }
+ if (strcmp(flag->type_name(), "bool") != 0) {
+ // 'x' exists but is not boolean, so we're not in the exception case.
+ *error_message = (string(kError) +
+ "boolean value (" + *key + ") specified for " +
+ flag->type_name() + " command line flag\n");
+ return NULL;
+ }
+ // We're in the exception case!
+ // Make up a fake value to replace the "no" we stripped out
+ key->assign(flag_name+2); // the name without the "no"
+ *v = "0";
+ }
+
+ // Assign a value if this is a boolean flag
+ if (*v == NULL && strcmp(flag->type_name(), "bool") == 0) {
+ *v = "1"; // the --nox case was already handled, so this is the --x case
+ }
+
+ return flag;
+}
+
+bool TryParseLocked(const CommandLineFlag* flag, FlagValue* flag_value,
+ const char* value, string* msg) {
+  // Use tentative_value, not flag_value, until we know value is valid.
+ FlagValue* tentative_value = flag_value->New();
+ if (!tentative_value->ParseFrom(value)) {
+ if (msg) {
+ *msg += (string(kError) + "illegal value '" + value +
+ + "' specified for " + flag->type_name() + " flag '"
+ + flag->name() + "'\n");
+ }
+ delete tentative_value;
+ return false;
+ } else if (!flag->Validate(*tentative_value)) {
+ if (msg) {
+ *msg += (string(kError) + "failed validation of new value "
+ + "'" + tentative_value->ToString() + "' for flag '" +
+ + flag->name() + "'\n");
+ }
+ delete tentative_value;
+ return false;
+ } else {
+ flag_value->CopyFrom(*tentative_value);
+ if (msg) {
+ *msg += (string(flag->name()) + " set to " + flag_value->ToString()
+ + "\n");
+ }
+ delete tentative_value;
+ return true;
+ }
+}
+
+bool FlagRegistry::SetFlagLocked(CommandLineFlag* flag,
+ const char* value,
+ FlagSettingMode set_mode,
+ string* msg) {
+ flag->UpdateModifiedBit();
+ switch (set_mode) {
+ case SET_FLAGS_VALUE: {
+ // set or modify the flag's value
+ if (!TryParseLocked(flag, flag->current_, value, msg))
+ return false;
+ flag->modified_ = true;
+ break;
+ }
+ case SET_FLAG_IF_DEFAULT: {
+ // set the flag's value, but only if it hasn't been set by someone else
+ if (!flag->modified_) {
+ if (!TryParseLocked(flag, flag->current_, value, msg))
+ return false;
+ flag->modified_ = true;
+ } else {
+ *msg = string(flag->name()) + " set to " + flag->current_value();
+ }
+ break;
+ }
+ case SET_FLAGS_DEFAULT: {
+ // modify the flag's default-value
+ if (!TryParseLocked(flag, flag->defvalue_, value, msg))
+ return false;
+ if (!flag->modified_) {
+ // Need to set both defvalue *and* current, in this case
+ TryParseLocked(flag, flag->current_, value, NULL);
+ }
+ break;
+ }
+ default: {
+ // unknown set_mode
+ assert(false);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+class FlagRegistryLock {
+ public:
+ explicit FlagRegistryLock(FlagRegistry* fr) : fr_(fr) { fr_->Lock(); }
+ ~FlagRegistryLock() { fr_->Unlock(); }
+ private:
+ FlagRegistry *const fr_;
+};
+
+// --------------------------------------------------------------------
+// CommandLineFlagParser
+// Parsing is done in two stages. In the first, we go through
+// argv. For every flag-like arg we can make sense of, we parse
+// it and set the appropriate FLAGS_* variable. For every flag-
+// like arg we can't make sense of, we store it in a vector,
+// along with an explanation of the trouble. In stage 2, we
+// handle the 'reporting' flags like --help and --mpm_version.
+// (This is via a call to HandleCommandLineHelpFlags(), in
+// gflags_reporting.cc.)
+// An optional stage 3 prints out the error messages.
+// This is a bit of a simplification. For instance, --flagfile
+// is handled as soon as it's seen in stage 1, not in stage 2.
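+//
+//    Rough call-order sketch (hedged; HandleCommandLineHelpFlags() lives in
+//    gflags_reporting.cc, outside this diff):
+//
+//      CommandLineFlagParser parser(FlagRegistry::GlobalRegistry());
+//      parser.ParseNewCommandLineFlags(&argc, &argv, remove_flags); // stage 1
+//      HandleCommandLineHelpFlags();                                // stage 2
+//      parser.ValidateAllFlags();                                   // stage 3
+//      if (parser.ReportErrors()) { /* stage 4: surface errors */ }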
+// --------------------------------------------------------------------
+
+class CommandLineFlagParser {
+ public:
+ // The argument is the flag-registry to register the parsed flags in
+ explicit CommandLineFlagParser(FlagRegistry* reg) : registry_(reg) {}
+ ~CommandLineFlagParser() {}
+
+ // Stage 1: Every time this is called, it reads all flags in argv.
+ // However, it ignores all flags that have been successfully set
+ // before. Typically this is only called once, so this 'reparsing'
+ // behavior isn't important. It can be useful when trying to
+ // reparse after loading a dll, though.
+ uint32 ParseNewCommandLineFlags(int* argc, char*** argv, bool remove_flags);
+
+ // Stage 2: print reporting info and exit, if requested.
+ // In gflags_reporting.cc:HandleCommandLineHelpFlags().
+
+ // Stage 3: validate all the commandline flags that have validators
+ // registered.
+ void ValidateAllFlags();
+
+ // Stage 4: report any errors and return true if any were found.
+ bool ReportErrors();
+
+ // Set a particular command line option. "newval" is a string
+ // describing the new value that the option has been set to. If
+ // option_name does not specify a valid option name, or value is not
+ // a valid value for option_name, newval is empty. Does recursive
+ // processing for --flagfile and --fromenv. Returns the new value
+ // if everything went ok, or empty-string if not. (Actually, the
+ // return-string could hold many flag/value pairs due to --flagfile.)
+ // NB: Must have called registry_->Lock() before calling this function.
+ string ProcessSingleOptionLocked(CommandLineFlag* flag,
+ const char* value,
+ FlagSettingMode set_mode);
+
+ // Set a whole batch of command line options as specified by contentdata,
+ // which is in flagfile format (and probably has been read from a flagfile).
+ // Returns the new value if everything went ok, or empty-string if
+ // not. (Actually, the return-string could hold many flag/value
+ // pairs due to --flagfile.)
+ // NB: Must have called registry_->Lock() before calling this function.
+ string ProcessOptionsFromStringLocked(const string& contentdata,
+ FlagSettingMode set_mode);
+
+ // These are the 'recursive' flags, defined at the top of this file.
+ // Whenever we see these flags on the commandline, we must take action.
+ // These are called by ProcessSingleOptionLocked and, similarly, return
+ // new values if everything went ok, or the empty-string if not.
+ string ProcessFlagfileLocked(const string& flagval, FlagSettingMode set_mode);
+ // diff fromenv/tryfromenv
+ string ProcessFromenvLocked(const string& flagval, FlagSettingMode set_mode,
+ bool errors_are_fatal);
+
+ private:
+ FlagRegistry* const registry_;
+ map<string, string> error_flags_; // map from name to error message
+ // This could be a set<string>, but we reuse the map to minimize the .o size
+ map<string, string> undefined_names_; // --[flag] name was not registered
+};
+
+
+// Parse a list of (comma-separated) flags.
+static void ParseFlagList(const char* value, vector<string>* flags) {
+ for (const char *p = value; p && *p; value = p) {
+ p = strchr(value, ',');
+ int len;
+ if (p) {
+ len = static_cast<int>(p - value);
+ p++;
+ } else {
+ len = static_cast<int>(strlen(value));
+ }
+
+ if (len == 0)
+ ReportError(DIE, "ERROR: empty flaglist entry\n");
+ if (value[0] == '-')
+      ReportError(DIE, "ERROR: flag \"%.*s\" begins with '-'\n", len, value);
+
+ flags->push_back(string(value, len));
+ }
+}
+
+// Snarf an entire file into a C++ string. This is just so that we
+// can do all the I/O in one place and not worry about it everywhere.
+// Plus, it's convenient to have the whole file contents at hand.
+// Adds a newline at the end of the file.
+#define PFATAL(s) do { perror(s); commandlineflags_exitfunc(1); } while (0)
+
+static string ReadFileIntoString(const char* filename) {
+ const int kBufSize = 8092;
+ char buffer[kBufSize];
+ string s;
+ FILE* fp = fopen(filename, "r");
+ if (!fp) PFATAL(filename);
+ size_t n;
+ while ( (n=fread(buffer, 1, kBufSize, fp)) > 0 ) {
+ if (ferror(fp)) PFATAL(filename);
+ s.append(buffer, n);
+ }
+ fclose(fp);
+ return s;
+}
+
+uint32 CommandLineFlagParser::ParseNewCommandLineFlags(int* argc, char*** argv,
+ bool remove_flags) {
+ const char *program_name = strrchr((*argv)[0], PATH_SEPARATOR); // nix path
+ program_name = (program_name == NULL ? (*argv)[0] : program_name+1);
+
+ int first_nonopt = *argc; // for non-options moved to the end
+
+ registry_->Lock();
+ for (int i = 1; i < first_nonopt; i++) {
+ char* arg = (*argv)[i];
+
+ // Like getopt(), we permute non-option flags to be at the end.
+ if (arg[0] != '-' || // must be a program argument
+ (arg[0] == '-' && arg[1] == '\0')) { // "-" is an argument, not a flag
+ memmove((*argv) + i, (*argv) + i+1, (*argc - (i+1)) * sizeof((*argv)[i]));
+ (*argv)[*argc-1] = arg; // we go last
+ first_nonopt--; // we've been pushed onto the stack
+ i--; // to undo the i++ in the loop
+ continue;
+ }
+
+ if (arg[0] == '-') arg++; // allow leading '-'
+ if (arg[0] == '-') arg++; // or leading '--'
+
+ // -- alone means what it does for GNU: stop options parsing
+ if (*arg == '\0') {
+ first_nonopt = i+1;
+ break;
+ }
+
+ // Find the flag object for this option
+ string key;
+ const char* value;
+ string error_message;
+ CommandLineFlag* flag = registry_->SplitArgumentLocked(arg, &key, &value,
+ &error_message);
+ if (flag == NULL) {
+ undefined_names_[key] = ""; // value isn't actually used
+ error_flags_[key] = error_message;
+ continue;
+ }
+
+ if (value == NULL) {
+ // Boolean options are always assigned a value by SplitArgumentLocked()
+ assert(strcmp(flag->type_name(), "bool") != 0);
+ if (i+1 >= first_nonopt) {
+ // This flag needs a value, but there is nothing available
+ error_flags_[key] = (string(kError) + "flag '" + (*argv)[i] + "'"
+ + " is missing its argument");
+ if (flag->help() && flag->help()[0] > '\001') {
+ // Be useful in case we have a non-stripped description.
+ error_flags_[key] += string("; flag description: ") + flag->help();
+ }
+ error_flags_[key] += "\n";
+ break; // we treat this as an unrecoverable error
+ } else {
+ value = (*argv)[++i]; // read next arg for value
+
+ // Heuristic to detect the case where someone treats a string arg
+ // like a bool:
+ // --my_string_var --foo=bar
+ // We look for a flag of string type, whose value begins with a
+ // dash, and where the flag-name and value are separated by a
+ // space rather than an '='.
+ // To avoid false positives, we also require the word "true"
+ // or "false" in the help string. Without this, a valid usage
+ // "-lat -30.5" would trigger the warning. The common cases we
+ // want to solve talk about true and false as values.
+ if (value[0] == '-'
+ && strcmp(flag->type_name(), "string") == 0
+ && (strstr(flag->help(), "true")
+ || strstr(flag->help(), "false"))) {
+ fprintf(stderr, "Did you really mean to set flag '%s'"
+ " to the value '%s'?\n",
+ flag->name(), value);
+ }
+ }
+ }
+
+ // TODO(csilvers): only set a flag if we hadn't set it before here
+ ProcessSingleOptionLocked(flag, value, SET_FLAGS_VALUE);
+ }
+ registry_->Unlock();
+
+ if (remove_flags) { // Fix up argc and argv by removing command line flags
+ (*argv)[first_nonopt-1] = (*argv)[0];
+ (*argv) += (first_nonopt-1);
+ (*argc) -= (first_nonopt-1);
+ first_nonopt = 1; // because we still don't count argv[0]
+ }
+
+ logging_is_probably_set_up = true; // because we've parsed --logdir, etc.
+
+ return first_nonopt;
+}
+
+string CommandLineFlagParser::ProcessFlagfileLocked(const string& flagval,
+ FlagSettingMode set_mode) {
+ if (flagval.empty())
+ return "";
+
+ string msg;
+ vector<string> filename_list;
+ ParseFlagList(flagval.c_str(), &filename_list); // take a list of filenames
+ for (size_t i = 0; i < filename_list.size(); ++i) {
+ const char* file = filename_list[i].c_str();
+ msg += ProcessOptionsFromStringLocked(ReadFileIntoString(file), set_mode);
+ }
+ return msg;
+}
+
+string CommandLineFlagParser::ProcessFromenvLocked(const string& flagval,
+ FlagSettingMode set_mode,
+ bool errors_are_fatal) {
+ if (flagval.empty())
+ return "";
+
+ string msg;
+ vector<string> flaglist;
+ ParseFlagList(flagval.c_str(), &flaglist);
+
+ for (size_t i = 0; i < flaglist.size(); ++i) {
+ const char* flagname = flaglist[i].c_str();
+ CommandLineFlag* flag = registry_->FindFlagLocked(flagname);
+ if (flag == NULL) {
+ error_flags_[flagname] = (string(kError) + "unknown command line flag"
+ + " '" + flagname + "'"
+ + " (via --fromenv or --tryfromenv)\n");
+ undefined_names_[flagname] = "";
+ continue;
+ }
+
+ const string envname = string("FLAGS_") + string(flagname);
+ const char* envval = getenv(envname.c_str());
+ if (!envval) {
+ if (errors_are_fatal) {
+ error_flags_[flagname] = (string(kError) + envname +
+ " not found in environment\n");
+ }
+ continue;
+ }
+
+ // Avoid infinite recursion.
+ if ((strcmp(envval, "fromenv") == 0) ||
+ (strcmp(envval, "tryfromenv") == 0)) {
+ error_flags_[flagname] = (string(kError) + "infinite recursion on " +
+ "environment flag '" + envval + "'\n");
+ continue;
+ }
+
+ msg += ProcessSingleOptionLocked(flag, envval, set_mode);
+ }
+ return msg;
+}
+
+string CommandLineFlagParser::ProcessSingleOptionLocked(
+ CommandLineFlag* flag, const char* value, FlagSettingMode set_mode) {
+ string msg;
+ if (value && !registry_->SetFlagLocked(flag, value, set_mode, &msg)) {
+ error_flags_[flag->name()] = msg;
+ return "";
+ }
+
+ // The recursive flags, --flagfile and --fromenv and --tryfromenv,
+ // must be dealt with as soon as they're seen. They will emit
+ // messages of their own.
+ if (strcmp(flag->name(), "flagfile") == 0) {
+ msg += ProcessFlagfileLocked(FLAGS_flagfile, set_mode);
+
+ } else if (strcmp(flag->name(), "fromenv") == 0) {
+ // last arg indicates envval-not-found is fatal (unlike in --tryfromenv)
+ msg += ProcessFromenvLocked(FLAGS_fromenv, set_mode, true);
+
+ } else if (strcmp(flag->name(), "tryfromenv") == 0) {
+ msg += ProcessFromenvLocked(FLAGS_tryfromenv, set_mode, false);
+ }
+
+ return msg;
+}
+
+void CommandLineFlagParser::ValidateAllFlags() {
+ FlagRegistryLock frl(registry_);
+ for (FlagRegistry::FlagConstIterator i = registry_->flags_.begin();
+ i != registry_->flags_.end(); ++i) {
+ if (!i->second->ValidateCurrent()) {
+ // only set a message if one isn't already there. (If there's
+ // an error message, our job is done, even if it's not exactly
+ // the same error.)
+ if (error_flags_[i->second->name()].empty())
+ error_flags_[i->second->name()] =
+ string(kError) + "--" + i->second->name() +
+ " must be set on the commandline"
+ " (default value fails validation)\n";
+ }
+ }
+}
+
+bool CommandLineFlagParser::ReportErrors() {
+ // error_flags_ indicates errors we saw while parsing.
+ // But we ignore undefined-names if ok'ed by --undefok
+ if (!FLAGS_undefok.empty()) {
+ vector<string> flaglist;
+ ParseFlagList(FLAGS_undefok.c_str(), &flaglist);
+ for (size_t i = 0; i < flaglist.size(); ++i) {
+ // We also deal with --no<flag>, in case the flagname was boolean
+ const string no_version = string("no") + flaglist[i];
+ if (undefined_names_.find(flaglist[i]) != undefined_names_.end()) {
+ error_flags_[flaglist[i]] = ""; // clear the error message
+ } else if (undefined_names_.find(no_version) != undefined_names_.end()) {
+ error_flags_[no_version] = "";
+ }
+ }
+ }
+ // Likewise, if they decided to allow reparsing, all undefined-names
+ // are ok; we just silently ignore them now, and hope that a future
+ // parse will pick them up somehow.
+ if (allow_command_line_reparsing) {
+ for (map<string, string>::const_iterator it = undefined_names_.begin();
+ it != undefined_names_.end(); ++it)
+ error_flags_[it->first] = ""; // clear the error message
+ }
+
+ bool found_error = false;
+ string error_message;
+ for (map<string, string>::const_iterator it = error_flags_.begin();
+ it != error_flags_.end(); ++it) {
+ if (!it->second.empty()) {
+ error_message.append(it->second.data(), it->second.size());
+ found_error = true;
+ }
+ }
+ if (found_error)
+ ReportError(DO_NOT_DIE, "%s", error_message.c_str());
+ return found_error;
+}
+
+string CommandLineFlagParser::ProcessOptionsFromStringLocked(
+ const string& contentdata, FlagSettingMode set_mode) {
+ string retval;
+ const char* flagfile_contents = contentdata.c_str();
+ bool flags_are_relevant = true; // set to false when filenames don't match
+ bool in_filename_section = false;
+
+ const char* line_end = flagfile_contents;
+ // We read this file a line at a time.
+ for (; line_end; flagfile_contents = line_end + 1) {
+ while (*flagfile_contents && isspace(*flagfile_contents))
+ ++flagfile_contents;
+ line_end = strchr(flagfile_contents, '\n');
+ size_t len = line_end ? static_cast<size_t>(line_end - flagfile_contents)
+ : strlen(flagfile_contents);
+ string line(flagfile_contents, len);
+
+ // Each line can be one of four things:
+ // 1) A comment line -- we skip it
+ // 2) An empty line -- we skip it
+ // 3) A list of filenames -- starts a new filenames+flags section
+ // 4) A --flag=value line -- apply if previous filenames match
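+ //
+ // For illustration only, a hypothetical flagfile using all four kinds
+ // (the program glob and flag names below are invented):
+ //   # flags for the indexing tools
+ //   indexer mapreduce-*
+ //   --dry_run=true
+ //   --num_shards=10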
+ if (line.empty() || line[0] == '#') {
+ // comment or empty line; just ignore
+
+ } else if (line[0] == '-') { // flag
+ in_filename_section = false; // instead, it was a flag-line
+ if (!flags_are_relevant) // skip this flag; applies to someone else
+ continue;
+
+ const char* name_and_val = line.c_str() + 1; // skip the leading -
+ if (*name_and_val == '-')
+ name_and_val++; // skip second - too
+ string key;
+ const char* value;
+ string error_message;
+ CommandLineFlag* flag = registry_->SplitArgumentLocked(name_and_val,
+ &key, &value,
+ &error_message);
+ // By API, errors parsing flagfile lines are silently ignored.
+ if (flag == NULL) {
+ // "WARNING: flagname '" + key + "' not found\n"
+ } else if (value == NULL) {
+ // "WARNING: flagname '" + key + "' missing a value\n"
+ } else {
+ retval += ProcessSingleOptionLocked(flag, value, set_mode);
+ }
+
+ } else { // a filename!
+ if (!in_filename_section) { // start over: assume filenames don't match
+ in_filename_section = true;
+ flags_are_relevant = false;
+ }
+
+ // Split the line up at spaces into glob-patterns
+ const char* space = line.c_str(); // just has to be non-NULL
+ for (const char* word = line.c_str(); *space; word = space+1) {
+ if (flags_are_relevant) // we can stop as soon as we match
+ break;
+ space = strchr(word, ' ');
+ if (space == NULL)
+ space = word + strlen(word);
+ const string glob(word, space - word);
+ // We try matching both against the full argv0 and basename(argv0)
+#ifdef HAVE_FNMATCH_H
+ if (fnmatch(glob.c_str(),
+ ProgramInvocationName(),
+ FNM_PATHNAME) == 0 ||
+ fnmatch(glob.c_str(),
+ ProgramInvocationShortName(),
+ FNM_PATHNAME) == 0) {
+#else // !HAVE_FNMATCH_H
+ if ((glob == ProgramInvocationName()) ||
+ (glob == ProgramInvocationShortName())) {
+#endif // HAVE_FNMATCH_H
+ flags_are_relevant = true;
+ }
+ }
+ }
+ }
+ return retval;
+}
+
+// --------------------------------------------------------------------
+// GetFromEnv()
+// AddFlagValidator()
+// These are helper functions for routines like BoolFromEnv() and
+// RegisterFlagValidator, defined below. They're defined here so
+// they can live in the unnamed namespace (which makes friendship
+// declarations for these classes possible).
+// --------------------------------------------------------------------
+
+template<typename T>
+T GetFromEnv(const char *varname, const char* type, T dflt) {
+ const char* const valstr = getenv(varname);
+ if (!valstr)
+ return dflt;
+ FlagValue ifv(new T, type, true);
+ if (!ifv.ParseFrom(valstr))
+ ReportError(DIE, "ERROR: error parsing env variable '%s' with value '%s'\n",
+ varname, valstr);
+ return OTHER_VALUE_AS(ifv, T);
+}
+
+bool AddFlagValidator(const void* flag_ptr, ValidateFnProto validate_fn_proto) {
+ // We want a lock around this routine, in case two threads try to
+ // add a validator (hopefully the same one!) at once. We could use
+ // our own lock, but we need to look at the registry anyway, so
+ // we just steal that one.
+ FlagRegistry* const registry = FlagRegistry::GlobalRegistry();
+ FlagRegistryLock frl(registry);
+ // First, find the flag whose current-flag storage is 'flag'.
+ // This is the CommandLineFlag whose current_->value_buffer_ == flag
+ CommandLineFlag* flag = registry->FindFlagViaPtrLocked(flag_ptr);
+ if (!flag) {
+ // WARNING << "Ignoring RegisterValidateFunction() for flag pointer "
+ // << flag_ptr << ": no flag found at that address";
+ return false;
+ } else if (validate_fn_proto == flag->validate_function()) {
+ return true; // ok to register the same function over and over again
+ } else if (validate_fn_proto != NULL && flag->validate_function() != NULL) {
+ // WARNING << "Ignoring RegisterValidateFunction() for flag '"
+ // << flag->name() << "': validate-fn already registered";
+ return false;
+ } else {
+ flag->validate_fn_proto_ = validate_fn_proto;
+ return true;
+ }
+}
+
+} // end unnamed namespace
+
+
+// Now define the functions that are exported via the .h file
+
+// --------------------------------------------------------------------
+// FlagRegisterer
+// This class exists merely to have a global constructor (the
+// kind that runs before main()) that goes and initializes each
+// flag that's been declared. Note that it's very important we
+// don't have a destructor that deletes flag_, because that would
+// cause us to delete current_storage/defvalue_storage as well,
+// which can cause a crash if anything tries to access the flag
+// values in a global destructor.
+// --------------------------------------------------------------------
+
+FlagRegisterer::FlagRegisterer(const char* name, const char* type,
+ const char* help, const char* filename,
+ void* current_storage, void* defvalue_storage) {
+ if (help == NULL)
+ help = "";
+ // FlagValue expects the type-name to not include any namespace
+ // components, so we get rid of those, if any.
+ if (strchr(type, ':'))
+ type = strrchr(type, ':') + 1;
+ FlagValue* current = new FlagValue(current_storage, type, false);
+ FlagValue* defvalue = new FlagValue(defvalue_storage, type, false);
+ // Importantly, flag_ will never be deleted, so storage is always good.
+ CommandLineFlag* flag = new CommandLineFlag(name, help, filename,
+ current, defvalue);
+ FlagRegistry::GlobalRegistry()->RegisterFlag(flag); // default registry
+}
+
+// --------------------------------------------------------------------
+// GetAllFlags()
+// The main way the FlagRegistry class exposes its data. This
+// returns, as strings, all the info about all the flags in
+// the main registry, sorted first by filename they are defined
+// in, and then by flagname.
+// --------------------------------------------------------------------
+
+struct FilenameFlagnameCmp {
+ bool operator()(const CommandLineFlagInfo& a,
+ const CommandLineFlagInfo& b) const {
+ int cmp = strcmp(a.filename.c_str(), b.filename.c_str());
+ if (cmp == 0)
+ cmp = strcmp(a.name.c_str(), b.name.c_str()); // secondary sort key
+ return cmp < 0;
+ }
+};
+
+void GetAllFlags(vector<CommandLineFlagInfo>* OUTPUT) {
+ FlagRegistry* const registry = FlagRegistry::GlobalRegistry();
+ registry->Lock();
+ for (FlagRegistry::FlagConstIterator i = registry->flags_.begin();
+ i != registry->flags_.end(); ++i) {
+ CommandLineFlagInfo fi;
+ i->second->FillCommandLineFlagInfo(&fi);
+ OUTPUT->push_back(fi);
+ }
+ registry->Unlock();
+ // Now sort the flags, first by filename they occur in, then alphabetically
+ sort(OUTPUT->begin(), OUTPUT->end(), FilenameFlagnameCmp());
+}
+
+// --------------------------------------------------------------------
+// SetArgv()
+// GetArgvs()
+// GetArgv()
+// GetArgv0()
+// ProgramInvocationName()
+// ProgramInvocationShortName()
+// SetUsageMessage()
+// ProgramUsage()
+// Functions to set and get argv. Typically the setter is called
+// by ParseCommandLineFlags. Also can get the ProgramUsage string,
+// set by SetUsageMessage.
+// --------------------------------------------------------------------
+
+// These values are not protected by a Mutex because they are normally
+// set only once during program startup.
+static const char* argv0 = "UNKNOWN"; // just the program name
+static const char* cmdline = ""; // the entire command-line
+static vector<string> argvs;
+static uint32 argv_sum = 0;
+static const char* program_usage = NULL;
+
+void SetArgv(int argc, const char** argv) {
+ static bool called_set_argv = false;
+ if (called_set_argv) // we already have an argv for you
+ return;
+
+ called_set_argv = true;
+
+ assert(argc > 0); // every program has at least a progname
+ argv0 = strdup(argv[0]); // small memory leak, but fn only called once
+ assert(argv0);
+
+ string cmdline_string; // easier than doing strcats
+ for (int i = 0; i < argc; i++) {
+ if (i != 0) {
+ cmdline_string += " ";
+ }
+ cmdline_string += argv[i];
+ argvs.push_back(argv[i]);
+ }
+ cmdline = strdup(cmdline_string.c_str()); // another small memory leak
+ assert(cmdline);
+
+ // Compute a simple sum of all the chars in argv
+ for (const char* c = cmdline; *c; c++)
+ argv_sum += *c;
+}
+
+const vector<string>& GetArgvs() { return argvs; }
+const char* GetArgv() { return cmdline; }
+const char* GetArgv0() { return argv0; }
+uint32 GetArgvSum() { return argv_sum; }
+const char* ProgramInvocationName() { // like the GNU libc fn
+ return GetArgv0();
+}
+const char* ProgramInvocationShortName() { // like the GNU libc fn
+ const char* slash = strrchr(argv0, '/');
+#ifdef OS_WINDOWS
+ if (!slash) slash = strrchr(argv0, '\\');
+#endif
+ return slash ? slash + 1 : argv0;
+}
+
+void SetUsageMessage(const string& usage) {
+ if (program_usage != NULL)
+ ReportError(DIE, "ERROR: SetUsageMessage() called twice\n");
+ program_usage = strdup(usage.c_str()); // small memory leak
+}
+
+const char* ProgramUsage() {
+ if (program_usage) {
+ return program_usage;
+ }
+ return "Warning: SetUsageMessage() never called";
+}
+
+// --------------------------------------------------------------------
+// GetCommandLineOption()
+// GetCommandLineFlagInfo()
+// GetCommandLineFlagInfoOrDie()
+// SetCommandLineOption()
+// SetCommandLineOptionWithMode()
+// The programmatic way to set a flag's value, using a string
+// for its name rather than the variable itself (that is,
+// SetCommandLineOption("foo", x) rather than FLAGS_foo = x).
+// There's also a bit more flexibility here due to the various
+// set-modes, but typically these are used when you only have
+// that flag's name as a string, perhaps at runtime.
+// All of these work on the default, global registry.
+// For GetCommandLineOption, return false if no such flag
+// is known, true otherwise. We store the flag's current value
+// in "value" if a suitable flag is found.
+// --------------------------------------------------------------------
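+//
+// For instance (a sketch; "languages" is a hypothetical flag name):
+//   string value;
+//   if (GetCommandLineOption("languages", &value))
+//     fprintf(stderr, "--languages is currently '%s'\n", value.c_str());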
+
+
+bool GetCommandLineOption(const char* name, string* value) {
+ if (NULL == name)
+ return false;
+ assert(value);
+
+ FlagRegistry* const registry = FlagRegistry::GlobalRegistry();
+ FlagRegistryLock frl(registry);
+ CommandLineFlag* flag = registry->FindFlagLocked(name);
+ if (flag == NULL) {
+ return false;
+ } else {
+ *value = flag->current_value();
+ return true;
+ }
+}
+
+bool GetCommandLineFlagInfo(const char* name, CommandLineFlagInfo* OUTPUT) {
+ if (NULL == name) return false;
+ FlagRegistry* const registry = FlagRegistry::GlobalRegistry();
+ FlagRegistryLock frl(registry);
+ CommandLineFlag* flag = registry->FindFlagLocked(name);
+ if (flag == NULL) {
+ return false;
+ } else {
+ assert(OUTPUT);
+ flag->FillCommandLineFlagInfo(OUTPUT);
+ return true;
+ }
+}
+
+CommandLineFlagInfo GetCommandLineFlagInfoOrDie(const char* name) {
+ CommandLineFlagInfo info;
+ if (!GetCommandLineFlagInfo(name, &info)) {
+ fprintf(stderr, "FATAL ERROR: flag name '%s' doesn't exist\n", name);
+ commandlineflags_exitfunc(1); // almost certainly exit()
+ }
+ return info;
+}
+
+string SetCommandLineOptionWithMode(const char* name, const char* value,
+ FlagSettingMode set_mode) {
+ string result;
+ FlagRegistry* const registry = FlagRegistry::GlobalRegistry();
+ FlagRegistryLock frl(registry);
+ CommandLineFlag* flag = registry->FindFlagLocked(name);
+ if (flag) {
+ CommandLineFlagParser parser(registry);
+ result = parser.ProcessSingleOptionLocked(flag, value, set_mode);
+ if (!result.empty()) { // in the error case, we've already logged
+ // You could consider logging this change, if you wanted to know it:
+ //fprintf(stderr, "%sFLAGS_%s\n",
+ // (set_mode == SET_FLAGS_DEFAULT ? "default value of " : ""),
+ // result);
+ }
+ }
+ // The API of this function is that we return empty string on error
+ return result;
+}
+
+string SetCommandLineOption(const char* name, const char* value) {
+ return SetCommandLineOptionWithMode(name, value, SET_FLAGS_VALUE);
+}
+
+// --------------------------------------------------------------------
+// FlagSaver
+// FlagSaverImpl
+// This class stores the states of all flags at construct time,
+// and restores all flags to that state at destruct time.
+// Its major implementation challenge is that it never modifies
+// pointers in the 'main' registry, so global FLAGS_* vars always
+// point to the right place.
+// --------------------------------------------------------------------
+
+class FlagSaverImpl {
+ public:
+ // Constructs an empty FlagSaverImpl object.
+ explicit FlagSaverImpl(FlagRegistry* main_registry)
+ : main_registry_(main_registry) { }
+ ~FlagSaverImpl() {
+ // reclaim memory from each of our CommandLineFlags
+ vector<CommandLineFlag*>::const_iterator it;
+ for (it = backup_registry_.begin(); it != backup_registry_.end(); ++it)
+ delete *it;
+ }
+
+ // Saves the flag states from the flag registry into this object.
+ // It's an error to call this more than once.
+ // Must be called when the registry mutex is not held.
+ void SaveFromRegistry() {
+ FlagRegistryLock frl(main_registry_);
+ assert(backup_registry_.empty()); // call only once!
+ for (FlagRegistry::FlagConstIterator it = main_registry_->flags_.begin();
+ it != main_registry_->flags_.end();
+ ++it) {
+ const CommandLineFlag* main = it->second;
+ // Sets up all the const variables in backup correctly
+ CommandLineFlag* backup = new CommandLineFlag(
+ main->name(), main->help(), main->filename(),
+ main->current_->New(), main->defvalue_->New());
+ // Sets up all the non-const variables in backup correctly
+ backup->CopyFrom(*main);
+ backup_registry_.push_back(backup); // add it to a convenient list
+ }
+ }
+
+ // Restores the saved flag states into the flag registry. We
+ // assume no flags were added or deleted from the registry since
+ // the SaveFromRegistry; if they were, that's trouble! Must be
+ // called when the registry mutex is not held.
+ void RestoreToRegistry() {
+ FlagRegistryLock frl(main_registry_);
+ vector<CommandLineFlag*>::const_iterator it;
+ for (it = backup_registry_.begin(); it != backup_registry_.end(); ++it) {
+ CommandLineFlag* main = main_registry_->FindFlagLocked((*it)->name());
+ if (main != NULL) { // if NULL, flag got deleted from registry(!)
+ main->CopyFrom(**it);
+ }
+ }
+ }
+
+ private:
+ FlagRegistry* const main_registry_;
+ vector<CommandLineFlag*> backup_registry_;
+
+ FlagSaverImpl(const FlagSaverImpl&); // no copying!
+ void operator=(const FlagSaverImpl&);
+};
+
+FlagSaver::FlagSaver()
+ : impl_(new FlagSaverImpl(FlagRegistry::GlobalRegistry())) {
+ impl_->SaveFromRegistry();
+}
+
+FlagSaver::~FlagSaver() {
+ impl_->RestoreToRegistry();
+ delete impl_;
+}
+
+
+// --------------------------------------------------------------------
+// CommandlineFlagsIntoString()
+// ReadFlagsFromString()
+// AppendFlagsIntoFile()
+// ReadFromFlagsFile()
+// These are mostly-deprecated routines that stick the
+// commandline flags into a file/string and read them back
+// out again. I can see a use for CommandlineFlagsIntoString,
+// for creating a flagfile, but the rest don't seem that useful
+// -- some, I think, are a poor-man's attempt at FlagSaver --
+// and are included only until we can delete them from callers.
+// Note they don't save --flagfile flags (though they do save
+// the result of having called the flagfile, of course).
+// --------------------------------------------------------------------
+
+static string TheseCommandlineFlagsIntoString(
+ const vector<CommandLineFlagInfo>& flags) {
+ vector<CommandLineFlagInfo>::const_iterator i;
+
+ size_t retval_space = 0;
+ for (i = flags.begin(); i != flags.end(); ++i) {
+ // An (over)estimate of how much space it will take to print this flag
+ retval_space += i->name.length() + i->current_value.length() + 5;
+ }
+
+ string retval;
+ retval.reserve(retval_space);
+ for (i = flags.begin(); i != flags.end(); ++i) {
+ retval += "--";
+ retval += i->name;
+ retval += "=";
+ retval += i->current_value;
+ retval += "\n";
+ }
+ return retval;
+}
+
+string CommandlineFlagsIntoString() {
+ vector<CommandLineFlagInfo> sorted_flags;
+ GetAllFlags(&sorted_flags);
+ return TheseCommandlineFlagsIntoString(sorted_flags);
+}
+
+bool ReadFlagsFromString(const string& flagfilecontents,
+ const char* /*prog_name*/, // TODO(csilvers): nix this
+ bool errors_are_fatal) {
+ FlagRegistry* const registry = FlagRegistry::GlobalRegistry();
+ FlagSaverImpl saved_states(registry);
+ saved_states.SaveFromRegistry();
+
+ CommandLineFlagParser parser(registry);
+ registry->Lock();
+ parser.ProcessOptionsFromStringLocked(flagfilecontents, SET_FLAGS_VALUE);
+ registry->Unlock();
+ // Should we handle --help and such when reading flags from a string? Sure.
+ HandleCommandLineHelpFlags();
+ if (parser.ReportErrors()) {
+ // Error. Restore all global flags to their previous values.
+ if (errors_are_fatal)
+ commandlineflags_exitfunc(1); // almost certainly exit()
+ saved_states.RestoreToRegistry();
+ return false;
+ }
+ return true;
+}
+
+// TODO(csilvers): nix prog_name in favor of ProgramInvocationShortName()
+bool AppendFlagsIntoFile(const string& filename, const char *prog_name) {
+ FILE *fp = fopen(filename.c_str(), "a");
+ if (!fp) {
+ return false;
+ }
+
+ if (prog_name)
+ fprintf(fp, "%s\n", prog_name);
+
+ vector<CommandLineFlagInfo> flags;
+ GetAllFlags(&flags);
+ // But we don't want --flagfile, which leads to weird recursion issues
+ vector<CommandLineFlagInfo>::iterator i;
+ for (i = flags.begin(); i != flags.end(); ++i) {
+ if (strcmp(i->name.c_str(), "flagfile") == 0) {
+ flags.erase(i);
+ break;
+ }
+ }
+ fprintf(fp, "%s", TheseCommandlineFlagsIntoString(flags).c_str());
+
+ fclose(fp);
+ return true;
+}
+
+bool ReadFromFlagsFile(const string& filename, const char* prog_name,
+ bool errors_are_fatal) {
+ return ReadFlagsFromString(ReadFileIntoString(filename.c_str()),
+ prog_name, errors_are_fatal);
+}
+
+
+// --------------------------------------------------------------------
+// BoolFromEnv()
+// Int32FromEnv()
+// Int64FromEnv()
+// Uint64FromEnv()
+// DoubleFromEnv()
+// StringFromEnv()
+// Reads the value from the environment and returns it.
+// We use a FlagValue to make the parsing easy.
+// Example usage:
+// DEFINE_bool(myflag, BoolFromEnv("MYFLAG_DEFAULT", false), "whatever");
+// --------------------------------------------------------------------
+
+bool BoolFromEnv(const char *v, bool dflt) {
+ return GetFromEnv(v, "bool", dflt);
+}
+int32 Int32FromEnv(const char *v, int32 dflt) {
+ return GetFromEnv(v, "int32", dflt);
+}
+int64 Int64FromEnv(const char *v, int64 dflt) {
+ return GetFromEnv(v, "int64", dflt);
+}
+uint64 Uint64FromEnv(const char *v, uint64 dflt) {
+ return GetFromEnv(v, "uint64", dflt);
+}
+double DoubleFromEnv(const char *v, double dflt) {
+ return GetFromEnv(v, "double", dflt);
+}
+const char *StringFromEnv(const char *varname, const char *dflt) {
+ const char* const val = getenv(varname);
+ return val ? val : dflt;
+}
+
+
+// --------------------------------------------------------------------
+// RegisterFlagValidator()
+// RegisterFlagValidator() is the function that clients use to
+// 'decorate' a flag with a validation function. Once this is
+// done, every time the flag is set (including when the flag
+// is parsed from argv), the validator-function is called.
+// These functions return true if the validator was added
+// successfully, or false if not: the flag already has a validator
+// (only one allowed per flag), the 1st arg isn't a flag, etc.
+// This function is not thread-safe.
+// --------------------------------------------------------------------
+
+bool RegisterFlagValidator(const bool* flag,
+ bool (*validate_fn)(const char*, bool)) {
+ return AddFlagValidator(flag, reinterpret_cast<ValidateFnProto>(validate_fn));
+}
+bool RegisterFlagValidator(const int32* flag,
+ bool (*validate_fn)(const char*, int32)) {
+ return AddFlagValidator(flag, reinterpret_cast<ValidateFnProto>(validate_fn));
+}
+bool RegisterFlagValidator(const int64* flag,
+ bool (*validate_fn)(const char*, int64)) {
+ return AddFlagValidator(flag, reinterpret_cast<ValidateFnProto>(validate_fn));
+}
+bool RegisterFlagValidator(const uint64* flag,
+ bool (*validate_fn)(const char*, uint64)) {
+ return AddFlagValidator(flag, reinterpret_cast<ValidateFnProto>(validate_fn));
+}
+bool RegisterFlagValidator(const double* flag,
+ bool (*validate_fn)(const char*, double)) {
+ return AddFlagValidator(flag, reinterpret_cast<ValidateFnProto>(validate_fn));
+}
+bool RegisterFlagValidator(const string* flag,
+ bool (*validate_fn)(const char*, const string&)) {
+ return AddFlagValidator(flag, reinterpret_cast<ValidateFnProto>(validate_fn));
+}
+
+
+// --------------------------------------------------------------------
+// ParseCommandLineFlags()
+// ParseCommandLineNonHelpFlags()
+// HandleCommandLineHelpFlags()
+// This is the main function called from main(), to actually
+// parse the commandline. It modifies argc and argv as described
+// at the top of gflags.h. You can also divide this
+// function into two parts, if you want to do work between
+// the parsing of the flags and the printing of any help output.
+// --------------------------------------------------------------------
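+//
+// A minimal sketch of the usual call from main() (program and flag names
+// here are illustrative only):
+//   int main(int argc, char** argv) {
+//     google::SetUsageMessage("usage: myprog [--verbose] <infile>");
+//     google::ParseCommandLineFlags(&argc, &argv, true /* remove_flags */);
+//     // argv[1] is now the first non-flag argument, if any
+//     return 0;
+//   }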
+
+static uint32 ParseCommandLineFlagsInternal(int* argc, char*** argv,
+ bool remove_flags, bool do_report) {
+ SetArgv(*argc, const_cast<const char**>(*argv)); // save it for later
+
+ FlagRegistry* const registry = FlagRegistry::GlobalRegistry();
+ CommandLineFlagParser parser(registry);
+
+ // When we parse the commandline flags, we'll handle --flagfile,
+ // --tryfromenv, etc. as we see them (since flag-evaluation order
+ // may be important). But sometimes apps set FLAGS_tryfromenv/etc.
+ // manually before calling ParseCommandLineFlags. We want to evaluate
+ // those too, as if they were the first flags on the commandline.
+ registry->Lock();
+ parser.ProcessFlagfileLocked(FLAGS_flagfile, SET_FLAGS_VALUE);
+ // Last arg here indicates whether flag-not-found is a fatal error or not
+ parser.ProcessFromenvLocked(FLAGS_fromenv, SET_FLAGS_VALUE, true);
+ parser.ProcessFromenvLocked(FLAGS_tryfromenv, SET_FLAGS_VALUE, false);
+ registry->Unlock();
+
+ // Now get the flags specified on the commandline
+ const int r = parser.ParseNewCommandLineFlags(argc, argv, remove_flags);
+
+ if (do_report)
+ HandleCommandLineHelpFlags(); // may cause us to exit on --help, etc.
+
+ // See if any of the unset flags fail their validation checks
+ parser.ValidateAllFlags();
+
+ if (parser.ReportErrors()) // may cause us to exit on illegal flags
+ commandlineflags_exitfunc(1); // almost certainly exit()
+ return r;
+}
+
+uint32 ParseCommandLineFlags(int* argc, char*** argv, bool remove_flags) {
+ return ParseCommandLineFlagsInternal(argc, argv, remove_flags, true);
+}
+
+uint32 ParseCommandLineNonHelpFlags(int* argc, char*** argv,
+ bool remove_flags) {
+ return ParseCommandLineFlagsInternal(argc, argv, remove_flags, false);
+}
+
+// --------------------------------------------------------------------
+// AllowCommandLineReparsing()
+// ReparseCommandLineNonHelpFlags()
+// This is most useful for shared libraries. The idea is if
+// a flag is defined in a shared library that is dlopen'ed
+// sometime after main(), you can ParseCommandLineFlags before
+// the dlopen, then ReparseCommandLineNonHelpFlags() after the
+// dlopen, to get the new flags. But you have to explicitly
+// Allow() it; otherwise, you get the normal default behavior
+// of unrecognized flags calling a fatal error.
+// TODO(csilvers): this isn't used. Just delete it?
+// --------------------------------------------------------------------
+
+void AllowCommandLineReparsing() {
+ allow_command_line_reparsing = true;
+}
+
+uint32 ReparseCommandLineNonHelpFlags() {
+ // We make a copy of argc and argv to pass in
+ const vector<string>& argvs = GetArgvs();
+ int tmp_argc = static_cast<int>(argvs.size());
+ char** tmp_argv = new char* [tmp_argc + 1];
+ for (int i = 0; i < tmp_argc; ++i)
+ tmp_argv[i] = strdup(argvs[i].c_str()); // TODO(csilvers): don't dup
+
+ const int retval = ParseCommandLineNonHelpFlags(&tmp_argc, &tmp_argv, false);
+
+ for (int i = 0; i < tmp_argc; ++i)
+ free(tmp_argv[i]);
+ delete[] tmp_argv;
+
+ return retval;
+}
+
+void ShutDownCommandLineFlags() {
+ FlagRegistry::DeleteGlobalRegistry();
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/gflags/gflags.h b/extern/libmv/third_party/gflags/gflags.h
new file mode 100644
index 00000000000..cefbd62ae51
--- /dev/null
+++ b/extern/libmv/third_party/gflags/gflags.h
@@ -0,0 +1,589 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Ray Sidney
+// Revamped and reorganized by Craig Silverstein
+//
+// This is the file that should be included by any file which declares
+// or defines a command line flag or wants to parse command line flags
+// or print a program usage message (which will include information about
+// flags). Executive summary, in the form of an example foo.cc file:
+//
+// #include "foo.h" // foo.h has a line "DECLARE_int32(start);"
+// #include "validators.h" // hypothetical file defining ValidateIsFile()
+//
+// DEFINE_int32(end, 1000, "The last record to read");
+//
+// DEFINE_string(filename, "my_file.txt", "The file to read");
+// // Crash if the specified file does not exist.
+// static bool dummy = RegisterFlagValidator(&FLAGS_filename,
+// &ValidateIsFile);
+//
+// DECLARE_bool(verbose); // some other file has a DEFINE_bool(verbose, ...)
+//
+// void MyFunc() {
+// if (FLAGS_verbose) printf("Records %d-%d\n", FLAGS_start, FLAGS_end);
+// }
+//
+// Then, at the command-line:
+// ./foo --noverbose --start=5 --end=100
+//
+// For more details, see
+// doc/gflags.html
+//
+// --- A note about thread-safety:
+//
+// We describe many functions in this file as being thread-hostile,
+// thread-compatible, or thread-safe. Here are the meanings we use:
+//
+// thread-safe: it is safe for multiple threads to call this routine
+// (or, when referring to a class, methods of this class)
+// concurrently.
+// thread-hostile: it is not safe for multiple threads to call this
+// routine (or methods of this class) concurrently. In gflags,
+// most thread-hostile routines are intended to be called early in,
+// or even before, main() -- that is, before threads are spawned.
+// thread-compatible: it is safe for multiple threads to read from
+// this variable (when applied to variables), or to call const
+// methods of this class (when applied to classes), as long as no
+// other thread is writing to the variable or calling non-const
+// methods of this class.
+
+#ifndef GOOGLE_GFLAGS_H_
+#define GOOGLE_GFLAGS_H_
+
+#include <string>
+#include <vector>
+
+// We care a lot about number of bits things take up. Unfortunately,
+// systems define their bit-specific ints in a lot of different ways.
+// We use our own way, and have a typedef to get there.
+// Note: these commands below may look like "#if 1" or "#if 0", but
+// that's because they were constructed that way at ./configure time.
+// Look at gflags.h.in to see how they're calculated (based on your config).
+#if 1
+#include <stdint.h> // the normal place uint16_t is defined
+#endif
+#if 1
+#include <sys/types.h> // the normal place u_int16_t is defined
+#endif
+#if 1
+#include <inttypes.h> // a third place for uint16_t or u_int16_t
+#endif
+
+namespace google {
+
+#if 1 // the C99 format
+typedef int32_t int32;
+typedef uint32_t uint32;
+typedef int64_t int64;
+typedef uint64_t uint64;
+#elif 1 // the BSD format
+typedef int32_t int32;
+typedef u_int32_t uint32;
+typedef int64_t int64;
+typedef u_int64_t uint64;
+#elif 0 // the windows (vc7) format
+typedef __int32 int32;
+typedef unsigned __int32 uint32;
+typedef __int64 int64;
+typedef unsigned __int64 uint64;
+#else
+#error Do not know how to define a 32-bit integer quantity on your system
+#endif
+
+// --------------------------------------------------------------------
+// To actually define a flag in a file, use DEFINE_bool,
+// DEFINE_string, etc. at the bottom of this file. You may also find
+// it useful to register a validator with the flag. This ensures that
+// when the flag is parsed from the commandline, or is later set via
+// SetCommandLineOption, we call the validation function. It is _not_
+// called when you assign the value to the flag directly using the = operator.
+//
+// The validation function should return true if the flag value is valid, and
+// false otherwise. If the function returns false for the new setting of the
+// flag, the flag will retain its current value. If it returns false for the
+// default value, ParseCommandLineFlags() will die.
+//
+// This function is safe to call at global construct time (as in the
+// example below).
+//
+// Example use:
+// static bool ValidatePort(const char* flagname, int32 value) {
+// if (value > 0 && value < 32768) // value is ok
+// return true;
+// printf("Invalid value for --%s: %d\n", flagname, (int)value);
+// return false;
+// }
+// DEFINE_int32(port, 0, "What port to listen on");
+// static bool dummy = RegisterFlagValidator(&FLAGS_port, &ValidatePort);
+
+// Returns true if successfully registered, false if not (because the
+// first argument doesn't point to a command-line flag, or because a
+// validator is already registered for this flag).
+bool RegisterFlagValidator(const bool* flag,
+ bool (*validate_fn)(const char*, bool));
+bool RegisterFlagValidator(const int32* flag,
+ bool (*validate_fn)(const char*, int32));
+bool RegisterFlagValidator(const int64* flag,
+ bool (*validate_fn)(const char*, int64));
+bool RegisterFlagValidator(const uint64* flag,
+ bool (*validate_fn)(const char*, uint64));
+bool RegisterFlagValidator(const double* flag,
+ bool (*validate_fn)(const char*, double));
+bool RegisterFlagValidator(const std::string* flag,
+ bool (*validate_fn)(const char*, const std::string&));
+
+
+// --------------------------------------------------------------------
+// These methods are the best way to get access to info about the
+// list of commandline flags. Note that these routines are pretty slow.
+// GetAllFlags: mostly-complete info about the list, sorted by file.
+// ShowUsageWithFlags: pretty-prints the list to stdout (what --help does)
+// ShowUsageWithFlagsRestrict: limit to filenames with restrict as a substr
+//
+// In addition to accessing flags, you can also access argv[0] (the program
+// name) and argv (the entire commandline), which we sock away a copy of.
+// These variables are static, so you should only set them once.
+
+struct CommandLineFlagInfo {
+ std::string name; // the name of the flag
+ std::string type; // the type of the flag: int32, etc
+ std::string description; // the "help text" associated with the flag
+ std::string current_value; // the current value, as a string
+ std::string default_value; // the default value, as a string
+ std::string filename; // 'cleaned' version of filename holding the flag
+ bool has_validator_fn; // true if RegisterFlagValidator called on flag
+ bool is_default; // true if the flag has the default value and
+ // has not been set explicitly from the cmdline
+ // or via SetCommandLineOption
+};
+
+// Using this inside of a validator is a recipe for a deadlock.
+// TODO(wojtekm) Fix locking when validators are running, to make it safe to
+// call validators during ParseAllFlags.
+// Also make sure then to uncomment the corresponding unit test in
+// commandlineflags_unittest.sh
+extern void GetAllFlags(std::vector<CommandLineFlagInfo>* OUTPUT);
+// These two are actually defined in commandlineflags_reporting.cc.
+extern void ShowUsageWithFlags(const char *argv0); // what --help does
+extern void ShowUsageWithFlagsRestrict(const char *argv0, const char *restrict);
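+
+// For instance (a sketch, not an example from the library docs):
+//   std::vector<google::CommandLineFlagInfo> flags;
+//   google::GetAllFlags(&flags);
+//   for (size_t i = 0; i < flags.size(); ++i)
+//     printf("--%s (%s)\n", flags[i].name.c_str(), flags[i].type.c_str());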
+
+// Create a descriptive string for a flag.
+// Goes to some trouble to make pretty line breaks.
+extern std::string DescribeOneFlag(const CommandLineFlagInfo& flag);
+
+// Thread-hostile; meant to be called before any threads are spawned.
+extern void SetArgv(int argc, const char** argv);
+// The following functions are thread-safe as long as SetArgv() is
+// only called before any threads start.
+extern const std::vector<std::string>& GetArgvs(); // all of argv as a vector
+extern const char* GetArgv(); // all of argv as a string
+extern const char* GetArgv0(); // only argv0
+extern uint32 GetArgvSum(); // simple checksum of argv
+extern const char* ProgramInvocationName(); // argv0, or "UNKNOWN" if not set
+extern const char* ProgramInvocationShortName(); // basename(argv0)
+// ProgramUsage() is thread-safe as long as SetUsageMessage() is only
+// called before any threads start.
+extern const char* ProgramUsage(); // string set by SetUsageMessage()
+
+
+// --------------------------------------------------------------------
+// Normally you access commandline flags by just saying "if (FLAGS_foo)"
+// or whatever, and set them by calling "FLAGS_foo = bar" (or, more
+// commonly, via the DEFINE_foo macro). But if you need a bit more
+// control, we have programmatic ways to get/set the flags as well.
+// These programmatic ways to access flags are thread-safe, but direct
+// access is only thread-compatible.
+
+// Return true iff the flagname was found.
+// OUTPUT is set to the flag's value, or unchanged if we return false.
+extern bool GetCommandLineOption(const char* name, std::string* OUTPUT);
+
+// Return true iff the flagname was found. OUTPUT is set to the flag's
+// CommandLineFlagInfo or unchanged if we return false.
+extern bool GetCommandLineFlagInfo(const char* name,
+ CommandLineFlagInfo* OUTPUT);
+
+// Return the CommandLineFlagInfo of the flagname. exit() if name not found.
+// Example usage, to check if a flag's value is currently the default value:
+// if (GetCommandLineFlagInfoOrDie("foo").is_default) ...
+extern CommandLineFlagInfo GetCommandLineFlagInfoOrDie(const char* name);
+
+enum FlagSettingMode {
+ // update the flag's value (can call this multiple times).
+ SET_FLAGS_VALUE,
+ // update the flag's value, but *only if* it has not yet been updated
+ // with SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef".
+ SET_FLAG_IF_DEFAULT,
+ // set the flag's default value to this. If the flag has not been updated
+ // yet (via SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef"),
+ // change the flag's current value to the new default value as well.
+ SET_FLAGS_DEFAULT
+};
+
+// Set a particular flag ("command line option"). Returns a string
+// describing the new value that the option has been set to. The
+// return value API is not well-specified, so basically just depend on
+// it to be empty if the setting failed for some reason -- the name is
+// not a valid flag name, or the value is not a valid value -- and
+// non-empty otherwise.
+
+// SetCommandLineOption uses set_mode == SET_FLAGS_VALUE (the common case)
+extern std::string SetCommandLineOption(const char* name, const char* value);
+extern std::string SetCommandLineOptionWithMode(const char* name, const char* value,
+ FlagSettingMode set_mode);
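+
+// A sketch of the difference between the modes ("foo" stands in for any
+// defined flag):
+//   SetCommandLineOption("foo", "7");                  // always overwrites
+//   SetCommandLineOptionWithMode("foo", "42",
+//                                SET_FLAG_IF_DEFAULT); // no-op here, since
+//                                                      // "foo" was just set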
+
+
+// --------------------------------------------------------------------
+// Saves the states (value, default value, whether the user has set
+// the flag, registered validators, etc) of all flags, and restores
+// them when the FlagSaver is destroyed. This is very useful in
+// tests, say, when you want to let your tests change the flags, but
+// make sure that they get reverted to the original states when your
+// test is complete.
+//
+// Example usage:
+// void TestFoo() {
+// FlagSaver s1;
+// FLAGS_foo = false;
+// FLAGS_bar = "some value";
+//
+// // test happens here. You can return at any time
+// // without worrying about restoring the FLAG values.
+// }
+//
+// Note: This class is marked with __attribute__((unused)) because all the
+// work is done in the constructor and destructor, so in the standard
+// usage example above, the compiler would complain that it's an
+// unused variable.
+//
+// This class is thread-safe.
+
+class FlagSaver {
+ public:
+ FlagSaver();
+ ~FlagSaver();
+
+ private:
+ class FlagSaverImpl* impl_; // we use pimpl here to keep API steady
+
+ FlagSaver(const FlagSaver&); // no copying!
+ void operator=(const FlagSaver&);
+}
+#ifndef _MSC_VER
+__attribute__ ((unused))
+#endif
+;
+
+// --------------------------------------------------------------------
+// Some deprecated or hopefully-soon-to-be-deprecated functions.
+
+// This is often used for logging. TODO(csilvers): figure out a better way
+extern std::string CommandlineFlagsIntoString();
+// Usually where this is used, a FlagSaver should be used instead.
+extern bool ReadFlagsFromString(const std::string& flagfilecontents,
+ const char* prog_name,
+ bool errors_are_fatal); // uses SET_FLAGS_VALUE
+
+// These let you manually implement --flagfile functionality.
+// DEPRECATED.
+extern bool AppendFlagsIntoFile(const std::string& filename, const char* prog_name);
+extern bool SaveCommandFlags(); // actually defined in google.cc !
+extern bool ReadFromFlagsFile(const std::string& filename, const char* prog_name,
+ bool errors_are_fatal); // uses SET_FLAGS_VALUE
+
+
+// --------------------------------------------------------------------
+// Useful routines for initializing flags from the environment.
+// In each case, if 'varname' does not exist in the environment
+// return defval. If 'varname' does exist but is not valid
+// (e.g., not a number for an int32 flag), abort with an error.
+// Otherwise, return the value. NOTE: for booleans, for true use
+// 't' or 'T' or 'true' or '1', for false 'f' or 'F' or 'false' or '0'.
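+//
+// For example (a sketch; MYAPP_PORT is an invented variable name):
+//   DEFINE_int32(port, Int32FromEnv("MYAPP_PORT", 8080),
+//                "port to listen on");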
+
+extern bool BoolFromEnv(const char *varname, bool defval);
+extern int32 Int32FromEnv(const char *varname, int32 defval);
+extern int64 Int64FromEnv(const char *varname, int64 defval);
+extern uint64 Uint64FromEnv(const char *varname, uint64 defval);
+extern double DoubleFromEnv(const char *varname, double defval);
+extern const char *StringFromEnv(const char *varname, const char *defval);
+
+
+// --------------------------------------------------------------------
+// The next two functions parse commandlineflags from main():
+
+// Set the "usage" message for this program. For example:
+// string usage("This program does nothing. Sample usage:\n");
+// usage += argv[0] + " <uselessarg1> <uselessarg2>";
+// SetUsageMessage(usage);
+// Do not include commandline flags in the usage: we do that for you!
+// Thread-hostile; meant to be called before any threads are spawned.
+extern void SetUsageMessage(const std::string& usage);
+
+// Looks for flags in argv and parses them. Rearranges argv to put
+// flags first, or removes them entirely if remove_flags is true.
+// If a flag is defined more than once in the command line or flag
+// file, the last definition is used. Returns the index (into argv)
+// of the first non-flag argument.
+// See top-of-file for more details on this function.
+#ifndef SWIG // In swig, use ParseCommandLineFlagsScript() instead.
+extern uint32 ParseCommandLineFlags(int *argc, char*** argv,
+ bool remove_flags);
+#endif
+
+
+// Calls to ParseCommandLineNonHelpFlags and then to
+// HandleCommandLineHelpFlags can be used instead of a call to
+// ParseCommandLineFlags during initialization, in order to allow for
+// changing default values for some FLAGS (via
+// e.g. SetCommandLineOptionWithMode calls) between the time of
+// command line parsing and the time of dumping help information for
+// the flags as a result of command line parsing. If a flag is
+// defined more than once in the command line or flag file, the last
+// definition is used. Returns the index (into argv) of the first
+// non-flag argument. (If remove_flags is true, will always return 1.)
+extern uint32 ParseCommandLineNonHelpFlags(int *argc, char*** argv,
+ bool remove_flags);
+// This is actually defined in commandlineflags_reporting.cc.
+// This function is misnamed (it also handles --version, etc.), but
+// it's too late to change that now. :-(
+extern void HandleCommandLineHelpFlags(); // in commandlineflags_reporting.cc
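+//
+// A sketch of the two-phase pattern (the flag adjusted here is only
+// illustrative):
+//   google::ParseCommandLineNonHelpFlags(&argc, &argv, true);
+//   google::SetCommandLineOptionWithMode("log_dir", "/tmp/myapp",
+//                                        google::SET_FLAGS_DEFAULT);
+//   google::HandleCommandLineHelpFlags();  // --help now shows the new default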
+
+// Allow command line reparsing. Disables the error normally
+// generated when an unknown flag is found, since it may be found in a
+// later parse. Thread-hostile; meant to be called before any threads
+// are spawned.
+extern void AllowCommandLineReparsing();
+
+// Reparse the flags that have not yet been recognized. Only flags
+// registered since the last parse will be recognized. Any flag value
+// must be provided as part of the argument using "=", not as a
+// separate command line argument that follows the flag argument.
+// Intended for handling flags from dynamically loaded libraries,
+// since their flags are not registered until they are loaded.
+// Returns the index (into the original argv) of the first non-flag
+// argument. (If remove_flags is true, will always return 1.)
+extern uint32 ReparseCommandLineNonHelpFlags();
+
+// Clean up memory allocated by flags. This is only needed to reduce
+// the quantity of "potentially leaked" reports emitted by memory
+// debugging tools such as valgrind. It is not required for normal
+// operation, or for the perftools heap-checker. It must only be called
+// when the process is about to exit, and all threads that might
+// access flags are quiescent. Referencing flags after this is called
+// will have unexpected consequences. This is not safe to run when
+// multiple threads might be running: the function is thread-hostile.
+extern void ShutDownCommandLineFlags();
+
+
+// --------------------------------------------------------------------
+// Now come the command line flag declaration/definition macros that
+// will actually be used. They're kind of hairy. A major reason
+// for this is initialization: we want people to be able to access
+// variables in global constructors and have that not crash, even if
+// their global constructor runs before the global constructor here.
+// (Obviously, we can't guarantee the flags will have the correct
+// default value in that case, but at least accessing them is safe.)
+// The only way to do that is have flags point to a static buffer.
+// So we make one, using a union to ensure proper alignment, and
+// then use placement-new to actually set up the flag with the
+// correct default value. In the same vein, we have to worry about
+// flag access in global destructors, so FlagRegisterer has to be
+// careful never to destroy the flag-values it constructs.
+//
+// Note that when we define a flag variable FLAGS_<name>, we also
+// preemptively define a junk variable, FLAGS_no<name>. This is to
+// cause a link-time error if someone tries to define 2 flags with
+// names like "logging" and "nologging". We do this because a bool
+// flag FLAG can be set from the command line to true with a "-FLAG"
+// argument, and to false with a "-noFLAG" argument, and so this can
+// potentially avert confusion.
+//
+// We also put flags into their own namespace. It is purposefully
+// named in an opaque way that people should have trouble typing
+// directly. The idea is that DEFINE puts the flag in the weird
+// namespace, and DECLARE imports the flag from there into the current
+// namespace. The net result is to force people to use DECLARE to get
+// access to a flag, rather than saying "extern bool FLAGS_whatever;"
+// or some such instead. We want this so we can put extra
+// functionality (like sanity-checking) in DECLARE if we want, and
+// make sure it is picked up everywhere.
+//
+// We also put the type of the variable in the namespace, so that
+// people can't DECLARE_int32 something that they DEFINE_bool'd
+// elsewhere.
+
+class FlagRegisterer {
+ public:
+ FlagRegisterer(const char* name, const char* type,
+ const char* help, const char* filename,
+ void* current_storage, void* defvalue_storage);
+};
+
+extern bool FlagsTypeWarn(const char *name);
+
+// If your application #defines STRIP_FLAG_HELP to a non-zero value
+// before #including this file, we remove the help message from the
+// binary file. This can reduce the size of the resulting binary
+// somewhat, and may also be useful for security reasons.
+
+extern const char kStrippedFlagHelp[];
+
+}
+
+#ifndef SWIG // In swig, ignore the main flag declarations
+
+#if defined(STRIP_FLAG_HELP) && STRIP_FLAG_HELP > 0
+// Need this construct to avoid the 'defined but not used' warning.
+#define MAYBE_STRIPPED_HELP(txt) (false ? (txt) : ::google::kStrippedFlagHelp)
+#else
+#define MAYBE_STRIPPED_HELP(txt) txt
+#endif
+
+// Each command-line flag has two variables associated with it: one
+// with the current value, and one with the default value. However,
+// we have a third variable, which is where the value is assigned; it's a
+// constant. This guarantees that FLAGS_##name is initialized at
+// static initialization time (e.g. before program-start) rather than
+// global construction time (which is after program-start but
+// before main), at least when 'value' is a compile-time constant. We
+// use a small trick for the "default value" variable, and call it
+// FLAGS_no<name>. This serves the second purpose of assuring a
+// compile error if someone tries to define a flag named no<name>
+// which is illegal (--foo and --nofoo both affect the "foo" flag).
+#define DEFINE_VARIABLE(type, shorttype, name, value, help) \
+ namespace fL##shorttype { \
+ static const type FLAGS_nono##name = value; \
+ type FLAGS_##name = FLAGS_nono##name; \
+ type FLAGS_no##name = FLAGS_nono##name; \
+ static ::google::FlagRegisterer o_##name( \
+ #name, #type, MAYBE_STRIPPED_HELP(help), __FILE__, \
+ &FLAGS_##name, &FLAGS_no##name); \
+ } \
+ using fL##shorttype::FLAGS_##name
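+
+// As a rough illustration (this expansion is paraphrased, not verbatim),
+// DEFINE_int32(port, 0, "port to use") goes through DEFINE_VARIABLE to
+// produce approximately:
+//   namespace fLI {
+//     static const ::google::int32 FLAGS_nonoport = 0;
+//     ::google::int32 FLAGS_port = FLAGS_nonoport;
+//     ::google::int32 FLAGS_noport = FLAGS_nonoport;
+//     static ::google::FlagRegisterer o_port(
+//         "port", "::google::int32", "port to use", __FILE__,
+//         &FLAGS_port, &FLAGS_noport);
+//   }
+//   using fLI::FLAGS_port;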
+
+#define DECLARE_VARIABLE(type, shorttype, name) \
+ namespace fL##shorttype { \
+ extern type FLAGS_##name; \
+ } \
+ using fL##shorttype::FLAGS_##name
+
+// For DEFINE_bool, we want to do the extra check that the passed-in
+// value is actually a bool, and not a string or something that can be
+// coerced to a bool. These declarations (no definition needed!) will
+// help us do that, and never evaluate From, which is important.
+// We'll use 'sizeof(IsBool(val))' to distinguish. This code requires
+// that the compiler have different sizes for bool & double. Since
+// this is not guaranteed by the standard, we check it with a
+// compile-time assert (an array of size -1 gives a compile-time error).
+namespace fLB {
+struct CompileAssert {};
+typedef CompileAssert expected_sizeof_double_neq_sizeof_bool[
+ (sizeof(double) != sizeof(bool)) ? 1 : -1];
+template<typename From> double IsBoolFlag(const From& from);
+bool IsBoolFlag(bool from);
+} // namespace fLB
+
+#define DECLARE_bool(name) DECLARE_VARIABLE(bool, B, name)
+#define DEFINE_bool(name, val, txt) \
+ namespace fLB { \
+ typedef ::fLB::CompileAssert FLAG_##name##_value_is_not_a_bool[ \
+ (sizeof(::fLB::IsBoolFlag(val)) != sizeof(double)) ? 1 : -1]; \
+ } \
+ DEFINE_VARIABLE(bool, B, name, val, txt)
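+
+// The upshot (illustrative): DEFINE_bool(dry_run, true, "...") compiles,
+// but DEFINE_bool(dry_run, "true", "...") does not, because
+// ::fLB::IsBoolFlag("true") picks the template overload returning double,
+// giving the CompileAssert array above a size of -1.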
+
+#define DECLARE_int32(name) DECLARE_VARIABLE(::google::int32, I, name)
+#define DEFINE_int32(name,val,txt) DEFINE_VARIABLE(::google::int32, I, name, val, txt)
+
+#define DECLARE_int64(name) DECLARE_VARIABLE(::google::int64, I64, name)
+#define DEFINE_int64(name,val,txt) DEFINE_VARIABLE(::google::int64, I64, name, val, txt)
+
+#define DECLARE_uint64(name) DECLARE_VARIABLE(::google::uint64, U64, name)
+#define DEFINE_uint64(name,val,txt) DEFINE_VARIABLE(::google::uint64, U64, name, val, txt)
+
+#define DECLARE_double(name) DECLARE_VARIABLE(double, D, name)
+#define DEFINE_double(name, val, txt) DEFINE_VARIABLE(double, D, name, val, txt)
+
+// Strings are trickier, because they're not a POD, so we can't
+// construct them at static-initialization time (instead they get
+// constructed at global-constructor time, which is much later). To
+// try to avoid crashes in that case, we use a char buffer to store
+// the string, which we can static-initialize, and then placement-new
+// into it later. It's not perfect, but the best we can do.
+
+namespace fLS {
+// The meaning of "string" might be different between now and when the
+// macros below get invoked (e.g., if someone is experimenting with
+// other string implementations that get defined after this file is
+// included). Save the current meaning now and use it in the macros.
+typedef std::string clstring;
+
+inline clstring* dont_pass0toDEFINE_string(char *stringspot,
+ const char *value) {
+ return new(stringspot) clstring(value);
+}
+inline clstring* dont_pass0toDEFINE_string(char *stringspot,
+ const clstring &value) {
+ return new(stringspot) clstring(value);
+}
+inline clstring* dont_pass0toDEFINE_string(char *stringspot,
+ int value);
+} // namespace fLS
+
+#define DECLARE_string(name) namespace fLS { extern ::fLS::clstring& FLAGS_##name; } \
+ using fLS::FLAGS_##name
+
+// We need to define a var named FLAGS_no##name so people don't define
+// --string and --nostring. And we need a temporary place to put val
+// so we don't have to evaluate it twice. Two great needs that go
+// great together!
+// The weird 'using' + 'extern' inside the fLS namespace is to work around
+// an unknown compiler bug/issue with the gcc 4.2.1 on SUSE 10. See
+// http://code.google.com/p/google-gflags/issues/detail?id=20
+#define DEFINE_string(name, val, txt) \
+ namespace fLS { \
+ using ::fLS::clstring; \
+ static union { void* align; char s[sizeof(clstring)]; } s_##name[2]; \
+ clstring* const FLAGS_no##name = ::fLS:: \
+ dont_pass0toDEFINE_string(s_##name[0].s, \
+ val); \
+ static ::google::FlagRegisterer o_##name( \
+ #name, "string", MAYBE_STRIPPED_HELP(txt), __FILE__, \
+ s_##name[0].s, new (s_##name[1].s) clstring(*FLAGS_no##name)); \
+ extern clstring& FLAGS_##name; \
+ using fLS::FLAGS_##name; \
+ clstring& FLAGS_##name = *FLAGS_no##name; \
+ } \
+ using fLS::FLAGS_##name
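+
+// Illustrative usage sketch (hypothetical flag name):
+//   DEFINE_string(log_dir, "/tmp", "where to write log files");
+// FLAGS_log_dir ends up as a clstring& aliasing the static s_log_dir
+// buffer, constructed via the placement-new helper above.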
+
+#endif // SWIG
+
+#endif // GOOGLE_GFLAGS_H_
diff --git a/extern/libmv/third_party/gflags/gflags_completions.cc b/extern/libmv/third_party/gflags/gflags_completions.cc
new file mode 100644
index 00000000000..a129611d8a1
--- /dev/null
+++ b/extern/libmv/third_party/gflags/gflags_completions.cc
@@ -0,0 +1,765 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// ---
+// Author: Dave Nicponski
+//
+// Bash-style command line flag completion for C++ binaries
+//
+// This module implements bash-style completions. It achieves this
+// goal in the following broad chunks:
+//
+// 1) Take a to-be-completed word, and examine it for search hints
+// 2) Identify all potentially matching flags
+// 2a) If there are no matching flags, do nothing.
+// 2b) If all matching flags share a common prefix longer than the
+// completion word, output just that matching prefix
+// 3) Categorize those flags to produce a rough ordering of relevance.
+// 4) Potentially trim the set of flags returned to a smaller number
+// that bash is happier with
+// 5) Output the matching flags in groups ordered by relevance.
+// 5a) Force bash to place most-relevant groups at the top of the list
+// 5b) Trim most flags' descriptions to fit on a single terminal line
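+//
+// Illustrative invocation (hypothetical binary and flag names):
+//   $ ./my_binary --tab_completion_word="--log"
+// might print something like:
+//   --logtostderr [false] log messages go to stderr instead of logfiles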
+
+
+#include "config.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h> // for strlen
+
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "gflags.h"
+
+#ifndef PATH_SEPARATOR
+#define PATH_SEPARATOR '/'
+#endif
+
+DEFINE_string(tab_completion_word, "",
+ "If non-empty, HandleCommandLineCompletions() will hijack the "
+ "process and attempt to do bash-style command line flag "
+ "completion on this value.");
+DEFINE_int32(tab_completion_columns, 80,
+ "Number of columns to use in output for tab completion");
+
+_START_GOOGLE_NAMESPACE_
+
+namespace {
+
+using std::set;
+using std::string;
+using std::vector;
+
+// Function prototypes and type forward declarations. Code may be
+// more easily understood if it is roughly ordered according to
+// control flow, rather than by C's "declare before use" ordering.
+struct CompletionOptions;
+struct NotableFlags;
+
+// The entry point if flag completion is to be used.
+static void PrintFlagCompletionInfo(void);
+
+
+// 1) Examine search word
+static void CanonicalizeCursorWordAndSearchOptions(
+ const string &cursor_word,
+ string *canonical_search_token,
+ CompletionOptions *options);
+
+static bool RemoveTrailingChar(string *str, char c);
+
+
+// 2) Find all matches
+static void FindMatchingFlags(
+ const vector<CommandLineFlagInfo> &all_flags,
+ const CompletionOptions &options,
+ const string &match_token,
+ set<const CommandLineFlagInfo *> *all_matches,
+ string *longest_common_prefix);
+
+static bool DoesSingleFlagMatch(
+ const CommandLineFlagInfo &flag,
+ const CompletionOptions &options,
+ const string &match_token);
+
+
+// 3) Categorize matches
+static void CategorizeAllMatchingFlags(
+ const set<const CommandLineFlagInfo *> &all_matches,
+ const string &search_token,
+ const string &module,
+ const string &package_dir,
+ NotableFlags *notable_flags);
+
+static void TryFindModuleAndPackageDir(
+ const vector<CommandLineFlagInfo> &all_flags,
+ string *module,
+ string *package_dir);
+
+
+// 4) Decide which flags to use
+static void FinalizeCompletionOutput(
+ const set<const CommandLineFlagInfo *> &matching_flags,
+ CompletionOptions *options,
+ NotableFlags *notable_flags,
+ vector<string> *completions);
+
+static void RetrieveUnusedFlags(
+ const set<const CommandLineFlagInfo *> &matching_flags,
+ const NotableFlags &notable_flags,
+ set<const CommandLineFlagInfo *> *unused_flags);
+
+
+// 5) Output matches
+static void OutputSingleGroupWithLimit(
+ const set<const CommandLineFlagInfo *> &group,
+ const string &line_indentation,
+ const string &header,
+ const string &footer,
+ bool long_output_format,
+ int *remaining_line_limit,
+ size_t *completion_elements_added,
+ vector<string> *completions);
+
+// (helpers for #5)
+static string GetShortFlagLine(
+ const string &line_indentation,
+ const CommandLineFlagInfo &info);
+
+static string GetLongFlagLine(
+ const string &line_indentation,
+ const CommandLineFlagInfo &info);
+
+
+//
+// Useful types
+
+// Try to deduce the intentions behind this completion attempt. Return the
+// canonical search term in 'canonical_search_token'. Search options
+// are returned in the various booleans, which should all have intuitive
+// semantics, with the possible exceptions of:
+// - return_all_matching_flags: Generally, we'll trim the number of
+// returned candidates to some small number, showing those that are
+// most likely to be useful first. If this is set, however, the user
+// really does want us to return every single flag as an option.
+// - force_no_update: Any time we output lines, all of which share a
+// common prefix, bash will 'helpfully' not even bother to show the
+// output, instead changing the current word to be that common prefix.
+// If it's clear this shouldn't happen, we'll set this boolean.
+struct CompletionOptions {
+ bool flag_name_substring_search;
+ bool flag_location_substring_search;
+ bool flag_description_substring_search;
+ bool return_all_matching_flags;
+ bool force_no_update;
+};
+
+// Notable flags are flags that are special or preferred for some
+// reason. For example, flags that are defined in the binary's module
+// are expected to be much more relevant than flags defined in some
+// other random location. These sets are specified roughly in precedence
+// order. Once a flag is placed in one of these 'higher' sets, it won't
+// be placed in any of the 'lower' sets.
+struct NotableFlags {
+ typedef set<const CommandLineFlagInfo *> FlagSet;
+ FlagSet perfect_match_flag;
+ FlagSet module_flags; // Found in module file
+ FlagSet package_flags; // Found in same directory as module file
+ FlagSet most_common_flags; // One of the XXX most commonly supplied flags
+ FlagSet subpackage_flags; // Found in subdirectories of package
+};
+
+
+//
+// Tab completion implementation - entry point
+static void PrintFlagCompletionInfo(void) {
+ string cursor_word = FLAGS_tab_completion_word;
+ string canonical_token;
+ CompletionOptions options = { };
+ CanonicalizeCursorWordAndSearchOptions(
+ cursor_word,
+ &canonical_token,
+ &options);
+
+ //VLOG(1) << "Identified canonical_token: '" << canonical_token << "'";
+
+ vector<CommandLineFlagInfo> all_flags;
+ set<const CommandLineFlagInfo *> matching_flags;
+ GetAllFlags(&all_flags);
+ //VLOG(2) << "Found " << all_flags.size() << " flags overall";
+
+ string longest_common_prefix;
+ FindMatchingFlags(
+ all_flags,
+ options,
+ canonical_token,
+ &matching_flags,
+ &longest_common_prefix);
+ //VLOG(1) << "Identified " << matching_flags.size() << " matching flags";
+ //VLOG(1) << "Identified " << longest_common_prefix
+ // << " as longest common prefix.";
+ if (longest_common_prefix.size() > canonical_token.size()) {
+ // There's actually a shared common prefix to all matching flags,
+ // so may as well output that and quit quickly.
+ //VLOG(1) << "The common prefix '" << longest_common_prefix
+ // << "' was longer than the token '" << canonical_token
+ // << "'. Returning just this prefix for completion.";
+ fprintf(stdout, "--%s", longest_common_prefix.c_str());
+ return;
+ }
+ if (matching_flags.empty()) {
+ //VLOG(1) << "There were no matching flags, returning nothing.";
+ return;
+ }
+
+ string module;
+ string package_dir;
+ TryFindModuleAndPackageDir(all_flags, &module, &package_dir);
+ //VLOG(1) << "Identified module: '" << module << "'";
+ //VLOG(1) << "Identified package_dir: '" << package_dir << "'";
+
+ NotableFlags notable_flags;
+ CategorizeAllMatchingFlags(
+ matching_flags,
+ canonical_token,
+ module,
+ package_dir,
+ &notable_flags);
+ //VLOG(2) << "Categorized matching flags:";
+ //VLOG(2) << " perfect_match: " << notable_flags.perfect_match_flag.size();
+ //VLOG(2) << " module: " << notable_flags.module_flags.size();
+ //VLOG(2) << " package: " << notable_flags.package_flags.size();
+ //VLOG(2) << " most common: " << notable_flags.most_common_flags.size();
+ //VLOG(2) << " subpackage: " << notable_flags.subpackage_flags.size();
+
+ vector<string> completions;
+ FinalizeCompletionOutput(
+ matching_flags,
+ &options,
+ &notable_flags,
+ &completions);
+
+ if (options.force_no_update)
+ completions.push_back("~");
+
+ //VLOG(1) << "Finalized with " << completions.size()
+ // << " chosen completions";
+
+ for (vector<string>::const_iterator it = completions.begin();
+ it != completions.end();
+ ++it) {
+ //VLOG(9) << " Completion entry: '" << *it << "'";
+ fprintf(stdout, "%s\n", it->c_str());
+ }
+}
+
+
+// 1) Examine search word (and helper method)
+static void CanonicalizeCursorWordAndSearchOptions(
+ const string &cursor_word,
+ string *canonical_search_token,
+ CompletionOptions *options) {
+ *canonical_search_token = cursor_word;
+ if (canonical_search_token->empty()) return;
+
+ // Get rid of leading quotes and dashes in the search term
+ if ((*canonical_search_token)[0] == '"')
+ *canonical_search_token = canonical_search_token->substr(1);
+ while ((*canonical_search_token)[0] == '-')
+ *canonical_search_token = canonical_search_token->substr(1);
+
+ options->flag_name_substring_search = false;
+ options->flag_location_substring_search = false;
+ options->flag_description_substring_search = false;
+ options->return_all_matching_flags = false;
+ options->force_no_update = false;
+
+ // Look for all search options we can deduce now. Do this by walking
+ // backwards through the term, looking for up to three '?' and up to
+ // one '+' as suffixed characters. Consume them if found, and remove
+ // them from the canonical search token.
+ int found_question_marks = 0;
+ int found_plusses = 0;
+ while (true) {
+ if (found_question_marks < 3 &&
+ RemoveTrailingChar(canonical_search_token, '?')) {
+ ++found_question_marks;
+ continue;
+ }
+ if (found_plusses < 1 &&
+ RemoveTrailingChar(canonical_search_token, '+')) {
+ ++found_plusses;
+ continue;
+ }
+ break;
+ }
+
+ switch (found_question_marks) { // all fallthroughs
+ case 3: options->flag_description_substring_search = true;
+ case 2: options->flag_location_substring_search = true;
+ case 1: options->flag_name_substring_search = true;
+ };
+
+ options->return_all_matching_flags = (found_plusses > 0);
+}
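+
+// Worked example of the canonicalization above (hypothetical input):
+// cursor_word "--foo??+" yields canonical_search_token "foo" with
+// flag_name_substring_search and flag_location_substring_search set,
+// and return_all_matching_flags set because of the trailing '+'.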
+
+// Returns true if a char was removed
+static bool RemoveTrailingChar(string *str, char c) {
+ if (str->empty()) return false;
+ if ((*str)[str->size() - 1] == c) {
+ *str = str->substr(0, str->size() - 1);
+ return true;
+ }
+ return false;
+}
+
+
+// 2) Find all matches (and helper methods)
+static void FindMatchingFlags(
+ const vector<CommandLineFlagInfo> &all_flags,
+ const CompletionOptions &options,
+ const string &match_token,
+ set<const CommandLineFlagInfo *> *all_matches,
+ string *longest_common_prefix) {
+ all_matches->clear();
+ bool first_match = true;
+ for (vector<CommandLineFlagInfo>::const_iterator it = all_flags.begin();
+ it != all_flags.end();
+ ++it) {
+ if (DoesSingleFlagMatch(*it, options, match_token)) {
+ all_matches->insert(&*it);
+ if (first_match) {
+ first_match = false;
+ *longest_common_prefix = it->name;
+ } else {
+ if (longest_common_prefix->empty() || it->name.empty()) {
+ longest_common_prefix->clear();
+ continue;
+ }
+ string::size_type pos = 0;
+ while (pos < longest_common_prefix->size() &&
+ pos < it->name.size() &&
+ (*longest_common_prefix)[pos] == it->name[pos])
+ ++pos;
+ longest_common_prefix->erase(pos);
+ }
+ }
+ }
+}
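+
+// Worked example of the prefix tracking above (hypothetical flags): if
+// "foo_bar" and "foo_baz" both match, longest_common_prefix ends up as
+// "foo_ba"; for a search token "foo", PrintFlagCompletionInfo() then
+// prints just "--foo_ba" and returns.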
+
+// Given a single flag, the parsed match options, and the canonical
+// search token, decide whether this flag is a candidate match for
+// subsequent analysis or filtering.
+static bool DoesSingleFlagMatch(
+ const CommandLineFlagInfo &flag,
+ const CompletionOptions &options,
+ const string &match_token) {
+ // Is there a prefix match?
+ string::size_type pos = flag.name.find(match_token);
+ if (pos == 0) return true;
+
+ // Is there a substring match if we want it?
+ if (options.flag_name_substring_search &&
+ pos != string::npos)
+ return true;
+
+ // Is there a location match if we want it?
+ if (options.flag_location_substring_search &&
+ flag.filename.find(match_token) != string::npos)
+ return true;
+
+ // TODO(daven): All searches should probably be case-insensitive
+ // (especially this one...)
+ if (options.flag_description_substring_search &&
+ flag.description.find(match_token) != string::npos)
+ return true;
+
+ return false;
+}
+
+// 3) Categorize matches (and helper method)
+
+// Given a set of matching flags, categorize them by
+// likely relevance to this specific binary
+static void CategorizeAllMatchingFlags(
+ const set<const CommandLineFlagInfo *> &all_matches,
+ const string &search_token,
+ const string &module, // empty if we couldn't find any
+ const string &package_dir, // empty if we couldn't find any
+ NotableFlags *notable_flags) {
+ notable_flags->perfect_match_flag.clear();
+ notable_flags->module_flags.clear();
+ notable_flags->package_flags.clear();
+ notable_flags->most_common_flags.clear();
+ notable_flags->subpackage_flags.clear();
+
+ for (set<const CommandLineFlagInfo *>::const_iterator it =
+ all_matches.begin();
+ it != all_matches.end();
+ ++it) {
+ //VLOG(2) << "Examining match '" << (*it)->name << "'";
+ //VLOG(7) << " filename: '" << (*it)->filename << "'";
+ string::size_type pos = string::npos;
+ if (!package_dir.empty())
+ pos = (*it)->filename.find(package_dir);
+ string::size_type slash = string::npos;
+ if (pos != string::npos) // candidate for package or subpackage match
+ slash = (*it)->filename.find(
+ PATH_SEPARATOR,
+ pos + package_dir.size() + 1);
+
+ if ((*it)->name == search_token) {
+ // Exact match on some flag's name
+ notable_flags->perfect_match_flag.insert(*it);
+ //VLOG(3) << "Result: perfect match";
+ } else if (!module.empty() && (*it)->filename == module) {
+ // Exact match on module filename
+ notable_flags->module_flags.insert(*it);
+ //VLOG(3) << "Result: module match";
+ } else if (!package_dir.empty() &&
+ pos != string::npos && slash == string::npos) {
+ // In the package, since there was no slash after the package portion
+ notable_flags->package_flags.insert(*it);
+ //VLOG(3) << "Result: package match";
+ } else if (false) {
+ // In the list of the XXX most commonly supplied flags overall
+ // TODO(daven): Compile this list.
+ //VLOG(3) << "Result: most-common match";
+ } else if (!package_dir.empty() &&
+ pos != string::npos && slash != string::npos) {
+ // In a subdirectory of the package
+ notable_flags->subpackage_flags.insert(*it);
+ //VLOG(3) << "Result: subpackage match";
+ }
+
+ //VLOG(3) << "Result: not special match";
+ }
+}
+
+static void PushNameWithSuffix(vector<string>* suffixes, const char* suffix) {
+ string s("/");
+ s += ProgramInvocationShortName();
+ s += suffix;
+ suffixes->push_back(s);
+}
+
+static void TryFindModuleAndPackageDir(
+ const vector<CommandLineFlagInfo> &all_flags,
+ string *module,
+ string *package_dir) {
+ module->clear();
+ package_dir->clear();
+
+ vector<string> suffixes;
+ // TODO(daven): There's some inherent ambiguity here - multiple directories
+ // could share the same trailing folder and file structure (and even worse,
+ // same file names), causing us to be unsure as to which of the two is the
+ // actual package for this binary. In this case, we'll arbitrarily choose.
+ PushNameWithSuffix(&suffixes, ".");
+ PushNameWithSuffix(&suffixes, "-main.");
+ PushNameWithSuffix(&suffixes, "_main.");
+ // These four are new but probably merited?
+ PushNameWithSuffix(&suffixes, "-test.");
+ PushNameWithSuffix(&suffixes, "_test.");
+ PushNameWithSuffix(&suffixes, "-unittest.");
+ PushNameWithSuffix(&suffixes, "_unittest.");
+
+ for (vector<CommandLineFlagInfo>::const_iterator it = all_flags.begin();
+ it != all_flags.end();
+ ++it) {
+ for (vector<string>::const_iterator suffix = suffixes.begin();
+ suffix != suffixes.end();
+ ++suffix) {
+ // TODO(daven): Make sure the match is near the end of the string
+ if (it->filename.find(*suffix) != string::npos) {
+ *module = it->filename;
+ string::size_type sep = it->filename.rfind(PATH_SEPARATOR);
+ *package_dir = it->filename.substr(0, (sep == string::npos) ? 0 : sep);
+ return;
+ }
+ }
+ }
+}
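+
+// Worked example (hypothetical paths): for a binary named "mkdoc" with a
+// flag defined in "tools/mkdoc_main.cc", the "_main." suffix matches, so
+// *module becomes "tools/mkdoc_main.cc" and *package_dir becomes "tools".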
+
+// Can't specialize template type on a locally defined type. Silly C++...
+struct DisplayInfoGroup {
+ const char* header;
+ const char* footer;
+ set<const CommandLineFlagInfo *> *group;
+
+ int SizeInLines() const {
+ int size_in_lines = static_cast<int>(group->size()) + 1;
+ if (strlen(header) > 0) {
+ size_in_lines++;
+ }
+ if (strlen(footer) > 0) {
+ size_in_lines++;
+ }
+ return size_in_lines;
+ }
+};
+
+// 4) Finalize and trim output flag set
+static void FinalizeCompletionOutput(
+ const set<const CommandLineFlagInfo *> &matching_flags,
+ CompletionOptions *options,
+ NotableFlags *notable_flags,
+ vector<string> *completions) {
+
+ // We want to output lines in groups. Each group needs to be indented
+ // the same to keep its lines together. Unless otherwise required,
+ // only 99 lines should be output to prevent bash from harassing the
+ // user.
+
+ // First, figure out which output groups we'll actually use. For each
+ // nonempty group, there will be ~3 lines of header & footer, plus all
+ // output lines themselves.
+ int max_desired_lines = // "999999 flags should be enough for anyone. -dave"
+ (options->return_all_matching_flags ? 999999 : 98);
+ int lines_so_far = 0;
+
+ vector<DisplayInfoGroup> output_groups;
+ bool perfect_match_found = false;
+ if (lines_so_far < max_desired_lines &&
+ !notable_flags->perfect_match_flag.empty()) {
+ perfect_match_found = true;
+ DisplayInfoGroup group =
+ { "",
+ "==========",
+ &notable_flags->perfect_match_flag };
+ lines_so_far += group.SizeInLines();
+ output_groups.push_back(group);
+ }
+ if (lines_so_far < max_desired_lines &&
+ !notable_flags->module_flags.empty()) {
+ DisplayInfoGroup group = {
+ "-* Matching module flags *-",
+ "===========================",
+ &notable_flags->module_flags };
+ lines_so_far += group.SizeInLines();
+ output_groups.push_back(group);
+ }
+ if (lines_so_far < max_desired_lines &&
+ !notable_flags->package_flags.empty()) {
+ DisplayInfoGroup group = {
+ "-* Matching package flags *-",
+ "============================",
+ &notable_flags->package_flags };
+ lines_so_far += group.SizeInLines();
+ output_groups.push_back(group);
+ }
+ if (lines_so_far < max_desired_lines &&
+ !notable_flags->most_common_flags.empty()) {
+ DisplayInfoGroup group = {
+ "-* Commonly used flags *-",
+ "=========================",
+ &notable_flags->most_common_flags };
+ lines_so_far += group.SizeInLines();
+ output_groups.push_back(group);
+ }
+ if (lines_so_far < max_desired_lines &&
+ !notable_flags->subpackage_flags.empty()) {
+ DisplayInfoGroup group = {
+ "-* Matching sub-package flags *-",
+ "================================",
+ &notable_flags->subpackage_flags };
+ lines_so_far += group.SizeInLines();
+ output_groups.push_back(group);
+ }
+
+ set<const CommandLineFlagInfo *> obscure_flags; // flags not notable
+ if (lines_so_far < max_desired_lines) {
+ RetrieveUnusedFlags(matching_flags, *notable_flags, &obscure_flags);
+ if (!obscure_flags.empty()) {
+ DisplayInfoGroup group = {
+ "-* Other flags *-",
+ "",
+ &obscure_flags };
+ lines_so_far += group.SizeInLines();
+ output_groups.push_back(group);
+ }
+ }
+
+ // Second, go through each of the chosen output groups and output
+ // as many of those flags as we can, while remaining below our limit
+ int remaining_lines = max_desired_lines;
+ size_t completions_output = 0;
+ int indent = static_cast<int>(output_groups.size()) - 1;
+ for (vector<DisplayInfoGroup>::const_iterator it =
+ output_groups.begin();
+ it != output_groups.end();
+ ++it, --indent) {
+ OutputSingleGroupWithLimit(
+ *it->group, // group
+ string(indent, ' '), // line indentation
+ string(it->header), // header
+ string(it->footer), // footer
+ perfect_match_found, // long format
+ &remaining_lines, // line limit - reduces this by number printed
+ &completions_output, // completions (not lines) added
+ completions); // produced completions
+ perfect_match_found = false;
+ }
+
+ if (completions_output != matching_flags.size()) {
+ options->force_no_update = false;
+ completions->push_back("~ (Remaining flags hidden) ~");
+ } else {
+ options->force_no_update = true;
+ }
+}
+
+static void RetrieveUnusedFlags(
+ const set<const CommandLineFlagInfo *> &matching_flags,
+ const NotableFlags &notable_flags,
+ set<const CommandLineFlagInfo *> *unused_flags) {
+ // Remove from 'matching_flags' set all members of the sets of
+ // flags we've already printed (specifically, those in notable_flags)
+ for (set<const CommandLineFlagInfo *>::const_iterator it =
+ matching_flags.begin();
+ it != matching_flags.end();
+ ++it) {
+ if (notable_flags.perfect_match_flag.count(*it) ||
+ notable_flags.module_flags.count(*it) ||
+ notable_flags.package_flags.count(*it) ||
+ notable_flags.most_common_flags.count(*it) ||
+ notable_flags.subpackage_flags.count(*it))
+ continue;
+ unused_flags->insert(*it);
+ }
+}
+
+// 5) Output matches (and helper methods)
+
+static void OutputSingleGroupWithLimit(
+ const set<const CommandLineFlagInfo *> &group,
+ const string &line_indentation,
+ const string &header,
+ const string &footer,
+ bool long_output_format,
+ int *remaining_line_limit,
+ size_t *completion_elements_output,
+ vector<string> *completions) {
+ if (group.empty()) return;
+ if (!header.empty()) {
+ if (*remaining_line_limit < 2) return;
+ *remaining_line_limit -= 2;
+ completions->push_back(line_indentation + header);
+ completions->push_back(line_indentation + string(header.size(), '-'));
+ }
+ for (set<const CommandLineFlagInfo *>::const_iterator it = group.begin();
+ it != group.end() && *remaining_line_limit > 0;
+ ++it) {
+ --*remaining_line_limit;
+ ++*completion_elements_output;
+ completions->push_back(
+ (long_output_format
+ ? GetLongFlagLine(line_indentation, **it)
+ : GetShortFlagLine(line_indentation, **it)));
+ }
+ if (!footer.empty()) {
+ if (*remaining_line_limit < 1) return;
+ --*remaining_line_limit;
+ completions->push_back(line_indentation + footer);
+ }
+}
+
+static string GetShortFlagLine(
+ const string &line_indentation,
+ const CommandLineFlagInfo &info) {
+ string prefix =
+ line_indentation + "--" + info.name + " [" +
+ (info.type == "string" ?
+ ("'" + info.default_value + "'") :
+ info.default_value)
+ + "] ";
+ int remainder =
+ FLAGS_tab_completion_columns - static_cast<int>(prefix.size());
+ string suffix;
+ if (remainder > 0)
+ suffix =
+ (static_cast<int>(info.description.size()) > remainder ?
+ (info.description.substr(0, remainder - 3) + "...").c_str() :
+ info.description.c_str());
+ return prefix + suffix;
+}
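+
+// Example short line (using the --helpon flag defined in
+// gflags_reporting.cc):
+//   --helpon [''] show help on the modules named by this flag value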
+
+static string GetLongFlagLine(
+ const string &line_indentation,
+ const CommandLineFlagInfo &info) {
+
+ string output = DescribeOneFlag(info);
+
+ // Replace '-' with '--', and remove trailing newline before appending
+ // the module definition location.
+ string old_flagname = "-" + info.name;
+ output.replace(
+ output.find(old_flagname),
+ old_flagname.size(),
+ "-" + old_flagname);
+ // Stick a newline and indentation in front of the type and default
+ // portions of DescribeOneFlag()'s description.
+ static const char kNewlineWithIndent[] = "\n ";
+ output.replace(output.find(" type:"), 1, string(kNewlineWithIndent));
+ output.replace(output.find(" default:"), 1, string(kNewlineWithIndent));
+ output = line_indentation + " Details for '--" + info.name + "':\n" +
+ output + " defined: " + info.filename;
+
+ // Eliminate any doubled newlines that crept in. Specifically, if
+ // DescribeOneFlag() decided to break the line just before "type"
+ // or "default", we don't want to introduce an extra blank line
+ static const string line_of_spaces(FLAGS_tab_completion_columns, ' ');
+ static const char kDoubledNewlines[] = "\n \n";
+ for (string::size_type newlines = output.find(kDoubledNewlines);
+ newlines != string::npos;
+ newlines = output.find(kDoubledNewlines))
+ // Replace each 'doubled newline' with a single newline
+ output.replace(newlines, sizeof(kDoubledNewlines) - 1, string("\n"));
+
+ for (string::size_type newline = output.find('\n');
+ newline != string::npos;
+ newline = output.find('\n')) {
+ int newline_pos = static_cast<int>(newline) % FLAGS_tab_completion_columns;
+ int missing_spaces = FLAGS_tab_completion_columns - newline_pos;
+ output.replace(newline, 1, line_of_spaces, 1, missing_spaces);
+ }
+ return output;
+}
+} // anonymous
+
+void HandleCommandLineCompletions(void) {
+ if (FLAGS_tab_completion_word.empty()) return;
+ PrintFlagCompletionInfo();
+ exit(0);
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/gflags/gflags_completions.h b/extern/libmv/third_party/gflags/gflags_completions.h
new file mode 100644
index 00000000000..9d9ce7a5f75
--- /dev/null
+++ b/extern/libmv/third_party/gflags/gflags_completions.h
@@ -0,0 +1,121 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// ---
+// Author: Dave Nicponski
+//
+// Implement helpful bash-style command line flag completions
+//
+// ** Functional API:
+// HandleCommandLineCompletions() should be called early during
+// program startup, but after command line flag code has been
+// initialized, such as the beginning of HandleCommandLineHelpFlags().
+// It checks the value of the flag --tab_completion_word. If this
+// flag is empty, nothing happens here. If it contains a string,
+// however, then HandleCommandLineCompletions() will hijack the
+// process, attempting to identify the intention behind this
+// completion. Regardless of the outcome of this deduction, the
+// process will be terminated, similar to --helpshort flag
+// handling.
+//
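+// A minimal usage sketch (assuming the usual gflags setup; flag parsing
+// must already have run so --tab_completion_word has its value):
+//
+//   int main(int argc, char** argv) {
+//     google::ParseCommandLineNonHelpFlags(&argc, &argv, true);
+//     google::HandleCommandLineCompletions();  // exits if completing
+//     ...
+//   }
+//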
+// ** Overview of Bash completions:
+// Bash can be told to programmatically determine completions for the
+// current 'cursor word'. It does this by (in this case) invoking a
+// command with some additional arguments identifying the command
+// being executed, the word being completed, and the previous word
+// (if any). Bash then expects a sequence of output lines to be
+// printed to stdout. If these lines all contain a common prefix
+// longer than the cursor word, bash will replace the cursor word
+// with that common prefix, and display nothing. If there isn't such
+// a common prefix, bash will display the lines in pages using 'more'.
+//
+// ** Strategy taken for command line completions:
+// If we can deduce either the exact flag intended, or a common flag
+// prefix, we'll output exactly that. Otherwise, if information
+// must be displayed to the user, we'll take the opportunity to add
+// some helpful information beyond just the flag name (specifically,
+// we'll include the default flag value and as much of the flag's
+// description as can fit on a single terminal line width, as specified
+// by the flag --tab_completion_columns). Furthermore, we'll try to
+// make bash order the output such that the most useful or relevant
+// flags are the most likely to be shown at the top.
+//
+// ** Additional features:
+// To assist in finding that one really useful flag, substring matching
+// was implemented. Before pressing a <TAB> to get completion for the
+// current word, you can append one or more '?' to the flag to do
+// substring matching. Here are the semantics:
+// --foo<TAB> Show me all flags with names prefixed by 'foo'
+// --foo?<TAB> Show me all flags with 'foo' somewhere in the name
+// --foo??<TAB> Same as prior case, but also search in module
+// definition path for 'foo'
+// --foo???<TAB> Same as prior case, but also search in flag
+// descriptions for 'foo'
+// Finally, we'll trim the output to a relatively small number of
+// flags to keep bash quiet about the verbosity of output. If one
+// really wanted to see all possible matches, appending a '+' to the
+// search word will force the exhaustive list of matches to be printed.
+//
+// ** How to have bash accept completions from a binary:
+// Bash requires that it be informed about each command that programmatic
+// completion should be enabled for. An example addition to a .bashrc
+// file would be (your path to the gflags_completions.sh file may differ):
+
+/*
+$ complete -o bashdefault -o default -o nospace -C \
+ '/usr/local/bin/gflags_completions.sh --tab_completion_columns $COLUMNS' \
+ time env binary_name another_binary [...]
+*/
+
+// This would allow the following to work:
+// $ /path/to/binary_name --vmodule<TAB>
+// Or:
+// $ ./bin/path/another_binary --gfs_u<TAB>
+// (etc)
+//
+// Sadly, it appears that bash gives no easy way to force this behavior for
+// all commands. That's where the "time" in the above example comes in.
+// If you haven't specifically added a command to the list of completion
+// supported commands, you can still get completions by prefixing the
+// entire command with "env".
+// $ env /some/brand/new/binary --vmod<TAB>
+// Assuming that "binary" is a newly compiled binary, this should still
+// produce the expected completion output.
+
+
+#ifndef GOOGLE_GFLAGS_COMPLETIONS_H_
+#define GOOGLE_GFLAGS_COMPLETIONS_H_
+
+namespace google {
+
+void HandleCommandLineCompletions(void);
+
+}
+
+#endif // GOOGLE_GFLAGS_COMPLETIONS_H_
diff --git a/extern/libmv/third_party/gflags/gflags_reporting.cc b/extern/libmv/third_party/gflags/gflags_reporting.cc
new file mode 100644
index 00000000000..fa3024d974e
--- /dev/null
+++ b/extern/libmv/third_party/gflags/gflags_reporting.cc
@@ -0,0 +1,446 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Ray Sidney
+// Revamped and reorganized by Craig Silverstein
+//
+// This file contains code for handling the 'reporting' flags. These
+// are flags that, when present, cause the program to report some
+// information and then exit. --help and --version are the canonical
+// reporting flags, but we also have flags like --helpxml, etc.
+//
+// There's only one function that's meant to be called externally:
+// HandleCommandLineHelpFlags(). (Well, actually, ShowUsageWithFlags(),
+// ShowUsageWithFlagsRestrict(), and DescribeOneFlag() can be called
+// externally too, but there's little need for it.) These are all
+// declared in the main commandlineflags.h header file.
+//
+// HandleCommandLineHelpFlags() will check what 'reporting' flags have
+// been defined, if any -- the "help" part of the function name is a
+// bit misleading -- and do the relevant reporting. It should be
+// called after all flag-values have been assigned, that is, after
+// parsing the command-line.
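+//
+// A minimal usage sketch (hedged; ParseCommandLineFlags() already calls
+// this for you, so a direct call is mainly needed with
+// ParseCommandLineNonHelpFlags()):
+//
+//   google::ParseCommandLineNonHelpFlags(&argc, &argv, true);
+//   // ... fix up flag values if needed ...
+//   google::HandleCommandLineHelpFlags();  // may exit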
+
+#include "config.h"
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <assert.h>
+#include <string>
+#include <vector>
+#include "gflags.h"
+#include "gflags_completions.h"
+
+#ifndef PATH_SEPARATOR
+#define PATH_SEPARATOR '/'
+#endif
+
+// The 'reporting' flags. They all call exit().
+DEFINE_bool(help, false,
+ "show help on all flags [tip: all flags can have two dashes]");
+DEFINE_bool(helpfull, false,
+ "show help on all flags -- same as -help");
+DEFINE_bool(helpshort, false,
+ "show help on only the main module for this program");
+DEFINE_string(helpon, "",
+ "show help on the modules named by this flag value");
+DEFINE_string(helpmatch, "",
+ "show help on modules whose name contains the specified substr");
+DEFINE_bool(helppackage, false,
+ "show help on all modules in the main package");
+DEFINE_bool(helpxml, false,
+ "produce an xml version of help");
+DEFINE_bool(version, false,
+ "show version and build info and exit");
+
+_START_GOOGLE_NAMESPACE_
+
+using std::string;
+using std::vector;
+
+// --------------------------------------------------------------------
+// DescribeOneFlag()
+// DescribeOneFlagInXML()
+// Routines that pretty-print info about a flag. These use
+// a CommandLineFlagInfo, which is the way the commandlineflags
+// API exposes static info about a flag.
+// --------------------------------------------------------------------
+
+static const int kLineLength = 80;
+
+static void AddString(const string& s,
+ string* final_string, int* chars_in_line) {
+ const int slen = static_cast<int>(s.length());
+ if (*chars_in_line + 1 + slen >= kLineLength) { // < 80 chars/line
+ *final_string += "\n ";
+ *chars_in_line = 6;
+ } else {
+ *final_string += " ";
+ *chars_in_line += 1;
+ }
+ *final_string += s;
+ *chars_in_line += slen;
+}
+
+static string PrintStringFlagsWithQuotes(const CommandLineFlagInfo& flag,
+ const string& text, bool current) {
+ const char* c_string = (current ? flag.current_value.c_str() :
+ flag.default_value.c_str());
+ if (strcmp(flag.type.c_str(), "string") == 0) { // add quotes for strings
+ return text + ": \"" + c_string + "\"";
+ } else {
+ return text + ": " + c_string;
+ }
+}
+
+// Create a descriptive string for a flag.
+// Goes to some trouble to make pretty line breaks.
+string DescribeOneFlag(const CommandLineFlagInfo& flag) {
+ string main_part = (string(" -") + flag.name +
+ " (" + flag.description + ')');
+ const char* c_string = main_part.c_str();
+ int chars_left = static_cast<int>(main_part.length());
+ string final_string = "";
+ int chars_in_line = 0; // how many chars in current line so far?
+ while (1) {
+ assert(static_cast<size_t>(chars_left) == strlen(c_string)); // Unless there's a \0 in there?
+ const char* newline = strchr(c_string, '\n');
+ if (newline == NULL && chars_in_line+chars_left < kLineLength) {
+ // The whole remainder of the string fits on this line
+ final_string += c_string;
+ chars_in_line += chars_left;
+ break;
+ }
+ if (newline != NULL && newline - c_string < kLineLength - chars_in_line) {
+ int n = static_cast<int>(newline - c_string);
+ final_string.append(c_string, n);
+ chars_left -= n + 1;
+ c_string += n + 1;
+ } else {
+ // Find the last whitespace on this 80-char line
+ int whitespace = kLineLength-chars_in_line-1; // < 80 chars/line
+ while ( whitespace > 0 && !isspace(c_string[whitespace]) ) {
+ --whitespace;
+ }
+ if (whitespace <= 0) {
+ // Couldn't find any whitespace to make a line break. Just dump the
+ // rest out!
+ final_string += c_string;
+ chars_in_line = kLineLength; // next part gets its own line for sure!
+ break;
+ }
+ final_string += string(c_string, whitespace);
+ chars_in_line += whitespace;
+ while (isspace(c_string[whitespace])) ++whitespace;
+ c_string += whitespace;
+ chars_left -= whitespace;
+ }
+ if (*c_string == '\0')
+ break;
+ final_string += "\n ";
+ chars_in_line = 6;
+ }
+
+ // Append data type
+ AddString(string("type: ") + flag.type, &final_string, &chars_in_line);
+ // The listed default value will be the actual default from the flag
+ // definition in the originating source file, unless the value has
+ // subsequently been modified using SetCommandLineOptionWithMode() with mode
+ // SET_FLAGS_DEFAULT, or by setting FLAGS_foo = bar before initializing.
+ AddString(PrintStringFlagsWithQuotes(flag, "default", false), &final_string,
+ &chars_in_line);
+ if (!flag.is_default) {
+ AddString(PrintStringFlagsWithQuotes(flag, "currently", true),
+ &final_string, &chars_in_line);
+ }
+
+ final_string += '\n';
+ return final_string;
+}
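+
+// Example output, roughly (for the --helpxml flag defined above):
+//   -helpxml (produce an xml version of help) type: bool default: false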
+
+// Simple routine to xml-escape a string: escape & and < only.
+static string XMLText(const string& txt) {
+ string ans = txt;
+ for (string::size_type pos = 0; (pos = ans.find("&", pos)) != string::npos; )
+ ans.replace(pos++, 1, "&amp;");
+ for (string::size_type pos = 0; (pos = ans.find("<", pos)) != string::npos; )
+ ans.replace(pos++, 1, "&lt;");
+ return ans;
+}
+
+static void AddXMLTag(string* r, const char* tag, const string& txt) {
+ *r += ('<');
+ *r += (tag);
+ *r += ('>');
+ *r += (XMLText(txt));
+ *r += ("</");
+ *r += (tag);
+ *r += ('>');
+}
+
+static string DescribeOneFlagInXML(const CommandLineFlagInfo& flag) {
+ // The file and flagname could have been attributes, but default
+ // and meaning need to avoid attribute normalization. This way it
+ // can be parsed by simple programs, in addition to xml parsers.
+ string r("<flag>");
+ AddXMLTag(&r, "file", flag.filename);
+ AddXMLTag(&r, "name", flag.name);
+ AddXMLTag(&r, "meaning", flag.description);
+ AddXMLTag(&r, "default", flag.default_value);
+ AddXMLTag(&r, "current", flag.current_value);
+ AddXMLTag(&r, "type", flag.type);
+ r += "</flag>";
+ return r;
+}
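+
+// Example output (for the --version flag defined above; <file> abridged):
+//   <flag><file>gflags_reporting.cc</file><name>version</name>
+//   <meaning>show version and build info and exit</meaning>
+//   <default>false</default><current>false</current><type>bool</type></flag>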
+
+// --------------------------------------------------------------------
+// ShowUsageWithFlags()
+// ShowUsageWithFlagsRestrict()
+// ShowXMLOfFlags()
+// These routines variously expose the registry's list of flag
+// values. ShowUsage*() prints the flag-value information
+// to stdout in a user-readable format (that's what --help uses).
+// The Restrict() version limits what flags are shown.
+// ShowXMLOfFlags() prints the flag-value information to stdout
+// in a machine-readable format. In all cases, the flags are
+// sorted: first by filename they are defined in, then by flagname.
+// --------------------------------------------------------------------
+
+static const char* Basename(const char* filename) {
+ const char* sep = strrchr(filename, PATH_SEPARATOR);
+ return sep ? sep + 1 : filename;
+}
+
+static string Dirname(const string& filename) {
+ string::size_type sep = filename.rfind(PATH_SEPARATOR);
+ return filename.substr(0, (sep == string::npos) ? 0 : sep);
+}
+
+// Test whether a filename contains at least one of the substrings.
+static bool FileMatchesSubstring(const string& filename,
+ const vector<string>& substrings) {
+ for (vector<string>::const_iterator target = substrings.begin();
+ target != substrings.end();
+ ++target) {
+ if (strstr(filename.c_str(), target->c_str()) != NULL)
+ return true;
+ // If the substring starts with a '/', that means that we want
+ // the string to be at the beginning of a directory component.
+ // That should match the first directory component as well, so
+ // we allow '/foo' to match a filename of 'foo'.
+ if (!target->empty() && (*target)[0] == '/' &&
+ strncmp(filename.c_str(), target->c_str() + 1,
+ strlen(target->c_str() + 1)) == 0)
+ return true;
+ }
+ return false;
+}
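+
+// Worked example of the '/' special case above: the substring
+// "/fileutil." matches "base/fileutil.cc" via strstr(), and also a bare
+// "fileutil.cc" via the prefix comparison.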
+
+// Show help for every filename which matches any of the target substrings.
+// If substrings is empty, shows help for every file. If a flag's help message
+// has been stripped (e.g. by adding '#define STRIP_FLAG_HELP 1' before
+// including gflags/gflags.h), then this flag will not be displayed by
+// '--help' and its variants.
+static void ShowUsageWithFlagsMatching(const char *argv0,
+ const vector<string> &substrings) {
+ fprintf(stdout, "%s: %s\n", Basename(argv0), ProgramUsage());
+
+ vector<CommandLineFlagInfo> flags;
+ GetAllFlags(&flags); // flags are sorted by filename, then flagname
+
+ string last_filename; // so we know when we're at a new file
+ bool first_directory = true; // controls blank lines between dirs
+ bool found_match = false; // stays false iff no dir matches restrict
+ for (vector<CommandLineFlagInfo>::const_iterator flag = flags.begin();
+ flag != flags.end();
+ ++flag) {
+ if (substrings.empty() ||
+ FileMatchesSubstring(flag->filename, substrings)) {
+ // If the flag has been stripped, pretend that it doesn't exist.
+ if (flag->description == kStrippedFlagHelp) continue;
+ found_match = true; // this flag passed the match!
+ if (flag->filename != last_filename) { // new file
+ if (Dirname(flag->filename) != Dirname(last_filename)) { // new dir!
+ if (!first_directory)
+ fprintf(stdout, "\n\n"); // put blank lines between directories
+ first_directory = false;
+ }
+ fprintf(stdout, "\n Flags from %s:\n", flag->filename.c_str());
+ last_filename = flag->filename;
+ }
+ // Now print this flag
+ fprintf(stdout, "%s", DescribeOneFlag(*flag).c_str());
+ }
+ }
+ if (!found_match && !substrings.empty()) {
+ fprintf(stdout, "\n No modules matched: use -help\n");
+ }
+}
+
+void ShowUsageWithFlagsRestrict(const char *argv0, const char *restrict) {
+ vector<string> substrings;
+ if (restrict != NULL && *restrict != '\0') {
+ substrings.push_back(restrict);
+ }
+ ShowUsageWithFlagsMatching(argv0, substrings);
+}
+
+void ShowUsageWithFlags(const char *argv0) {
+ ShowUsageWithFlagsRestrict(argv0, "");
+}
+
+// Convert the help, program, and usage to xml.
+static void ShowXMLOfFlags(const char *prog_name) {
+ vector<CommandLineFlagInfo> flags;
+ GetAllFlags(&flags); // flags are sorted: by filename, then flagname
+
+ // XML. There is no corresponding schema yet
+ fprintf(stdout, "<?xml version=\"1.0\"?>\n");
+ // The document
+ fprintf(stdout, "<AllFlags>\n");
+ // the program name and usage
+ fprintf(stdout, "<program>%s</program>\n",
+ XMLText(Basename(prog_name)).c_str());
+ fprintf(stdout, "<usage>%s</usage>\n",
+ XMLText(ProgramUsage()).c_str());
+ // All the flags
+ for (vector<CommandLineFlagInfo>::const_iterator flag = flags.begin();
+ flag != flags.end();
+ ++flag) {
+ if (flag->description != kStrippedFlagHelp)
+ fprintf(stdout, "%s\n", DescribeOneFlagInXML(*flag).c_str());
+ }
+ // The end of the document
+ fprintf(stdout, "</AllFlags>\n");
+}
+
+// --------------------------------------------------------------------
+// ShowVersion()
+// Called upon --version. Prints build-related info.
+// --------------------------------------------------------------------
+
+static void ShowVersion() {
+ fprintf(stdout, "%s\n", ProgramInvocationShortName());
+ // TODO: add other stuff, like a timestamp, who built it, what
+ // target they built, etc.
+
+# if !defined(NDEBUG)
+ fprintf(stdout, "Debug build (NDEBUG not #defined)\n");
+# endif
+}
+
+static void AppendPrognameStrings(vector<string>* substrings,
+ const char* progname) {
+ string r("/");
+ r += progname;
+ substrings->push_back(r + ".");
+ substrings->push_back(r + "-main.");
+ substrings->push_back(r + "_main.");
+}
+
+// --------------------------------------------------------------------
+// HandleCommandLineHelpFlags()
+// Checks all the 'reporting' commandline flags to see if any
+// have been set. If so, handles them appropriately. Note
+// that all of them, by definition, cause the program to exit
+// if they trigger.
+// --------------------------------------------------------------------
+
+void HandleCommandLineHelpFlags() {
+ const char* progname = ProgramInvocationShortName();
+ extern void (*commandlineflags_exitfunc)(int); // in gflags.cc
+
+ HandleCommandLineCompletions();
+
+ vector<string> substrings;
+ AppendPrognameStrings(&substrings, progname);
+
+ if (FLAGS_helpshort) {
+ // show only flags related to this binary:
+ // E.g. for fileutil.cc, we want flags whose filename contains "/fileutil."
+ ShowUsageWithFlagsMatching(progname, substrings);
+ commandlineflags_exitfunc(1); // almost certainly exit()
+
+ } else if (FLAGS_help || FLAGS_helpfull) {
+ // show all options
+ ShowUsageWithFlagsRestrict(progname, ""); // empty restrict
+ commandlineflags_exitfunc(1);
+
+ } else if (!FLAGS_helpon.empty()) {
+ string restrict = "/" + FLAGS_helpon + ".";
+ ShowUsageWithFlagsRestrict(progname, restrict.c_str());
+ commandlineflags_exitfunc(1);
+
+ } else if (!FLAGS_helpmatch.empty()) {
+ ShowUsageWithFlagsRestrict(progname, FLAGS_helpmatch.c_str());
+ commandlineflags_exitfunc(1);
+
+ } else if (FLAGS_helppackage) {
+ // Shows help for all files in the same directory as main(). We
+ // don't want to resort to looking at dirname(progname), because
+ // the user can pick progname, and it may not relate to the file
+ // where main() resides. So instead, we search the flags for a
+ // filename like "/progname.cc", and take the dirname of that.
+ vector<CommandLineFlagInfo> flags;
+ GetAllFlags(&flags);
+ string last_package;
+ for (vector<CommandLineFlagInfo>::const_iterator flag = flags.begin();
+ flag != flags.end();
+ ++flag) {
+ if (!FileMatchesSubstring(flag->filename, substrings))
+ continue;
+ const string package = Dirname(flag->filename) + "/";
+ if (package != last_package) {
+ ShowUsageWithFlagsRestrict(progname, package.c_str());
+ if (!last_package.empty()) { // means this isn't our first pkg
+ fprintf(stderr, "WARNING: Multiple packages contain a file=%s\n",
+ progname);
+ }
+ last_package = package;
+ }
+ }
+ if (last_package.empty()) { // never found a package to print
+ fprintf(stderr, "WARNING: Unable to find a package for file=%s\n",
+ progname);
+ }
+ commandlineflags_exitfunc(1);
+
+ } else if (FLAGS_helpxml) {
+ ShowXMLOfFlags(progname);
+ commandlineflags_exitfunc(1);
+
+ } else if (FLAGS_version) {
+ ShowVersion();
+ // Unlike help, we may be asking for version in a script, so return 0
+ commandlineflags_exitfunc(0);
+ }
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/gflags/mutex.h b/extern/libmv/third_party/gflags/mutex.h
new file mode 100644
index 00000000000..6e1e8976b6d
--- /dev/null
+++ b/extern/libmv/third_party/gflags/mutex.h
@@ -0,0 +1,349 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// ---
+// Author: Craig Silverstein.
+//
+// A simple mutex wrapper, supporting locks and read-write locks.
+// You should assume the locks are *not* re-entrant.
+//
+// To use: you should define the following macros in your configure.ac:
+// ACX_PTHREAD
+// AC_RWLOCK
+// The latter is defined in ../autoconf.
+//
+// This class is meant to be internal-only and should be wrapped by an
+// internal namespace. Before you use this module, please give the
+// name of your internal namespace for this module. Or, if you want
+// to expose it, you'll want to move it to the Google namespace. We
+// cannot put this class in global namespace because there can be some
+// problems when we have multiple versions of Mutex in each shared object.
+//
+// NOTE: by default, we have #ifdef'ed out the TryLock() method.
+// This is for two reasons:
+// 1) TryLock() under Windows is a bit annoying (it requires a
+// #define to be defined very early).
+// 2) TryLock() is broken for NO_THREADS mode, at least in NDEBUG
+// mode.
+// If you need TryLock(), and either these two caveats are not a
+// problem for you, or you're willing to work around them, then
+// feel free to #define GMUTEX_TRYLOCK, or to remove the #ifdefs
+// in the code below.
+//
+// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
+// http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
+// Because of that, we might as well use Windows locks for
+// Cygwin. They seem to be more reliable than the Cygwin pthreads layer.
+//
+// TRICKY IMPLEMENTATION NOTE:
+// This class is designed to be safe to use during
+// dynamic-initialization -- that is, by global constructors that are
+// run before main() starts. The issue in this case is that
+// dynamic-initialization happens in an unpredictable order, and it
+// could be that someone else's dynamic initializer could call a
+// function that tries to acquire this mutex -- but that all happens
+// before this mutex's constructor has run. (This can happen even if
+// the mutex and the function that uses the mutex are in the same .cc
+// file.) Basically, because Mutex does non-trivial work in its
+// constructor, it's not, in the naive implementation, safe to use
+// before dynamic initialization has run on it.
+//
+// The solution used here is to pair the actual mutex primitive with a
+// bool that is set to true when the mutex is dynamically initialized.
+// (Before that it's false.) Then we modify all mutex routines to
+// look at the bool, and not try to lock/unlock until the bool makes
+// it to true (which happens after the Mutex constructor has run.)
+//
+// This works because before main() starts -- particularly, during
+// dynamic initialization -- there are no threads, so a) it's ok that
+// the mutex operations are a no-op, since we don't need locking then
+// anyway; and b) we can be quite confident our bool won't change
+// state between a call to Lock() and a call to Unlock() (that would
+// require a global constructor in one translation unit to call Lock()
+// and another global constructor in another translation unit to call
+// Unlock() later, which is pretty perverse).
+//
+// That said, it's tricky, and can conceivably fail; it's safest to
+// avoid trying to acquire a mutex in a global constructor, if you
+// can. One way it can fail is that a really smart compiler might
+// initialize the bool to true at static-initialization time (too
+// early) rather than at dynamic-initialization time. To discourage
+// that, we set is_safe_ to true in code (not the constructor
+// colon-initializer) and set it to true via a function that always
+// evaluates to true, but that the compiler can't know always
+// evaluates to true. This should be good enough.
+//
+// A related issue is code that could try to access the mutex
+// after it's been destroyed in the global destructors (because
+// the Mutex global destructor runs before some other global
+// destructor, that tries to acquire the mutex). The way we
+// deal with this is by taking a constructor arg that global
+// mutexes should pass in, that causes the destructor to do no
+// work. We still depend on the compiler not doing anything
+// weird to a Mutex's memory after it is destroyed, but for a
+// static global variable, that's pretty safe.
+
+#ifndef GOOGLE_MUTEX_H_
+#define GOOGLE_MUTEX_H_
+
+#include "config.h" // to figure out pthreads support
+
+#if defined(NO_THREADS)
+ typedef int MutexType; // to keep a lock-count
+#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
+# define WIN32_LEAN_AND_MEAN // We only need minimal includes
+# ifdef GMUTEX_TRYLOCK
+ // We need Windows NT or later for TryEnterCriticalSection(). If you
+ // don't need that functionality, you can remove these _WIN32_WINNT
+ // lines, and change TryLock() to assert(0) or something.
+# ifndef _WIN32_WINNT
+# define _WIN32_WINNT 0x0400
+# endif
+# endif
+# include <windows.h>
+ typedef CRITICAL_SECTION MutexType;
+#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
+ // Needed for pthread_rwlock_*. If it causes problems, you could take it
+ // out, but then you'd have to unset HAVE_RWLOCK (at least on linux -- it
+ // *does* cause problems for FreeBSD, or MacOSX, but isn't needed
+ // for locking there.)
+# ifdef __linux__
+# define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls
+# endif
+# include <pthread.h>
+ typedef pthread_rwlock_t MutexType;
+#elif defined(HAVE_PTHREAD)
+# include <pthread.h>
+ typedef pthread_mutex_t MutexType;
+#else
+# error Need to implement mutex.h for your architecture, or #define NO_THREADS
+#endif
+
+#include <assert.h>
+#include <stdlib.h> // for abort()
+
+#define MUTEX_NAMESPACE gflags_mutex_namespace
+
+namespace MUTEX_NAMESPACE {
+
+class Mutex {
+ public:
+ // This is used for the single-arg constructor
+ enum LinkerInitialized { LINKER_INITIALIZED };
+
+ // Create a Mutex that is not held by anybody. This constructor is
+ // typically used for Mutexes allocated on the heap or the stack.
+ inline Mutex();
+ // This constructor should be used for global, static Mutex objects.
+ // It inhibits work being done by the destructor, which makes it
+ // safer for code that tries to acquire this mutex in its global
+ // destructor.
+ inline Mutex(LinkerInitialized);
+
+ // Destructor
+ inline ~Mutex();
+
+ inline void Lock(); // Block if needed until free then acquire exclusively
+ inline void Unlock(); // Release a lock acquired via Lock()
+#ifdef GMUTEX_TRYLOCK
+ inline bool TryLock(); // If free, Lock() and return true, else return false
+#endif
+ // Note that on systems that don't support read-write locks, these may
+ // be implemented as synonyms to Lock() and Unlock(). So you can use
+ // these for efficiency, but don't use them anyplace where being able
+ // to do shared reads is necessary to avoid deadlock.
+ inline void ReaderLock(); // Block until free or shared then acquire a share
+ inline void ReaderUnlock(); // Release a read share of this Mutex
+ inline void WriterLock() { Lock(); } // Acquire an exclusive lock
+ inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
+
+ private:
+ MutexType mutex_;
+ // We want to make sure that the compiler sets is_safe_ to true only
+ // when we tell it to, and never assumes is_safe_ is always true.
+ // volatile is the most reliable way to do that.
+ volatile bool is_safe_;
+ // This indicates which constructor was called.
+ bool destroy_;
+
+ inline void SetIsSafe() { is_safe_ = true; }
+
+ // Catch the error of writing Mutex when intending MutexLock.
+ Mutex(Mutex* /*ignored*/) {}
+ // Disallow "evil" constructors
+ Mutex(const Mutex&);
+ void operator=(const Mutex&);
+};
+
+// Now the implementation of Mutex for various systems
+#if defined(NO_THREADS)
+
+// When we don't have threads, we can be either reading or writing,
+// but not both. We can have lots of readers at once (in no-threads
+// mode, that's most likely to happen in recursive function calls),
+// but only one writer. We represent this by having mutex_ be -1 when
+// writing and a number > 0 when reading (and 0 when no lock is held).
+//
+// In debug mode, we assert these invariants, while in non-debug mode
+// we do nothing, for efficiency. That's why everything is in an
+// assert.
+
+Mutex::Mutex() : mutex_(0) { }
+Mutex::Mutex(Mutex::LinkerInitialized) : mutex_(0) { }
+Mutex::~Mutex() { assert(mutex_ == 0); }
+void Mutex::Lock() { assert(--mutex_ == -1); }
+void Mutex::Unlock() { assert(mutex_++ == -1); }
+#ifdef GMUTEX_TRYLOCK
+bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
+#endif
+void Mutex::ReaderLock() { assert(++mutex_ > 0); }
+void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
+
+#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
+
+Mutex::Mutex() : destroy_(true) {
+ InitializeCriticalSection(&mutex_);
+ SetIsSafe();
+}
+Mutex::Mutex(LinkerInitialized) : destroy_(false) {
+ InitializeCriticalSection(&mutex_);
+ SetIsSafe();
+}
+Mutex::~Mutex() { if (destroy_) DeleteCriticalSection(&mutex_); }
+void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); }
+void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); }
+#ifdef GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ TryEnterCriticalSection(&mutex_) != 0 : true; }
+#endif
+void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks
+void Mutex::ReaderUnlock() { Unlock(); }
+
+#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
+
+#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
+ if (is_safe_ && fncall(&mutex_) != 0) abort(); \
+} while (0)
+
+Mutex::Mutex() : destroy_(true) {
+ SetIsSafe();
+ if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
+}
+Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
+ SetIsSafe();
+ if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
+}
+Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_rwlock_destroy); }
+void Mutex::Lock() { SAFE_PTHREAD(pthread_rwlock_wrlock); }
+void Mutex::Unlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
+#ifdef GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ pthread_rwlock_trywrlock(&mutex_) == 0 : true; }
+#endif
+void Mutex::ReaderLock() { SAFE_PTHREAD(pthread_rwlock_rdlock); }
+void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
+#undef SAFE_PTHREAD
+
+#elif defined(HAVE_PTHREAD)
+
+#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
+ if (is_safe_ && fncall(&mutex_) != 0) abort(); \
+} while (0)
+
+Mutex::Mutex() : destroy_(true) {
+ SetIsSafe();
+ if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
+}
+Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
+ SetIsSafe();
+ if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
+}
+Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_mutex_destroy); }
+void Mutex::Lock() { SAFE_PTHREAD(pthread_mutex_lock); }
+void Mutex::Unlock() { SAFE_PTHREAD(pthread_mutex_unlock); }
+#ifdef GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ pthread_mutex_trylock(&mutex_) == 0 : true; }
+#endif
+void Mutex::ReaderLock() { Lock(); }
+void Mutex::ReaderUnlock() { Unlock(); }
+#undef SAFE_PTHREAD
+
+#endif
+
+// --------------------------------------------------------------------------
+// Some helper classes
+
+// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
+class MutexLock {
+ public:
+ explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
+ ~MutexLock() { mu_->Unlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ MutexLock(const MutexLock&);
+ void operator=(const MutexLock&);
+};
+
+// ReaderMutexLock and WriterMutexLock do the same, for rwlocks
+class ReaderMutexLock {
+ public:
+ explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
+ ~ReaderMutexLock() { mu_->ReaderUnlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ ReaderMutexLock(const ReaderMutexLock&);
+ void operator=(const ReaderMutexLock&);
+};
+
+class WriterMutexLock {
+ public:
+ explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
+ ~WriterMutexLock() { mu_->WriterUnlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ WriterMutexLock(const WriterMutexLock&);
+ void operator=(const WriterMutexLock&);
+};
+
+// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
+#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name)
+#define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name)
+#define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name)
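+
+// For illustration (with a hypothetical mutex mu): the macros above turn
+//   MutexLock(&mu);    // constructs a temporary; the lock is released
+//                      // again at the end of this very statement!
+// into a compile error, while the correct form names the guard object:
+//   MutexLock l(&mu);  // held until 'l' goes out of scope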
+
+} // namespace MUTEX_NAMESPACE
+
+using namespace MUTEX_NAMESPACE;
+
+#undef MUTEX_NAMESPACE
+
+#endif /* GOOGLE_MUTEX_H_ */
diff --git a/extern/libmv/third_party/glog/AUTHORS b/extern/libmv/third_party/glog/AUTHORS
new file mode 100644
index 00000000000..ee92be88dcf
--- /dev/null
+++ b/extern/libmv/third_party/glog/AUTHORS
@@ -0,0 +1,2 @@
+opensource@google.com
+
diff --git a/extern/libmv/third_party/glog/COPYING b/extern/libmv/third_party/glog/COPYING
new file mode 100644
index 00000000000..38396b580b3
--- /dev/null
+++ b/extern/libmv/third_party/glog/COPYING
@@ -0,0 +1,65 @@
+Copyright (c) 2008, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+A function gettimeofday in utilities.cc is based on
+
+http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd
+
+The license of this code is:
+
+Copyright (c) 2003-2008, Jouni Malinen <j@w1.fi> and contributors
+All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name(s) of the above-listed copyright holder(s) nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/extern/libmv/third_party/glog/ChangeLog b/extern/libmv/third_party/glog/ChangeLog
new file mode 100644
index 00000000000..350fee921e3
--- /dev/null
+++ b/extern/libmv/third_party/glog/ChangeLog
@@ -0,0 +1,59 @@
+2010-06-15 Google Inc. <opensource@google.com>
+
+ * google-glog: version 0.3.1
+ * GLOG_* environment variables now work even when gflags is installed.
+ * Snow Leopard support.
+ * Now we can build and test from outside the source tree.
+ * Add DCHECK_NOTNULL.
+ * Add ShutdownGoogleLogging to close syslog (thanks DGunchev)
+ * Fix --enable-frame-pointers option (thanks kazuki.ohta)
+ * Fix libunwind detection (thanks giantchen)
+
+2009-07-30 Google Inc. <opensource@google.com>
+
+ * google-glog: version 0.3.0
+ * Fix a deadlock that happened when glog is used together with recent gflags.
+ * Suppress several unnecessary warnings (thanks keir).
+ * NetBSD and OpenBSD support.
+ * Use Win32API GetComputerNameA properly (thanks magila).
+ * Fix user name detection for Windows (thanks ademin).
+ * Fix several minor bugs.
+
+2009-04-10 Google Inc. <opensource@google.com>
+ * google-glog: version 0.2.1
+ * Fix timestamps of VC++ version.
+ * Add pkg-config support (thanks Tomasz)
+ * Fix build problem when building with gtest (thanks Michael)
+ * Add --with-gflags option for configure (thanks Michael)
+ * Fixes for GCC 4.4 (thanks John)
+
+2009-01-23 Google Inc. <opensource@google.com>
+ * google-glog: version 0.2
+ * Add initial Windows VC++ support.
+ * Google testing/mocking frameworks integration.
+ * Link pthread library automatically.
+ * Flush logs in signal handlers.
+ * Add macros LOG_TO_STRING, LOG_AT_LEVEL, DVLOG, and LOG_TO_SINK_ONLY.
+ * Log microseconds.
+ * Add --log_backtrace_at option.
+ * Fix some minor bugs.
+
+2008-11-18 Google Inc. <opensource@google.com>
+ * google-glog: version 0.1.2
+ * Add InstallFailureSignalHandler(). (satorux)
+ * Re-organize the way to produce stacktraces.
+ * Don't define unnecessary macro DISALLOW_EVIL_CONSTRUCTORS.
+
+2008-10-15 Google Inc. <opensource@google.com>
+ * google-glog: version 0.1.1
+ * Support symbolize for MacOSX 10.5.
+ * BUG FIX: --vmodule didn't work with gflags.
+ * BUG FIX: symbolize_unittest failed with GCC 4.3.
+ * Several fixes on the document.
+
+2008-10-07 Google Inc. <opensource@google.com>
+
+ * google-glog: initial release:
+ The glog package contains a library that implements application-level
+ logging. This library provides logging APIs based on C++-style
+ streams and various helper macros.
diff --git a/extern/libmv/third_party/glog/NEWS b/extern/libmv/third_party/glog/NEWS
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/extern/libmv/third_party/glog/NEWS
diff --git a/extern/libmv/third_party/glog/README b/extern/libmv/third_party/glog/README
new file mode 100644
index 00000000000..77efd37505a
--- /dev/null
+++ b/extern/libmv/third_party/glog/README
@@ -0,0 +1,5 @@
+This repository contains a C++ implementation of the Google logging
+module. Documentation for the implementation is in doc/.
+
+See INSTALL for (generic) installation instructions for C++: basically
+ ./configure && make && make install
diff --git a/extern/libmv/third_party/glog/README.libmv b/extern/libmv/third_party/glog/README.libmv
new file mode 100644
index 00000000000..8f1243b2430
--- /dev/null
+++ b/extern/libmv/third_party/glog/README.libmv
@@ -0,0 +1,38 @@
+Project: Google Logging
+URL: http://code.google.com/p/google-glog/
+License: New BSD
+Upstream version: 0.3.1
+Local modifications:
+
+Upgrading Notes
+* Had to change #include <gflags/gflags.h> to #include "gflags/gflags.h"
+* Make sure to copy over a config_YOUR_PLATFORM.h and put it in config.h
+
+Old changes which got obsoleted (maybe?) by 0.3.1 merge:
+* The config_linux.h is generated by configure on Keir's Ubuntu 9.04 desktop.
+* Commented out some struct ::tm weirdness causing compile failures on
+ Ubuntu 8.10 and 9.04.
+* Switched several initializers to memset instead of = {}.
+* Changed some includes pointing to gflags. Not sure why the regular inclusion
+ didn't work.
+* Added some compile flags to silence various warnings, allowing us to keep the
+ differences between upstream small.
+* Don't redefine _XOPEN_SOURCE.
+* Added "google::" to GetReferenceableValue in CHECK_OP_LOG.
+* Add virtual destructor to Thread in googletest.h.
+* Update windows/glog/log_severity to build with Qt code that includes WinGDI
+ (WinGDI otherwise causes a double definition of ERROR).
+
+Old changes which got obsoleted (maybe?) by 0.2.1 merge:
+* Added #ifndef / def REG_EIP; not sure what that is.
+* Added (void) arg stuff to prevent unused variable warnings.
+* Added google:: namespace prefix to GetReferenceableValue
+* Added assignments for several functions marked with no_ignore_return, where
+ the return value was ignored.
+* Commented out the unused function DumpPCAndSymbol() in utilities.cc to
+ silence gcc on the Mac.
+
+TODO(keir): Remove any obsoleted changes above if they are not necessary after
+testing on more platforms.
+
+WARNING: Mac port not updated for 0.2.1
diff --git a/extern/libmv/third_party/glog/src/base/commandlineflags.h b/extern/libmv/third_party/glog/src/base/commandlineflags.h
new file mode 100644
index 00000000000..6c529ccd847
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/base/commandlineflags.h
@@ -0,0 +1,132 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// This file is a compatibility layer that defines Google's version of
+// command line flags that are used for configuration.
+//
+// We put flags into their own namespace. It is purposefully
+// named in an opaque way that people should have trouble typing
+// directly. The idea is that DEFINE puts the flag in the weird
+// namespace, and DECLARE imports the flag from there into the
+// current namespace. The net result is to force people to use
+// DECLARE to get access to a flag, rather than saying
+// extern bool FLAGS_logtostderr;
+// or some such instead. We want this so we can put extra
+// functionality (like sanity-checking) in DECLARE if we want,
+// and make sure it is picked up everywhere.
+//
+// We also put the type of the variable in the namespace, so that
+// people can't DECLARE_int32 something that they DEFINE_bool'd
+// elsewhere.
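+//
+// For illustration (dump_payload is a hypothetical flag name):
+//   DEFINE_bool(dump_payload, false, "Dump the payload");  // in one .cc file
+//   DECLARE_bool(dump_payload);   // wherever FLAGS_dump_payload is read
+//   ...
+//   if (FLAGS_dump_payload) { /* ... */ }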
+#ifndef BASE_COMMANDLINEFLAGS_H__
+#define BASE_COMMANDLINEFLAGS_H__
+
+#include "config.h"
+#include <string>
+#include <string.h> // for memchr
+#include <stdlib.h> // for getenv
+
+#ifdef HAVE_LIB_GFLAGS
+
+#include "third_party/gflags/gflags.h"
+
+#else
+
+#include "glog/logging.h"
+
+#define DECLARE_VARIABLE(type, name, tn) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##tn##_instead { \
+ extern GOOGLE_GLOG_DLL_DECL type FLAGS_##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_##tn##_instead::FLAGS_##name
+#define DEFINE_VARIABLE(type, name, value, meaning, tn) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##tn##_instead { \
+ GOOGLE_GLOG_DLL_DECL type FLAGS_##name(value); \
+ char FLAGS_no##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_##tn##_instead::FLAGS_##name
+
+// bool specialization
+#define DECLARE_bool(name) \
+ DECLARE_VARIABLE(bool, name, bool)
+#define DEFINE_bool(name, value, meaning) \
+ DEFINE_VARIABLE(bool, name, value, meaning, bool)
+
+// int32 specialization
+#define DECLARE_int32(name) \
+ DECLARE_VARIABLE(GOOGLE_NAMESPACE::int32, name, int32)
+#define DEFINE_int32(name, value, meaning) \
+ DEFINE_VARIABLE(GOOGLE_NAMESPACE::int32, name, value, meaning, int32)
+
+// Special case for string, because we have to specify the namespace
+// std::string, which doesn't play nicely with our FLAG__namespace hackery.
+#define DECLARE_string(name) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
+ extern GOOGLE_GLOG_DLL_DECL std::string FLAGS_##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
+#define DEFINE_string(name, value, meaning) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
+ GOOGLE_GLOG_DLL_DECL std::string FLAGS_##name(value); \
+ char FLAGS_no##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
+
+#endif // HAVE_LIB_GFLAGS
+
+// Define GLOG_DEFINE_* using DEFINE_* . By using these macros, we
+// have GLOG_* environ variables even if we have gflags installed.
+//
+// If both an environment variable and a flag are specified, the value
+// specified by a flag wins. E.g., if GLOG_v=0 and --v=1, the
+// verbosity will be 1, not 0.
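+//
+// For example, for glog's verbosity flag v (./prog is a placeholder binary):
+//   GLOG_v=2 ./prog          --> FLAGS_v starts out as 2
+//   GLOG_v=2 ./prog --v=1    --> FLAGS_v ends up as 1 (the flag wins)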
+
+#define GLOG_DEFINE_bool(name, value, meaning) \
+ DEFINE_bool(name, EnvToBool("GLOG_" #name, value), meaning)
+
+#define GLOG_DEFINE_int32(name, value, meaning) \
+ DEFINE_int32(name, EnvToInt("GLOG_" #name, value), meaning)
+
+#define GLOG_DEFINE_string(name, value, meaning) \
+ DEFINE_string(name, EnvToString("GLOG_" #name, value), meaning)
+
+// These macros (they could be functions, but I don't want to bother with a
+// .cc file) make it easier to initialize flags from the environment.
+
+#define EnvToString(envname, dflt) \
+ (!getenv(envname) ? (dflt) : getenv(envname))
+
+#define EnvToBool(envname, dflt) \
+ (!getenv(envname) ? (dflt) : memchr("tTyY1\0", getenv(envname)[0], 6) != NULL)
+
+#define EnvToInt(envname, dflt) \
+ (!getenv(envname) ? (dflt) : strtol(getenv(envname), NULL, 10))
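+
+// For example, EnvToBool("GLOG_logtostderr", false) evaluates to true when
+// the GLOG_logtostderr environment variable starts with one of "tTyY1" --
+// or is set but empty, since the terminating '\0' is included in the
+// six-byte match set passed to memchr().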
+
+#endif // BASE_COMMANDLINEFLAGS_H__
diff --git a/extern/libmv/third_party/glog/src/base/googleinit.h b/extern/libmv/third_party/glog/src/base/googleinit.h
new file mode 100644
index 00000000000..c907308e852
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/base/googleinit.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Jacob Hoffman-Andrews
+
+#ifndef _GOOGLEINIT_H
+#define _GOOGLEINIT_H
+
+class GoogleInitializer {
+ public:
+ typedef void (*void_function)(void);
+ GoogleInitializer(const char* name, void_function f) {
+ f();
+ }
+};
+
+#define REGISTER_MODULE_INITIALIZER(name, body) \
+ namespace { \
+ static void google_init_module_##name () { body; } \
+ GoogleInitializer google_initializer_module_##name(#name, \
+ google_init_module_##name); \
+ }
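+
+// For illustration (logdirs and InitLogDirs() are hypothetical names):
+//   REGISTER_MODULE_INITIALIZER(logdirs, InitLogDirs());
+// declares an anonymous-namespace GoogleInitializer whose constructor
+// invokes InitLogDirs() while this translation unit is dynamically
+// initialized, i.e. before main() runs.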
+
+#endif /* _GOOGLEINIT_H */
diff --git a/extern/libmv/third_party/glog/src/base/mutex.h b/extern/libmv/third_party/glog/src/base/mutex.h
new file mode 100644
index 00000000000..7ba88cb5a63
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/base/mutex.h
@@ -0,0 +1,325 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// ---
+// Author: Craig Silverstein.
+//
+// A simple mutex wrapper, supporting locks and read-write locks.
+// You should assume the locks are *not* re-entrant.
+//
+// To use: you should define the following macros in your configure.ac:
+// ACX_PTHREAD
+// AC_RWLOCK
+// The latter is defined in ../autoconf.
+//
+// This class is meant to be internal-only and should be wrapped in an
+// internal namespace. Before you use this module, pick the internal
+// namespace it should live in; or, if you want to expose it, move it
+// into the Google namespace. We cannot put this class in the global
+// namespace because there can be problems when multiple shared objects
+// each carry their own version of Mutex.
+//
+// NOTE: by default, we have #ifdef'ed out the TryLock() method.
+// This is for two reasons:
+// 1) TryLock() under Windows is a bit annoying (it requires a
+// #define to be defined very early).
+// 2) TryLock() is broken for NO_THREADS mode, at least in NDEBUG
+// mode.
+// If you need TryLock(), and these two caveats either aren't a problem
+// for you or you're willing to work around them, then feel free to
+// #define GMUTEX_TRYLOCK, or to remove the #ifdefs in the code below.
+//
+// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
+// http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
+// Because of that, we might as well use Windows locks for
+// Cygwin. They seem to be more reliable than the Cygwin pthreads layer.
+//
+// TRICKY IMPLEMENTATION NOTE:
+// This class is designed to be safe to use during
+// dynamic-initialization -- that is, by global constructors that are
+// run before main() starts. The issue in this case is that
+// dynamic-initialization happens in an unpredictable order, and it
+// could be that someone else's dynamic initializer could call a
+// function that tries to acquire this mutex -- but that all happens
+// before this mutex's constructor has run. (This can happen even if
+// the mutex and the function that uses the mutex are in the same .cc
+// file.) Basically, because Mutex does non-trivial work in its
+// constructor, it's not, in the naive implementation, safe to use
+// before dynamic initialization has run on it.
+//
+// The solution used here is to pair the actual mutex primitive with a
+// bool that is set to true when the mutex is dynamically initialized.
+// (Before that it's false.) Then we modify all mutex routines to
+// look at the bool, and not try to lock/unlock until the bool makes
+// it to true (which happens after the Mutex constructor has run.)
+//
+// This works because before main() starts -- particularly, during
+// dynamic initialization -- there are no threads, so a) it's ok that
+// the mutex operations are a no-op, since we don't need locking then
+// anyway; and b) we can be quite confident our bool won't change
+// state between a call to Lock() and a call to Unlock() (that would
+// require a global constructor in one translation unit to call Lock()
+// and another global constructor in another translation unit to call
+// Unlock() later, which is pretty perverse).
+//
+// That said, it's tricky, and can conceivably fail; it's safest to
+// avoid trying to acquire a mutex in a global constructor, if you
+// can. One way it can fail is that a really smart compiler might
+// initialize the bool to true at static-initialization time (too
+// early) rather than at dynamic-initialization time. To discourage
+// that, we set is_safe_ to true in the constructor body (not the
+// colon-initializer), via a helper function, so that the compiler
+// cannot prove the assignment is a constant and hoist it up to
+// static-initialization time. This should be good enough.
+
+#ifndef GOOGLE_MUTEX_H_
+#define GOOGLE_MUTEX_H_
+
+#include "config.h" // to figure out pthreads support
+
+#if defined(NO_THREADS)
+ typedef int MutexType; // to keep a lock-count
+#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
+# define WIN32_LEAN_AND_MEAN // We only need minimal includes
+# ifdef GMUTEX_TRYLOCK
+ // We need Windows NT or later for TryEnterCriticalSection(). If you
+ // don't need that functionality, you can remove these _WIN32_WINNT
+ // lines, and change TryLock() to assert(0) or something.
+# ifndef _WIN32_WINNT
+# define _WIN32_WINNT 0x0400
+# endif
+# endif
+// To avoid macro definition of ERROR.
+# define NOGDI
+// To avoid macro definition of min/max.
+# define NOMINMAX
+# include <windows.h>
+ typedef CRITICAL_SECTION MutexType;
+#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
+ // Needed for pthread_rwlock_*. If it causes problems, you could take it
+ // out, but then you'd have to unset HAVE_RWLOCK (at least on Linux -- it
+ // *does* cause problems on FreeBSD and Mac OS X, but isn't needed
+ // for locking there.)
+# ifdef __linux__
+# define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls
+# endif
+# include <pthread.h>
+ typedef pthread_rwlock_t MutexType;
+#elif defined(HAVE_PTHREAD)
+# include <pthread.h>
+ typedef pthread_mutex_t MutexType;
+#else
+# error Need to implement mutex.h for your architecture, or #define NO_THREADS
+#endif
+
+// These headers must be included after the _XOPEN_SOURCE definition
+// above, since they may themselves define the _XOPEN_SOURCE macro.
+#include <assert.h>
+#include <stdlib.h> // for abort()
+
+#define MUTEX_NAMESPACE glog_internal_namespace_
+
+namespace MUTEX_NAMESPACE {
+
+class Mutex {
+ public:
+ // Create a Mutex that is not held by anybody. This constructor is
+ // typically used for Mutexes allocated on the heap or the stack.
+ // See below for a recommendation for constructing global Mutex
+ // objects.
+ inline Mutex();
+
+ // Destructor
+ inline ~Mutex();
+
+ inline void Lock(); // Block if needed until free then acquire exclusively
+ inline void Unlock(); // Release a lock acquired via Lock()
+#ifdef GMUTEX_TRYLOCK
+ inline bool TryLock(); // If free, Lock() and return true, else return false
+#endif
+ // Note that on systems that don't support read-write locks, these may
+ // be implemented as synonyms to Lock() and Unlock(). So you can use
+ // these for efficiency, but don't use them anyplace where being able
+ // to do shared reads is necessary to avoid deadlock.
+ inline void ReaderLock(); // Block until free or shared then acquire a share
+ inline void ReaderUnlock(); // Release a read share of this Mutex
+ inline void WriterLock() { Lock(); } // Acquire an exclusive lock
+ inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
+
+ // TODO(hamaji): Do nothing, implement correctly.
+ inline void AssertHeld() {}
+
+ private:
+ MutexType mutex_;
+ // We want to make sure that the compiler sets is_safe_ to true only
+ // when we tell it to, and never assumes is_safe_ is always true.
+ // volatile is the most reliable way to do that.
+ volatile bool is_safe_;
+
+ inline void SetIsSafe() { is_safe_ = true; }
+
+ // Catch the error of writing Mutex when intending MutexLock.
+ Mutex(Mutex* /*ignored*/) {}
+ // Disallow "evil" constructors
+ Mutex(const Mutex&);
+ void operator=(const Mutex&);
+};
+
+// Now the implementation of Mutex for various systems
+#if defined(NO_THREADS)
+
+// When we don't have threads, we can be either reading or writing,
+// but not both. We can have lots of readers at once (in no-threads
+// mode, that's most likely to happen in recursive function calls),
+// but only one writer. We represent this by having mutex_ be -1 when
+// writing and a number > 0 when reading (and 0 when no lock is held).
+//
+// In debug mode, we assert these invariants, while in non-debug mode
+// we do nothing, for efficiency. That's why everything is in an
+// assert.
+
+Mutex::Mutex() : mutex_(0) { }
+Mutex::~Mutex() { assert(mutex_ == 0); }
+void Mutex::Lock() { assert(--mutex_ == -1); }
+void Mutex::Unlock() { assert(mutex_++ == -1); }
+#ifdef GMUTEX_TRYLOCK
+bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
+#endif
+void Mutex::ReaderLock() { assert(++mutex_ > 0); }
+void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
+
+#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
+
+Mutex::Mutex() { InitializeCriticalSection(&mutex_); SetIsSafe(); }
+Mutex::~Mutex() { DeleteCriticalSection(&mutex_); }
+void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); }
+void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); }
+#ifdef GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ TryEnterCriticalSection(&mutex_) != 0 : true; }
+#endif
+void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks
+void Mutex::ReaderUnlock() { Unlock(); }
+
+#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
+
+#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
+ if (is_safe_ && fncall(&mutex_) != 0) abort(); \
+} while (0)
+
+Mutex::Mutex() {
+ SetIsSafe();
+ if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
+}
+Mutex::~Mutex() { SAFE_PTHREAD(pthread_rwlock_destroy); }
+void Mutex::Lock() { SAFE_PTHREAD(pthread_rwlock_wrlock); }
+void Mutex::Unlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
+#ifdef GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ pthread_rwlock_trywrlock(&mutex_) == 0 :
+ true; }
+#endif
+void Mutex::ReaderLock() { SAFE_PTHREAD(pthread_rwlock_rdlock); }
+void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
+#undef SAFE_PTHREAD
+
+#elif defined(HAVE_PTHREAD)
+
+#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
+ if (is_safe_ && fncall(&mutex_) != 0) abort(); \
+} while (0)
+
+Mutex::Mutex() {
+ SetIsSafe();
+ if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
+}
+Mutex::~Mutex() { SAFE_PTHREAD(pthread_mutex_destroy); }
+void Mutex::Lock() { SAFE_PTHREAD(pthread_mutex_lock); }
+void Mutex::Unlock() { SAFE_PTHREAD(pthread_mutex_unlock); }
+#ifdef GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ pthread_mutex_trylock(&mutex_) == 0 : true; }
+#endif
+void Mutex::ReaderLock() { Lock(); }
+void Mutex::ReaderUnlock() { Unlock(); }
+#undef SAFE_PTHREAD
+
+#endif
+
+// --------------------------------------------------------------------------
+// Some helper classes
+
+// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
+class MutexLock {
+ public:
+ explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
+ ~MutexLock() { mu_->Unlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ MutexLock(const MutexLock&);
+ void operator=(const MutexLock&);
+};
+
+// ReaderMutexLock and WriterMutexLock do the same, for rwlocks
+class ReaderMutexLock {
+ public:
+ explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
+ ~ReaderMutexLock() { mu_->ReaderUnlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ ReaderMutexLock(const ReaderMutexLock&);
+ void operator=(const ReaderMutexLock&);
+};
+
+class WriterMutexLock {
+ public:
+ explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
+ ~WriterMutexLock() { mu_->WriterUnlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ WriterMutexLock(const WriterMutexLock&);
+ void operator=(const WriterMutexLock&);
+};
+
+// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
+#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name)
+#define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name)
+#define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name)
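+
+// A read-mostly usage sketch (hypothetical names; bodies elided):
+//   static Mutex table_mu;
+//   int Lookup(int key)  { ReaderMutexLock l(&table_mu); /* shared read */ }
+//   void Insert(int key) { WriterMutexLock l(&table_mu); /* exclusive */ }
+// On platforms without rwlock support, ReaderLock() is just Lock(), so this
+// stays correct but loses read concurrency.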
+
+} // namespace MUTEX_NAMESPACE
+
+using namespace MUTEX_NAMESPACE;
+
+#undef MUTEX_NAMESPACE
+
+#endif /* GOOGLE_MUTEX_H_ */
diff --git a/extern/libmv/third_party/glog/src/config.h b/extern/libmv/third_party/glog/src/config.h
new file mode 100644
index 00000000000..ed8d56e7799
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/config.h
@@ -0,0 +1,11 @@
+/* src/config.h. Generated from config.h.in by configure. */
+/* src/config.h.in. Generated from configure.ac by autoheader. */
+
+/* Dispatch to the platform-specific generated config header. */
+#ifdef __APPLE__
+ #include "config_mac.h"
+#elif __GNUC__
+ #include "config_linux.h"
+#elif _MSC_VER
+ #include "windows/config.h"
+#endif
diff --git a/extern/libmv/third_party/glog/src/config_linux.h b/extern/libmv/third_party/glog/src/config_linux.h
new file mode 100644
index 00000000000..df6956c9ecf
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/config_linux.h
@@ -0,0 +1,164 @@
+/* src/config.h. Generated from config.h.in by configure. */
+/* src/config.h.in. Generated from configure.ac by autoheader. */
+
+/* Namespace for Google classes */
+#define GOOGLE_NAMESPACE google
+
+/* Define if you have the `dladdr' function */
+/* #undef HAVE_DLADDR */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <execinfo.h> header file. */
+#define HAVE_EXECINFO_H 1
+
+/* Define if you have the `fcntl' function */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <glob.h> header file. */
+#define HAVE_GLOB_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `pthread' library (-lpthread). */
+#define HAVE_LIBPTHREAD 1
+
+/* Define to 1 if you have the <libunwind.h> header file. */
+/* #undef HAVE_LIBUNWIND_H */
+
+/* define if you have google gflags library */
+#define HAVE_LIB_GFLAGS 1
+
+/* define if you have google gmock library */
+/* #undef HAVE_LIB_GMOCK */
+
+/* define if you have google gtest library */
+/* #undef HAVE_LIB_GTEST */
+
+/* define if you have libunwind */
+/* #undef HAVE_LIB_UNWIND */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* define if the compiler implements namespaces */
+#define HAVE_NAMESPACES 1
+
+/* Define if you have POSIX threads libraries and header files. */
+#define HAVE_PTHREAD 1
+
+/* Define to 1 if you have the <pwd.h> header file. */
+#define HAVE_PWD_H 1
+
+/* define if the compiler implements pthread_rwlock_* */
+#define HAVE_RWLOCK 1
+
+/* Define if you have the `sigaltstack' function */
+#define HAVE_SIGALTSTACK 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <syscall.h> header file. */
+#define HAVE_SYSCALL_H 1
+
+/* Define to 1 if you have the <syslog.h> header file. */
+#define HAVE_SYSLOG_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/syscall.h> header file. */
+#define HAVE_SYS_SYSCALL_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <sys/ucontext.h> header file. */
+#define HAVE_SYS_UCONTEXT_H 1
+
+/* Define to 1 if you have the <sys/utsname.h> header file. */
+#define HAVE_SYS_UTSNAME_H 1
+
+/* Define to 1 if you have the <ucontext.h> header file. */
+#define HAVE_UCONTEXT_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* define if the compiler supports using expression for operator */
+#define HAVE_USING_OPERATOR 1
+
+/* define if your compiler has __attribute__ */
+#define HAVE___ATTRIBUTE__ 1
+
+/* define if your compiler has __builtin_expect */
+#define HAVE___BUILTIN_EXPECT 1
+
+/* define if your compiler has __sync_val_compare_and_swap */
+/* #undef HAVE___SYNC_VAL_COMPARE_AND_SWAP */
+
+/* Name of package */
+#define PACKAGE "glog"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "opensource@google.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "glog"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "glog 0.3.1"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "glog"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "0.3.1"
+
+/* How to access the PC from a struct ucontext */
+#if defined(_M_X64) || defined(__amd64__)
+ #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
+#else
+ #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP]
+#endif
+
+/* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
+/* #undef PTHREAD_CREATE_JOINABLE */
+
+/* The size of `void *', as computed by sizeof. */
+#define SIZEOF_VOID_P 8
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* the namespace where STL code like vector<> is defined */
+#define STL_NAMESPACE std
+
+/* location of source code */
+#define TEST_SRC_DIR "."
+
+/* Version number of package */
+#define VERSION "0.3.1"
+
+/* Stops putting the code inside the Google namespace */
+#define _END_GOOGLE_NAMESPACE_ }
+
+/* Puts following code inside the Google namespace */
+#define _START_GOOGLE_NAMESPACE_ namespace google {
diff --git a/extern/libmv/third_party/glog/src/config_mac.h b/extern/libmv/third_party/glog/src/config_mac.h
new file mode 100644
index 00000000000..5f953d17ba9
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/config_mac.h
@@ -0,0 +1,159 @@
+/* src/config.h. Generated from config.h.in by configure. */
+/* src/config.h.in. Generated from configure.ac by autoheader. */
+
+/* Namespace for Google classes */
+#define GOOGLE_NAMESPACE google
+
+/* Define if you have the `dladdr' function */
+#define HAVE_DLADDR 1
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <execinfo.h> header file. */
+#define HAVE_EXECINFO_H 1
+
+/* Define if you have the `fcntl' function */
+#define HAVE_FCNTL 1
+
+/* Define to 1 if you have the <glob.h> header file. */
+#define HAVE_GLOB_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `pthread' library (-lpthread). */
+#define HAVE_LIBPTHREAD 1
+
+/* Define to 1 if you have the <libunwind.h> header file. */
+#define HAVE_LIBUNWIND_H 1
+
+/* define if you have google gflags library */
+#define HAVE_LIB_GFLAGS 1
+
+/* define if you have google gmock library */
+/* #undef HAVE_LIB_GMOCK */
+
+/* define if you have google gtest library */
+//#define HAVE_LIB_GTEST 1
+
+/* define if you have libunwind */
+/* #undef HAVE_LIB_UNWIND */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* define if the compiler implements namespaces */
+#define HAVE_NAMESPACES 1
+
+/* Define if you have POSIX threads libraries and header files. */
+#define HAVE_PTHREAD 1
+
+/* Define to 1 if you have the <pwd.h> header file. */
+#define HAVE_PWD_H 1
+
+/* define if the compiler implements pthread_rwlock_* */
+#define HAVE_RWLOCK 1
+
+/* Define if you have the `sigaltstack' function */
+#define HAVE_SIGALTSTACK 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <syscall.h> header file. */
+/* #undef HAVE_SYSCALL_H */
+
+/* Define to 1 if you have the <syslog.h> header file. */
+#define HAVE_SYSLOG_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/syscall.h> header file. */
+#define HAVE_SYS_SYSCALL_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <sys/ucontext.h> header file. */
+#define HAVE_SYS_UCONTEXT_H 1
+
+/* Define to 1 if you have the <sys/utsname.h> header file. */
+#define HAVE_SYS_UTSNAME_H 1
+
+/* Define to 1 if you have the <ucontext.h> header file. */
+/* #undef HAVE_UCONTEXT_H */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* define if the compiler supports using expression for operator */
+#define HAVE_USING_OPERATOR 1
+
+/* define if your compiler has __attribute__ */
+#define HAVE___ATTRIBUTE__ 1
+
+/* define if your compiler has __builtin_expect */
+#define HAVE___BUILTIN_EXPECT 1
+
+/* define if your compiler has __sync_val_compare_and_swap */
+/* #undef HAVE___SYNC_VAL_COMPARE_AND_SWAP */
+
+/* Name of package */
+#define PACKAGE "glog"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "opensource@google.com"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "glog"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "glog 0.3.1"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "glog"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "0.3.1"
+
+/* How to access the PC from a struct ucontext */
+#undef PC_FROM_UCONTEXT
+
+/* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
+/* #undef PTHREAD_CREATE_JOINABLE */
+
+/* The size of `void *', as computed by sizeof. */
+#define SIZEOF_VOID_P 8
+
+/* Define to 1 if you have the ANSI C header files. */
+/* #undef STDC_HEADERS */
+
+/* the namespace where STL code like vector<> is defined */
+#define STL_NAMESPACE std
+
+/* location of source code */
+#define TEST_SRC_DIR "."
+
+/* Version number of package */
+#define VERSION "0.3.1"
+
+/* Stops putting the code inside the Google namespace */
+#define _END_GOOGLE_NAMESPACE_ }
+
+/* Puts following code inside the Google namespace */
+#define _START_GOOGLE_NAMESPACE_ namespace google {
diff --git a/extern/libmv/third_party/glog/src/demangle.cc b/extern/libmv/third_party/glog/src/demangle.cc
new file mode 100644
index 00000000000..46556bf3c13
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/demangle.cc
@@ -0,0 +1,1231 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+
+#include <stdio.h> // for NULL
+#include "demangle.h"
+
+_START_GOOGLE_NAMESPACE_
+
+typedef struct {
+ const char *abbrev;
+ const char *real_name;
+} AbbrevPair;
+
+// List of operators from Itanium C++ ABI.
+static const AbbrevPair kOperatorList[] = {
+ { "nw", "new" },
+ { "na", "new[]" },
+ { "dl", "delete" },
+ { "da", "delete[]" },
+ { "ps", "+" },
+ { "ng", "-" },
+ { "ad", "&" },
+ { "de", "*" },
+ { "co", "~" },
+ { "pl", "+" },
+ { "mi", "-" },
+ { "ml", "*" },
+ { "dv", "/" },
+ { "rm", "%" },
+ { "an", "&" },
+ { "or", "|" },
+ { "eo", "^" },
+ { "aS", "=" },
+ { "pL", "+=" },
+ { "mI", "-=" },
+ { "mL", "*=" },
+ { "dV", "/=" },
+ { "rM", "%=" },
+ { "aN", "&=" },
+ { "oR", "|=" },
+ { "eO", "^=" },
+ { "ls", "<<" },
+ { "rs", ">>" },
+ { "lS", "<<=" },
+ { "rS", ">>=" },
+ { "eq", "==" },
+ { "ne", "!=" },
+ { "lt", "<" },
+ { "gt", ">" },
+ { "le", "<=" },
+ { "ge", ">=" },
+ { "nt", "!" },
+ { "aa", "&&" },
+ { "oo", "||" },
+ { "pp", "++" },
+ { "mm", "--" },
+ { "cm", "," },
+ { "pm", "->*" },
+ { "pt", "->" },
+ { "cl", "()" },
+ { "ix", "[]" },
+ { "qu", "?" },
+ { "st", "sizeof" },
+ { "sz", "sizeof" },
+ { NULL, NULL },
+};
+
+// List of builtin types from Itanium C++ ABI.
+static const AbbrevPair kBuiltinTypeList[] = {
+ { "v", "void" },
+ { "w", "wchar_t" },
+ { "b", "bool" },
+ { "c", "char" },
+ { "a", "signed char" },
+ { "h", "unsigned char" },
+ { "s", "short" },
+ { "t", "unsigned short" },
+ { "i", "int" },
+ { "j", "unsigned int" },
+ { "l", "long" },
+ { "m", "unsigned long" },
+ { "x", "long long" },
+ { "y", "unsigned long long" },
+ { "n", "__int128" },
+ { "o", "unsigned __int128" },
+ { "f", "float" },
+ { "d", "double" },
+ { "e", "long double" },
+ { "g", "__float128" },
+ { "z", "ellipsis" },
+ { NULL, NULL }
+};
+
+// List of substitutions from Itanium C++ ABI.
+static const AbbrevPair kSubstitutionList[] = {
+ { "St", "" },
+ { "Sa", "allocator" },
+ { "Sb", "basic_string" },
+ // std::basic_string<char, std::char_traits<char>,std::allocator<char> >
+ { "Ss", "string"},
+ // std::basic_istream<char, std::char_traits<char> >
+ { "Si", "istream" },
+ // std::basic_ostream<char, std::char_traits<char> >
+ { "So", "ostream" },
+ // std::basic_iostream<char, std::char_traits<char> >
+ { "Sd", "iostream" },
+ { NULL, NULL }
+};
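+
+// For reference, a standard Itanium-ABI example (not taken from this file):
+// the mangled name _ZN3foo3barEi encodes foo::bar(int); this demangler
+// renders it as "foo::bar()", since parameter types are deliberately
+// elided (see DisableAppend() below).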
+
+// State needed for demangling.
+typedef struct {
+ const char *mangled_cur; // Cursor of mangled name.
+ const char *mangled_end; // End of mangled name.
+ char *out_cur; // Cursor of output string.
+ const char *out_begin; // Beginning of output string.
+ const char *out_end; // End of output string.
+ const char *prev_name; // For constructors/destructors.
+ int prev_name_length; // For constructors/destructors.
+ int nest_level; // For nested names.
+ int number; // Remember the previous number.
+ bool append; // Append flag.
+ bool overflowed; // True if output gets overflowed.
+} State;
+
+// We don't use strlen() in libc since it's not guaranteed to be async
+// signal safe.
+static size_t StrLen(const char *str) {
+ size_t len = 0;
+ while (*str != '\0') {
+ ++str;
+ ++len;
+ }
+ return len;
+}
+
+// Returns true if "str" has "prefix" as a prefix.
+static bool StrPrefix(const char *str, const char *prefix) {
+ size_t i = 0;
+ while (str[i] != '\0' && prefix[i] != '\0' &&
+ str[i] == prefix[i]) {
+ ++i;
+ }
+ return prefix[i] == '\0'; // Consumed everything in "prefix".
+}
+
+static void InitState(State *state, const char *mangled,
+ char *out, int out_size) {
+ state->mangled_cur = mangled;
+ state->mangled_end = mangled + StrLen(mangled);
+ state->out_cur = out;
+ state->out_begin = out;
+ state->out_end = out + out_size;
+ state->prev_name = NULL;
+ state->prev_name_length = -1;
+ state->nest_level = -1;
+ state->number = -1;
+ state->append = true;
+ state->overflowed = false;
+}
+
+// Calculates the remaining length of the mangled name.
+static int RemainingLength(State *state) {
+ return state->mangled_end - state->mangled_cur;
+}
+
+// Returns true and advances "mangled_cur" if we find "c" at
+// "mangled_cur" position.
+static bool ParseChar(State *state, const char c) {
+ if (RemainingLength(state) >= 1 && *state->mangled_cur == c) {
+ ++state->mangled_cur;
+ return true;
+ }
+ return false;
+}
+
+// Returns true and advances "mangled_cur" if we find "two_chars" at
+// "mangled_cur" position.
+static bool ParseTwoChar(State *state, const char *two_chars) {
+ if (RemainingLength(state) >= 2 &&
+ state->mangled_cur[0] == two_chars[0] &&
+ state->mangled_cur[1] == two_chars[1]) {
+ state->mangled_cur += 2;
+ return true;
+ }
+ return false;
+}
+
+// Returns true and advances "mangled_cur" if we find any character in
+// "char_class" at "mangled_cur" position.
+static bool ParseCharClass(State *state, const char *char_class) {
+ if (state->mangled_cur == state->mangled_end) {
+ return false;
+ }
+ const char *p = char_class;
+ for (; *p != '\0'; ++p) {
+ if (*state->mangled_cur == *p) {
+ state->mangled_cur += 1;
+ return true;
+ }
+ }
+ return false;
+}
+
+// This function is used for handling an optional non-terminal: the
+// argument expression performs the parse, and its result is ignored,
+// since an optional non-terminal may legitimately be absent.
+static bool Optional(bool status) {
+ return true;
+}
+
+// This function is used for handling <non-terminal>+ syntax.
+typedef bool (*ParseFunc)(State *);
+static bool OneOrMore(ParseFunc parse_func, State *state) {
+ if (parse_func(state)) {
+ while (parse_func(state)) {
+ }
+ return true;
+ }
+ return false;
+}
+
+// Append "str" at "out_cur". If there is an overflow, "overflowed"
+// is set to true for later use. The output string is ensured to
+// always terminate with '\0' as long as there is no overflow.
+static void Append(State *state, const char * const str, const int length) {
+ int i;
+ for (i = 0; i < length; ++i) {
+ if (state->out_cur + 1 < state->out_end) { // +1 for '\0'
+ *state->out_cur = str[i];
+ ++state->out_cur;
+ } else {
+ state->overflowed = true;
+ break;
+ }
+ }
+ if (!state->overflowed) {
+ *state->out_cur = '\0'; // Terminate it with '\0'
+ }
+}
+
+// We don't use equivalents in libc to avoid locale issues.
+static bool IsLower(char c) {
+ return c >= 'a' && c <= 'z';
+}
+
+static bool IsAlpha(char c) {
+ return ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'));
+}
+
+// Append "str" with some tweaks, iff "append" state is true.
+// Returns true so that it can be placed in "if" conditions.
+static void MaybeAppendWithLength(State *state, const char * const str,
+ const int length) {
+ if (state->append && length > 0) {
+ // Append a space if the output buffer ends with '<' and "str"
+ // starts with '<' to avoid <<<.
+ if (str[0] == '<' && state->out_begin < state->out_cur &&
+ state->out_cur[-1] == '<') {
+ Append(state, " ", 1);
+ }
+ // Remember the last identifier name for ctors/dtors.
+ if (IsAlpha(str[0]) || str[0] == '_') {
+ state->prev_name = state->out_cur;
+ state->prev_name_length = length;
+ }
+ Append(state, str, length);
+ }
+}
+
+// A convenient wrapper around MaybeAppendWithLength(). Returns true
+// so that it can be placed in "if" conditions.
+static bool MaybeAppend(State *state, const char * const str) {
+ if (state->append) {
+ int length = StrLen(str);
+ MaybeAppendWithLength(state, str, length);
+ }
+ return true;
+}
+
+// This function is used for handling nested names.
+static bool EnterNestedName(State *state) {
+ state->nest_level = 0;
+ return true;
+}
+
+// This function is used for handling nested names.
+static bool LeaveNestedName(State *state, int prev_value) {
+ state->nest_level = prev_value;
+ return true;
+}
+
+// Disable the append mode so that we don't print function parameters, etc.
+static bool DisableAppend(State *state) {
+ state->append = false;
+ return true;
+}
+
+// Restore the append mode to the previous state.
+static bool RestoreAppend(State *state, bool prev_value) {
+ state->append = prev_value;
+ return true;
+}
+
+// Increase the nest level for nested names.
+static void MaybeIncreaseNestLevel(State *state) {
+ if (state->nest_level > -1) {
+ ++state->nest_level;
+ }
+}
+
+// Appends :: for nested names if necessary.
+static void MaybeAppendSeparator(State *state) {
+ if (state->nest_level >= 1) {
+ MaybeAppend(state, "::");
+ }
+}
+
+// Cancel the last separator if necessary.
+static void MaybeCancelLastSeparator(State *state) {
+ if (state->nest_level >= 1 && state->append &&
+ state->out_begin <= state->out_cur - 2) {
+ state->out_cur -= 2;
+ *state->out_cur = '\0';
+ }
+}
+
+// Returns true if the identifier pointed to by "mangled_cur" is an
+// anonymous namespace.
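+// (For example, GCC mangles an anonymous namespace as an identifier
+// beginning with "_GLOBAL__N_"; ParseIdentifier() below prints such
+// identifiers as "(anonymous namespace)".)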
+static bool IdentifierIsAnonymousNamespace(State *state) {
+ const char anon_prefix[] = "_GLOBAL__N_";
+ return (state->number > sizeof(anon_prefix) - 1 && // Should be longer.
+ StrPrefix(state->mangled_cur, anon_prefix));
+}
+
+// Forward declarations of our parsing functions.
+static bool ParseMangledName(State *state);
+static bool ParseEncoding(State *state);
+static bool ParseName(State *state);
+static bool ParseUnscopedName(State *state);
+static bool ParseUnscopedTemplateName(State *state);
+static bool ParseNestedName(State *state);
+static bool ParsePrefix(State *state);
+static bool ParseUnqualifiedName(State *state);
+static bool ParseSourceName(State *state);
+static bool ParseLocalSourceName(State *state);
+static bool ParseNumber(State *state);
+static bool ParseFloatNumber(State *state);
+static bool ParseSeqId(State *state);
+static bool ParseIdentifier(State *state);
+static bool ParseOperatorName(State *state);
+static bool ParseSpecialName(State *state);
+static bool ParseCallOffset(State *state);
+static bool ParseNVOffset(State *state);
+static bool ParseVOffset(State *state);
+static bool ParseCtorDtorName(State *state);
+static bool ParseType(State *state);
+static bool ParseCVQualifiers(State *state);
+static bool ParseBuiltinType(State *state);
+static bool ParseFunctionType(State *state);
+static bool ParseBareFunctionType(State *state);
+static bool ParseClassEnumType(State *state);
+static bool ParseArrayType(State *state);
+static bool ParsePointerToMemberType(State *state);
+static bool ParseTemplateParam(State *state);
+static bool ParseTemplateTemplateParam(State *state);
+static bool ParseTemplateArgs(State *state);
+static bool ParseTemplateArg(State *state);
+static bool ParseExpression(State *state);
+static bool ParseExprPrimary(State *state);
+static bool ParseLocalName(State *state);
+static bool ParseDiscriminator(State *state);
+static bool ParseSubstitution(State *state);
+
+// Implementation note: the following code is a straightforward
+// translation of the Itanium C++ ABI defined in BNF with a couple of
+// exceptions.
+//
+// - Support GNU extensions not defined in the Itanium C++ ABI
+// - <prefix> and <template-prefix> are combined to avoid infinite loop
+// - Reorder patterns to shorten the code
+// - Reorder patterns to give greedier functions precedence
+//   (we mark these cases with "Less greedy than" comments in the code)
+//
+// Each parsing function changes the state and returns true on
+// success. Otherwise, it leaves the state unchanged and returns
+// false. To ensure that the state isn't changed in the latter case,
+// we save the original state before we call more than one parsing
+// function consecutively with &&, and restore it if unsuccessful.
+// See ParseEncoding() as an example of this convention. We follow
+// the convention throughout the code.
+//
+// Originally we tried to do demangling without following the full ABI
+// syntax but it turned out we needed to follow the full syntax to
+// parse complicated cases like nested template arguments. Note that
+// implementing a full-fledged demangler isn't trivial (libiberty's
+// cp-demangle.c has more than 4,300 lines).
+//
+// Note that (foo) in <(foo) ...> is a modifier to be ignored.
+//
+// Reference:
+// - Itanium C++ ABI
+// <http://www.codesourcery.com/cxx-abi/abi.html#mangling>
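+//
+// The save-and-restore convention in a nutshell (an illustrative
+// sketch; ParseFoo and ParseBar are hypothetical non-terminals):
+//
+//   static bool ParseFooBar(State *state) {
+//     State copy = *state;              // Save the original state.
+//     if (ParseFoo(state) && ParseBar(state)) {
+//       return true;                    // Consumed "foo bar".
+//     }
+//     *state = copy;                    // Backtrack on failure.
+//     return false;
+//   }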
+
+// <mangled-name> ::= _Z <encoding>
+static bool ParseMangledName(State *state) {
+ if (ParseTwoChar(state, "_Z") && ParseEncoding(state)) {
+ // Append trailing version suffix if any.
+ // ex. _Z3foo@@GLIBCXX_3.4
+ if (state->mangled_cur < state->mangled_end &&
+ state->mangled_cur[0] == '@') {
+ MaybeAppend(state, state->mangled_cur);
+ state->mangled_cur = state->mangled_end;
+ }
+ return true;
+ }
+ return false;
+}
+
+// <encoding> ::= <(function) name> <bare-function-type>
+// ::= <(data) name>
+// ::= <special-name>
+static bool ParseEncoding(State *state) {
+ State copy = *state;
+ if (ParseName(state) && ParseBareFunctionType(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseName(state) || ParseSpecialName(state)) {
+ return true;
+ }
+ return false;
+}
+
+// <name> ::= <nested-name>
+// ::= <unscoped-template-name> <template-args>
+// ::= <unscoped-name>
+// ::= <local-name>
+static bool ParseName(State *state) {
+ if (ParseNestedName(state) || ParseLocalName(state)) {
+ return true;
+ }
+
+ State copy = *state;
+ if (ParseUnscopedTemplateName(state) &&
+ ParseTemplateArgs(state)) {
+ return true;
+ }
+ *state = copy;
+
+ // Less greedy than <unscoped-template-name> <template-args>.
+ if (ParseUnscopedName(state)) {
+ return true;
+ }
+ return false;
+}
+
+// <unscoped-name> ::= <unqualified-name>
+// ::= St <unqualified-name>
+static bool ParseUnscopedName(State *state) {
+ if (ParseUnqualifiedName(state)) {
+ return true;
+ }
+
+ State copy = *state;
+ if (ParseTwoChar(state, "St") &&
+ MaybeAppend(state, "std::") &&
+ ParseUnqualifiedName(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <unscoped-template-name> ::= <unscoped-name>
+// ::= <substitution>
+static bool ParseUnscopedTemplateName(State *state) {
+ return ParseUnscopedName(state) || ParseSubstitution(state);
+}
+
+// <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
+// ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+static bool ParseNestedName(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'N') &&
+ EnterNestedName(state) &&
+ Optional(ParseCVQualifiers(state)) &&
+ ParsePrefix(state) &&
+ LeaveNestedName(state, copy.nest_level) &&
+ ParseChar(state, 'E')) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// This part is tricky. If we translated these productions to code
+// literally, we would end up in an infinite loop (the grammar is
+// left-recursive). Hence we merge them to avoid that case.
+//
+// <prefix> ::= <prefix> <unqualified-name>
+// ::= <template-prefix> <template-args>
+// ::= <template-param>
+// ::= <substitution>
+// ::= # empty
+// <template-prefix> ::= <prefix> <(template) unqualified-name>
+// ::= <template-param>
+// ::= <substitution>
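+//
+// For example, in "_ZN3Foo3BarEv" (Foo::Bar() in the table in
+// demangle.h), this function consumes "3Foo3Bar" and emits "Foo",
+// then "::", then "Bar".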
+static bool ParsePrefix(State *state) {
+ bool has_something = false;
+ while (true) {
+ MaybeAppendSeparator(state);
+ if (ParseTemplateParam(state) ||
+ ParseSubstitution(state) ||
+ ParseUnscopedName(state)) {
+ has_something = true;
+ MaybeIncreaseNestLevel(state);
+ continue;
+ }
+ MaybeCancelLastSeparator(state);
+ if (has_something && ParseTemplateArgs(state)) {
+ return ParsePrefix(state);
+ } else {
+ break;
+ }
+ }
+ return true;
+}
+
+// <unqualified-name> ::= <operator-name>
+// ::= <ctor-dtor-name>
+// ::= <source-name>
+// ::= <local-source-name>
+static bool ParseUnqualifiedName(State *state) {
+ return (ParseOperatorName(state) ||
+ ParseCtorDtorName(state) ||
+ ParseSourceName(state) ||
+ ParseLocalSourceName(state));
+}
+
+// <source-name> ::= <positive length number> <identifier>
+static bool ParseSourceName(State *state) {
+ State copy = *state;
+ if (ParseNumber(state) && ParseIdentifier(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <local-source-name> ::= L <source-name> [<discriminator>]
+//
+// References:
+// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=31775
+// http://gcc.gnu.org/viewcvs?view=rev&revision=124467
+static bool ParseLocalSourceName(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'L') && ParseSourceName(state) &&
+ Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <number> ::= [n] <non-negative decimal integer>
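+// (For example, "42" sets state->number to 42, and "n42" sets it
+// to -42.)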
+static bool ParseNumber(State *state) {
+ int sign = 1;
+ if (ParseChar(state, 'n')) {
+ sign = -1;
+ }
+ const char *p = state->mangled_cur;
+ int number = 0;
+ for (;p < state->mangled_end; ++p) {
+ if ((*p >= '0' && *p <= '9')) {
+ number = number * 10 + (*p - '0');
+ } else {
+ break;
+ }
+ }
+ if (p != state->mangled_cur) { // Conversion succeeded.
+ state->mangled_cur = p;
+ state->number = number * sign;
+ return true;
+ }
+ return false;
+}
+
+// Floating-point literals are encoded using a fixed-length lowercase
+// hexadecimal string.
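+// (For example, the hex string "3e8" decodes to 1000; the value is
+// kept in state->number and never printed.)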
+static bool ParseFloatNumber(State *state) {
+ const char *p = state->mangled_cur;
+ int number = 0;
+ for (;p < state->mangled_end; ++p) {
+ if ((*p >= '0' && *p <= '9')) {
+ number = number * 16 + (*p - '0');
+ } else if (*p >= 'a' && *p <= 'f') {
+ number = number * 16 + (*p - 'a' + 10);
+ } else {
+ break;
+ }
+ }
+ if (p != state->mangled_cur) { // Conversion succeeded.
+ state->mangled_cur = p;
+ state->number = number;
+ return true;
+ }
+ return false;
+}
+
+// The <seq-id> is a sequence number in base 36, using digits and
+// upper case letters.
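+// (For example, the seq-id "1A" denotes 1 * 36 + 10 = 46.)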
+static bool ParseSeqId(State *state) {
+ const char *p = state->mangled_cur;
+ int number = 0;
+ for (;p < state->mangled_end; ++p) {
+ if ((*p >= '0' && *p <= '9')) {
+ number = number * 36 + (*p - '0');
+ } else if (*p >= 'A' && *p <= 'Z') {
+ number = number * 36 + (*p - 'A' + 10);
+ } else {
+ break;
+ }
+ }
+ if (p != state->mangled_cur) { // Conversion succeeded.
+ state->mangled_cur = p;
+ state->number = number;
+ return true;
+ }
+ return false;
+}
+
+// <identifier> ::= <unqualified source code identifier>
+static bool ParseIdentifier(State *state) {
+ if (state->number == -1 ||
+ RemainingLength(state) < state->number) {
+ return false;
+ }
+ if (IdentifierIsAnonymousNamespace(state)) {
+ MaybeAppend(state, "(anonymous namespace)");
+ } else {
+ MaybeAppendWithLength(state, state->mangled_cur, state->number);
+ }
+ state->mangled_cur += state->number;
+ state->number = -1; // Reset the number.
+ return true;
+}
+
+// <operator-name> ::= nw, and other two-letter cases
+// ::= cv <type> # (cast)
+// ::= v <digit> <source-name> # vendor extended operator
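+// (For example, "nw" stands for operator new and "rm" for operator%;
+// the full two-letter list lives in kOperatorList above.)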
+static bool ParseOperatorName(State *state) {
+ if (RemainingLength(state) < 2) {
+ return false;
+ }
+ // First check the "cv" (cast) case.
+ State copy = *state;
+ if (ParseTwoChar(state, "cv") &&
+ MaybeAppend(state, "operator ") &&
+ EnterNestedName(state) &&
+ ParseType(state) &&
+ LeaveNestedName(state, copy.nest_level)) {
+ return true;
+ }
+ *state = copy;
+
+ // Then vendor extended operators.
+ if (ParseChar(state, 'v') && ParseCharClass(state, "0123456789") &&
+ ParseSourceName(state)) {
+ return true;
+ }
+ *state = copy;
+
+ // Other operator names should start with a lowercase letter followed
+ // by a lowercase or uppercase letter.
+ if (!(IsLower(state->mangled_cur[0]) &&
+ IsAlpha(state->mangled_cur[1]))) {
+ return false;
+ }
+ // We may want to perform a binary search if we really need speed.
+ const AbbrevPair *p;
+ for (p = kOperatorList; p->abbrev != NULL; ++p) {
+ if (state->mangled_cur[0] == p->abbrev[0] &&
+ state->mangled_cur[1] == p->abbrev[1]) {
+ MaybeAppend(state, "operator");
+ if (IsLower(*p->real_name)) { // new, delete, etc.
+ MaybeAppend(state, " ");
+ }
+ MaybeAppend(state, p->real_name);
+ state->mangled_cur += 2;
+ return true;
+ }
+ }
+ return false;
+}
+
+// <special-name> ::= TV <type>
+// ::= TT <type>
+// ::= TI <type>
+// ::= TS <type>
+// ::= Tc <call-offset> <call-offset> <(base) encoding>
+// ::= GV <(object) name>
+// ::= T <call-offset> <(base) encoding>
+// G++ extensions:
+// ::= TC <type> <(offset) number> _ <(base) type>
+// ::= TF <type>
+// ::= TJ <type>
+// ::= GR <name>
+// ::= GA <encoding>
+// ::= Th <call-offset> <(base) encoding>
+// ::= Tv <call-offset> <(base) encoding>
+//
+// Note: we don't care much about them since they don't appear in
+// stack traces. They are special data.
+static bool ParseSpecialName(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'T') &&
+ ParseCharClass(state, "VTIS") &&
+ ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseTwoChar(state, "Tc") && ParseCallOffset(state) &&
+ ParseCallOffset(state) && ParseEncoding(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseTwoChar(state, "GV") &&
+ ParseName(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'T') && ParseCallOffset(state) &&
+ ParseEncoding(state)) {
+ return true;
+ }
+ *state = copy;
+
+ // G++ extensions
+ if (ParseTwoChar(state, "TC") && ParseType(state) &&
+ ParseNumber(state) && ParseChar(state, '_') &&
+ DisableAppend(state) &&
+ ParseType(state)) {
+ RestoreAppend(state, copy.append);
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'T') && ParseCharClass(state, "FJ") &&
+ ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseTwoChar(state, "GR") && ParseName(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseTwoChar(state, "GA") && ParseEncoding(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'T') && ParseCharClass(state, "hv") &&
+ ParseCallOffset(state) && ParseEncoding(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <call-offset> ::= h <nv-offset> _
+// ::= v <v-offset> _
+static bool ParseCallOffset(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'h') &&
+ ParseNVOffset(state) && ParseChar(state, '_')) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'v') &&
+ ParseVOffset(state) && ParseChar(state, '_')) {
+ return true;
+ }
+ *state = copy;
+
+ return false;
+}
+
+// <nv-offset> ::= <(offset) number>
+static bool ParseNVOffset(State *state) {
+ return ParseNumber(state);
+}
+
+// <v-offset> ::= <(offset) number> _ <(virtual offset) number>
+static bool ParseVOffset(State *state) {
+ State copy = *state;
+ if (ParseNumber(state) && ParseChar(state, '_') &&
+ ParseNumber(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <ctor-dtor-name> ::= C1 | C2 | C3
+// ::= D0 | D1 | D2
+static bool ParseCtorDtorName(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'C') &&
+ ParseCharClass(state, "123")) {
+ const char * const prev_name = state->prev_name;
+ const int prev_name_length = state->prev_name_length;
+ MaybeAppendWithLength(state, prev_name, prev_name_length);
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'D') &&
+ ParseCharClass(state, "012")) {
+ const char * const prev_name = state->prev_name;
+ const int prev_name_length = state->prev_name_length;
+ MaybeAppend(state, "~");
+ MaybeAppendWithLength(state, prev_name, prev_name_length);
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <type> ::= <CV-qualifiers> <type>
+// ::= P <type>
+// ::= R <type>
+// ::= C <type>
+// ::= G <type>
+// ::= U <source-name> <type>
+// ::= <builtin-type>
+// ::= <function-type>
+// ::= <class-enum-type>
+// ::= <array-type>
+// ::= <pointer-to-member-type>
+// ::= <template-template-param> <template-args>
+// ::= <template-param>
+// ::= <substitution>
+static bool ParseType(State *state) {
+ // We should check CV-qualifiers and the P, R, C, G prefixes first.
+ State copy = *state;
+ if (ParseCVQualifiers(state) && ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseCharClass(state, "PRCG") && ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'U') && ParseSourceName(state) &&
+ ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseBuiltinType(state) ||
+ ParseFunctionType(state) ||
+ ParseClassEnumType(state) ||
+ ParseArrayType(state) ||
+ ParsePointerToMemberType(state) ||
+ ParseSubstitution(state)) {
+ return true;
+ }
+
+ if (ParseTemplateTemplateParam(state) &&
+ ParseTemplateArgs(state)) {
+ return true;
+ }
+ *state = copy;
+
+ // Less greedy than <template-template-param> <template-args>.
+ if (ParseTemplateParam(state)) {
+ return true;
+ }
+
+ return false;
+}
+
+// <CV-qualifiers> ::= [r] [V] [K]
+// We don't allow empty <CV-qualifiers> to avoid infinite loop in
+// ParseType().
+static bool ParseCVQualifiers(State *state) {
+ int num_cv_qualifiers = 0;
+ num_cv_qualifiers += ParseChar(state, 'r');
+ num_cv_qualifiers += ParseChar(state, 'V');
+ num_cv_qualifiers += ParseChar(state, 'K');
+ return num_cv_qualifiers > 0;
+}
+
+// <builtin-type> ::= v, etc.
+// ::= u <source-name>
+static bool ParseBuiltinType(State *state) {
+ const AbbrevPair *p;
+ for (p = kBuiltinTypeList; p->abbrev != NULL; ++p) {
+ if (state->mangled_cur[0] == p->abbrev[0]) {
+ MaybeAppend(state, p->real_name);
+ ++state->mangled_cur;
+ return true;
+ }
+ }
+
+ State copy = *state;
+ if (ParseChar(state, 'u') && ParseSourceName(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <function-type> ::= F [Y] <bare-function-type> E
+static bool ParseFunctionType(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'F') && Optional(ParseChar(state, 'Y')) &&
+ ParseBareFunctionType(state) && ParseChar(state, 'E')) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <bare-function-type> ::= <(signature) type>+
+static bool ParseBareFunctionType(State *state) {
+ State copy = *state;
+ DisableAppend(state);
+ if (OneOrMore(ParseType, state)) {
+ RestoreAppend(state, copy.append);
+ MaybeAppend(state, "()");
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <class-enum-type> ::= <name>
+static bool ParseClassEnumType(State *state) {
+ return ParseName(state);
+}
+
+// <array-type> ::= A <(positive dimension) number> _ <(element) type>
+// ::= A [<(dimension) expression>] _ <(element) type>
+static bool ParseArrayType(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'A') && ParseNumber(state) &&
+ ParseChar(state, '_') && ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'A') && Optional(ParseExpression(state)) &&
+ ParseChar(state, '_') && ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <pointer-to-member-type> ::= M <(class) type> <(member) type>
+static bool ParsePointerToMemberType(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'M') && ParseType(state) &&
+ ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <template-param> ::= T_
+// ::= T <parameter-2 non-negative number> _
+static bool ParseTemplateParam(State *state) {
+ if (ParseTwoChar(state, "T_")) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+ }
+
+ State copy = *state;
+ if (ParseChar(state, 'T') && ParseNumber(state) &&
+ ParseChar(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support template substitutions.
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+
+// <template-template-param> ::= <template-param>
+// ::= <substitution>
+static bool ParseTemplateTemplateParam(State *state) {
+ return (ParseTemplateParam(state) ||
+ ParseSubstitution(state));
+}
+
+// <template-args> ::= I <template-arg>+ E
+static bool ParseTemplateArgs(State *state) {
+ State copy = *state;
+ DisableAppend(state);
+ if (ParseChar(state, 'I') &&
+ OneOrMore(ParseTemplateArg, state) &&
+ ParseChar(state, 'E')) {
+ RestoreAppend(state, copy.append);
+ MaybeAppend(state, "<>");
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <template-arg> ::= <type>
+// ::= <expr-primary>
+// ::= X <expression> E
+static bool ParseTemplateArg(State *state) {
+ if (ParseType(state) ||
+ ParseExprPrimary(state)) {
+ return true;
+ }
+
+ State copy = *state;
+ if (ParseChar(state, 'X') && ParseExpression(state) &&
+ ParseChar(state, 'E')) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <expression> ::= <template-param>
+// ::= <expr-primary>
+// ::= <unary operator-name> <expression>
+// ::= <binary operator-name> <expression> <expression>
+// ::= <trinary operator-name> <expression> <expression>
+// <expression>
+// ::= st <type>
+// ::= sr <type> <unqualified-name> <template-args>
+// ::= sr <type> <unqualified-name>
+static bool ParseExpression(State *state) {
+ if (ParseTemplateParam(state) || ParseExprPrimary(state)) {
+ return true;
+ }
+
+ State copy = *state;
+ if (ParseOperatorName(state) &&
+ ParseExpression(state) &&
+ ParseExpression(state) &&
+ ParseExpression(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseOperatorName(state) &&
+ ParseExpression(state) &&
+ ParseExpression(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseOperatorName(state) &&
+ ParseExpression(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseTwoChar(state, "st") && ParseType(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseTwoChar(state, "sr") && ParseType(state) &&
+ ParseUnqualifiedName(state) &&
+ ParseTemplateArgs(state)) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseTwoChar(state, "sr") && ParseType(state) &&
+ ParseUnqualifiedName(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <expr-primary> ::= L <type> <(value) number> E
+// ::= L <type> <(value) float> E
+// ::= L <mangled-name> E
+// // A bug in g++'s C++ ABI version 2 (-fabi-version=2).
+// ::= LZ <encoding> E
+static bool ParseExprPrimary(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'L') && ParseType(state) &&
+ ParseNumber(state) &&
+ ParseChar(state, 'E')) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'L') && ParseType(state) &&
+ ParseFloatNumber(state) &&
+ ParseChar(state, 'E')) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'L') && ParseMangledName(state) &&
+ ParseChar(state, 'E')) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseTwoChar(state, "LZ") && ParseEncoding(state) &&
+ ParseChar(state, 'E')) {
+ return true;
+ }
+ *state = copy;
+
+ return false;
+}
+
+// <local-name> := Z <(function) encoding> E <(entity) name>
+// [<discriminator>]
+// := Z <(function) encoding> E s [<discriminator>]
+static bool ParseLocalName(State *state) {
+ State copy = *state;
+ if (ParseChar(state, 'Z') && ParseEncoding(state) &&
+ ParseChar(state, 'E') && MaybeAppend(state, "::") &&
+ ParseName(state) && Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+ *state = copy;
+
+ if (ParseChar(state, 'Z') && ParseEncoding(state) &&
+ ParseTwoChar(state, "Es") && Optional(ParseDiscriminator(state))) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <discriminator> := _ <(non-negative) number>
+static bool ParseDiscriminator(State *state) {
+ State copy = *state;
+ if (ParseChar(state, '_') && ParseNumber(state)) {
+ return true;
+ }
+ *state = copy;
+ return false;
+}
+
+// <substitution> ::= S_
+// ::= S <seq-id> _
+// ::= St, etc.
+static bool ParseSubstitution(State *state) {
+ if (ParseTwoChar(state, "S_")) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+ }
+
+ State copy = *state;
+ if (ParseChar(state, 'S') && ParseSeqId(state) &&
+ ParseChar(state, '_')) {
+ MaybeAppend(state, "?"); // We don't support substitutions.
+ return true;
+ }
+ *state = copy;
+
+ // Expand abbreviations like "St" => "std".
+ if (ParseChar(state, 'S')) {
+ const AbbrevPair *p;
+ for (p = kSubstitutionList; p->abbrev != NULL; ++p) {
+ if (state->mangled_cur[0] == p->abbrev[1]) {
+ MaybeAppend(state, "std");
+ if (p->real_name[0] != '\0') {
+ MaybeAppend(state, "::");
+ MaybeAppend(state, p->real_name);
+ }
+ state->mangled_cur += 1;
+ return true;
+ }
+ }
+ }
+ *state = copy;
+ return false;
+}
+
+// The demangler entry point.
+bool Demangle(const char *mangled, char *out, int out_size) {
+ State state;
+ InitState(&state, mangled, out, out_size);
+ return (ParseMangledName(&state) &&
+ state.overflowed == false &&
+ RemainingLength(&state) == 0);
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/demangle.h b/extern/libmv/third_party/glog/src/demangle.h
new file mode 100644
index 00000000000..9c7591527c0
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/demangle.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+//
+// An async-signal-safe and thread-safe demangler for Itanium C++ ABI
+// (aka G++ V3 ABI).
+
+// The demangler is implemented to be used in async signal handlers to
+// symbolize stack traces. We cannot use libstdc++'s
+// abi::__cxa_demangle() in such signal handlers since it's not async
+// signal safe (it uses malloc() internally).
+//
+// Note that this demangler doesn't support full demangling. More
+// specifically, it doesn't print types of function parameters and
+// types of template arguments. It just skips them. However, it's
+// still very useful to extract basic information such as class,
+// function, constructor, destructor, and operator names.
+//
+// See the implementation note in demangle.cc if you are interested.
+//
+// Example:
+//
+// | Mangled Name | The Demangler | abi::__cxa_demangle()
+// |---------------|---------------|-----------------------
+// | _Z1fv | f() | f()
+// | _Z1fi | f() | f(int)
+// | _Z3foo3bar | foo() | foo(bar)
+// | _Z1fIiEvi | f<>() | void f<int>(int)
+// | _ZN1N1fE | N::f | N::f
+// | _ZN3Foo3BarEv | Foo::Bar() | Foo::Bar()
+// | _Zrm1XS_" | operator%() | operator%(X, X)
+// | _ZN3FooC1Ev | Foo::Foo() | Foo::Foo()
+// | _Z1fSs | f() | f(std::basic_string<char,
+// | | | std::char_traits<char>,
+// | | | std::allocator<char> >)
+//
+// See the unit test for more examples.
+//
+// Note: we might want to write demanglers for ABIs other than Itanium
+// C++ ABI in the future.
+//
+
+#ifndef BASE_DEMANGLE_H_
+#define BASE_DEMANGLE_H_
+
+#include "config.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// Demangle "mangled". On success, return true and write the
+// demangled symbol name to "out". Otherwise, return false.
+// "out" is modified even if demangling is unsuccessful.
+bool Demangle(const char *mangled, char *out, int out_size);
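+//
+// A minimal usage sketch (illustrative; the buffer size is an
+// arbitrary choice):
+//
+//   char demangled[256];
+//   if (Demangle("_ZN3Foo3BarEv", demangled, sizeof(demangled))) {
+//     // demangled now holds "Foo::Bar()".
+//   }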
+
+_END_GOOGLE_NAMESPACE_
+
+#endif // BASE_DEMANGLE_H_
diff --git a/extern/libmv/third_party/glog/src/glog/log_severity.h b/extern/libmv/third_party/glog/src/glog/log_severity.h
new file mode 100644
index 00000000000..17805fbadd4
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/glog/log_severity.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_LOG_SEVERITY_H__
+#define BASE_LOG_SEVERITY_H__
+
+// Annoying stuff for windows -- makes sure clients can import these functions
+#ifndef GOOGLE_GLOG_DLL_DECL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllimport)
+# else
+# define GOOGLE_GLOG_DLL_DECL
+# endif
+#endif
+
+// Variables of type LogSeverity are widely taken to lie in the range
+// [0, NUM_SEVERITIES-1]. Be careful to preserve this assumption if
+// you ever need to change their values or add a new severity.
+typedef int LogSeverity;
+
+const int INFO = 0, WARNING = 1, ERROR = 2, FATAL = 3, NUM_SEVERITIES = 4;
+
+// DFATAL is FATAL in debug mode, ERROR in normal mode
+#ifdef NDEBUG
+#define DFATAL_LEVEL ERROR
+#else
+#define DFATAL_LEVEL FATAL
+#endif
+
+extern GOOGLE_GLOG_DLL_DECL const char* const LogSeverityNames[NUM_SEVERITIES];
+
+// NDEBUG usage helpers related to (RAW_)DCHECK:
+//
+// DEBUG_MODE is for small !NDEBUG uses like
+// if (DEBUG_MODE) foo.CheckThatFoo();
+// instead of substantially more verbose
+// #ifndef NDEBUG
+// foo.CheckThatFoo();
+// #endif
+//
+// IF_DEBUG_MODE is for small !NDEBUG uses like
+// IF_DEBUG_MODE( string error; )
+// DCHECK(Foo(&error)) << error;
+// instead of substantially more verbose
+// #ifndef NDEBUG
+// string error;
+// DCHECK(Foo(&error)) << error;
+// #endif
+//
+#ifdef NDEBUG
+enum { DEBUG_MODE = 0 };
+#define IF_DEBUG_MODE(x)
+#else
+enum { DEBUG_MODE = 1 };
+#define IF_DEBUG_MODE(x) x
+#endif
+
+#endif // BASE_LOG_SEVERITY_H__
diff --git a/extern/libmv/third_party/glog/src/glog/logging.h b/extern/libmv/third_party/glog/src/glog/logging.h
new file mode 100644
index 00000000000..a58d478ab17
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/glog/logging.h
@@ -0,0 +1,1507 @@
+// Copyright (c) 1999, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Ray Sidney
+//
+// This file contains #include information about logging-related stuff.
+// Pretty much everybody needs to #include this file so that they can
+// log various happenings.
+//
+
+#ifndef _LOGGING_H_
+#define _LOGGING_H_
+
+#include <errno.h>
+#include <string.h>
+#include <time.h>
+#include <string>
+#if 1
+# include <unistd.h>
+#endif
+#ifdef __DEPRECATED
+// Make GCC quiet.
+# undef __DEPRECATED
+# include <strstream>
+# define __DEPRECATED
+#else
+# include <strstream>
+#endif
+#include <vector>
+
+// Annoying stuff for windows -- makes sure clients can import these functions
+#ifndef GOOGLE_GLOG_DLL_DECL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllimport)
+# else
+# define GOOGLE_GLOG_DLL_DECL
+# endif
+#endif
+
+// We care a lot about number of bits things take up. Unfortunately,
+// systems define their bit-specific ints in a lot of different ways.
+// We use our own way, and have a typedef to get there.
+// Note: these commands below may look like "#if 1" or "#if 0", but
+// that's because they were constructed that way at ./configure time.
+// Look at logging.h.in to see how they're calculated (based on your config).
+#if 1
+#include <stdint.h> // the normal place uint16_t is defined
+#endif
+#if 1
+#include <sys/types.h> // the normal place u_int16_t is defined
+#endif
+#if 1
+#include <inttypes.h> // a third place for uint16_t or u_int16_t
+#endif
+
+#if 1
+#include "third_party/gflags/gflags.h"
+#endif
+
+namespace google {
+
+#if 1 // the C99 format
+typedef int32_t int32;
+typedef uint32_t uint32;
+typedef int64_t int64;
+typedef uint64_t uint64;
+#elif 1 // the BSD format
+typedef int32_t int32;
+typedef u_int32_t uint32;
+typedef int64_t int64;
+typedef u_int64_t uint64;
+#elif 0 // the windows (vc7) format
+typedef __int32 int32;
+typedef unsigned __int32 uint32;
+typedef __int64 int64;
+typedef unsigned __int64 uint64;
+#else
+#error Do not know how to define a 32-bit integer quantity on your system
+#endif
+
+}
+
+// The global value of GOOGLE_STRIP_LOG. All the messages logged to
+// LOG(XXX) with severity less than GOOGLE_STRIP_LOG will not be displayed.
+// If it can be determined at compile time that the message will not be
+// printed, the statement will be compiled out.
+//
+// Example: to strip out all INFO and WARNING messages, use the value
+// of 2 below. To make an exception for WARNING messages from a single
+// file, add "#define GOOGLE_STRIP_LOG 1" to that file _before_ including
+// base/logging.h
+#ifndef GOOGLE_STRIP_LOG
+#define GOOGLE_STRIP_LOG 0
+#endif
+
+// GCC can be told that a certain branch is not likely to be taken (for
+// instance, a CHECK failure), and use that information in static analysis.
+// Giving it this information can help it optimize for the common case in
+// the absence of better information (ie. -fprofile-arcs).
+//
+#ifndef GOOGLE_PREDICT_BRANCH_NOT_TAKEN
+#if 1
+#define GOOGLE_PREDICT_BRANCH_NOT_TAKEN(x) (__builtin_expect(x, 0))
+#else
+#define GOOGLE_PREDICT_BRANCH_NOT_TAKEN(x) x
+#endif
+#endif
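+
+// An illustrative use (HandleRareError is a hypothetical function):
+//
+//   if (GOOGLE_PREDICT_BRANCH_NOT_TAKEN(ptr == NULL)) {
+//     HandleRareError();
+//   }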
+
+// Make a bunch of macros for logging. The way to log things is to stream
+// things to LOG(<a particular severity level>). E.g.,
+//
+// LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// You can capture log messages in a string, rather than reporting them
+// immediately:
+//
+// vector<string> errors;
+// LOG_STRING(ERROR, &errors) << "Couldn't parse cookie #" << cookie_num;
+//
+// This pushes back the new error onto 'errors'; if given a NULL pointer,
+// it reports the error via LOG(ERROR).
+//
+// You can also do conditional logging:
+//
+// LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// You can also do occasional logging (log every n'th occurrence of an
+// event):
+//
+// LOG_EVERY_N(INFO, 10) << "Got the " << COUNTER << "th cookie";
+//
+// The above will cause log messages to be output on the 1st, 11th, 21st, ...
+// times it is executed. Note that the special COUNTER value is used to
+// identify which repetition is happening.
+//
+// You can also do occasional conditional logging (log every n'th
+// occurrence of an event, when condition is satisfied):
+//
+// LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << COUNTER
+// << "th big cookie";
+//
+// You can log messages the first N times your code executes a line. E.g.
+//
+// LOG_FIRST_N(INFO, 20) << "Got the " << COUNTER << "th cookie";
+//
+// Outputs log messages for the first 20 times it is executed.
+//
+// Analogous SYSLOG, SYSLOG_IF, and SYSLOG_EVERY_N macros are available.
+// These log to syslog as well as to the normal logs. If you use these at
+// all, you need to be aware that syslog can drastically reduce performance,
+// especially if it is configured for remote logging! Don't use these
+// unless you fully understand this and have a concrete need to use them.
+// Even then, try to minimize your use of them.
+//
+// There are also "debug mode" logging macros like the ones above:
+//
+// DLOG(INFO) << "Found cookies";
+//
+// DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// DLOG_EVERY_N(INFO, 10) << "Got the " << COUNTER << "th cookie";
+//
+// All "debug mode" logging is compiled away to nothing for non-debug mode
+// compiles.
+//
+// We also have
+//
+// LOG_ASSERT(assertion);
+// DLOG_ASSERT(assertion);
+//
+// which is syntactic sugar for {,D}LOG_IF(FATAL, assert fails) << assertion;
+//
+// There are "verbose level" logging macros. They look like
+//
+// VLOG(1) << "I'm printed when you run the program with --v=1 or more";
+// VLOG(2) << "I'm printed when you run the program with --v=2 or more";
+//
+// These always log at the INFO log level (when they log at all).
+// The verbose logging can also be turned on module-by-module. For instance,
+// --vmodule=mapreduce=2,file=1,gfs*=3 --v=0
+// will cause:
+// a. VLOG(2) and lower messages to be printed from mapreduce.{h,cc}
+// b. VLOG(1) and lower messages to be printed from file.{h,cc}
+// c. VLOG(3) and lower messages to be printed from files prefixed with "gfs"
+// d. VLOG(0) and lower messages to be printed from elsewhere
+//
+// The wildcarding functionality shown by (c) supports both '*' (match
+// 0 or more characters) and '?' (match any single character) wildcards.
+//
+// There's also a VLOG_IS_ON(n) "verbose level" condition macro. To be used as
+//
+// if (VLOG_IS_ON(2)) {
+// // do some logging preparation and logging
+// // that can't be accomplished with just VLOG(2) << ...;
+// }
+//
+// There are also VLOG_IF, VLOG_EVERY_N and VLOG_IF_EVERY_N "verbose level"
+// condition macros for simple cases, when no extra computation or
+// preparation for logging is needed:
+// VLOG_IF(1, (size > 1024))
+// << "I'm printed when size is more than 1024 and when you run the "
+// "program with --v=1 or more";
+// VLOG_EVERY_N(1, 10)
+// << "I'm printed every 10th occurrence, and when you run the program "
+// "with --v=1 or more. Present occurence is " << COUNTER;
+// VLOG_IF_EVERY_N(1, (size > 1024), 10)
+// << "I'm printed on every 10th occurence of case when size is more "
+// " than 1024, when you run the program with --v=1 or more. ";
+// "Present occurence is " << COUNTER;
+//
+// The supported severity levels for macros that allow you to specify one
+// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
+// Note that messages of a given severity are logged not only in the
+// logfile for that severity, but also in all logfiles of lower severity.
+// E.g., a message of severity FATAL will be logged to the logfiles of
+// severity FATAL, ERROR, WARNING, and INFO.
+//
+// There is also the special severity of DFATAL, which logs FATAL in
+// debug mode, ERROR in normal mode.
+//
+// Very important: logging a message at the FATAL severity level causes
+// the program to terminate (after the message is logged).
+//
+// Unless otherwise specified, logs will be written to the filename
+// "<program name>.<hostname>.<user name>.log.<severity level>.", followed
+// by the date, time, and pid (you can't prevent the date, time, and pid
+// from being in the filename).
+//
+// The logging code takes two flags:
+// --v=# set the verbose level
+// --logtostderr log all the messages to stderr instead of to logfiles
+
+// LOG LINE PREFIX FORMAT
+//
+// Log lines have this form:
+//
+// Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+//
+// where the fields are defined as follows:
+//
+// L A single character, representing the log level
+// (eg 'I' for INFO)
+// mm The month (zero padded; ie May is '05')
+// dd The day (zero padded)
+// hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+// threadid The space-padded thread ID as returned by GetTID()
+// (this matches the PID on Linux)
+// file The file name
+// line The line number
+// msg The user-supplied message
+//
+// Example:
+//
+// I1103 11:57:31.739339 24395 google.cc:2341] Command line: ./some_prog
+// I1103 11:57:31.739403 24395 google.cc:2342] Process id 24395
+//
+// NOTE: although the microseconds are useful for comparing events on
+// a single machine, clocks on different machines may not be well
+// synchronized. Hence, use caution when comparing the low bits of
+// timestamps from different machines.
+
+#ifndef DECLARE_VARIABLE
+#define MUST_UNDEF_GFLAGS_DECLARE_MACROS
+#define DECLARE_VARIABLE(type, name, tn) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##tn##_instead { \
+ extern GOOGLE_GLOG_DLL_DECL type FLAGS_##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_##tn##_instead::FLAGS_##name
+
+// bool specialization
+#define DECLARE_bool(name) \
+ DECLARE_VARIABLE(bool, name, bool)
+
+// int32 specialization
+#define DECLARE_int32(name) \
+ DECLARE_VARIABLE(google::int32, name, int32)
+
+// Special case for string, because we have to specify the namespace
+// std::string, which doesn't play nicely with our FLAG__namespace hackery.
+#define DECLARE_string(name) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
+ extern GOOGLE_GLOG_DLL_DECL std::string FLAGS_##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
+#endif
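+
+// Illustrative note: each DECLARE_* below expands to an extern
+// declaration plus a using-declaration, so client code can test a
+// flag directly, e.g. "if (FLAGS_logtostderr) ...".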
+
+// Set whether log messages go to stderr instead of logfiles
+DECLARE_bool(logtostderr);
+
+// Set whether log messages go to stderr in addition to logfiles.
+DECLARE_bool(alsologtostderr);
+
+// Log messages at a level >= this flag are automatically sent to
+// stderr in addition to log files.
+DECLARE_int32(stderrthreshold);
+
+// Set whether the log prefix should be prepended to each line of output.
+DECLARE_bool(log_prefix);
+
+// Log messages at a level <= this flag are buffered.
+// Log messages at a higher level are flushed immediately.
+DECLARE_int32(logbuflevel);
+
+// Sets the maximum number of seconds which logs may be buffered for.
+DECLARE_int32(logbufsecs);
+
+// Log suppression level: messages logged at a lower level than this
+// are suppressed.
+DECLARE_int32(minloglevel);
+
+// If specified, logfiles are written into this directory instead of the
+// default logging directory.
+DECLARE_string(log_dir);
+
+// Sets the path of the directory into which to put additional links
+// to the log files.
+DECLARE_string(log_link);
+
+DECLARE_int32(v); // in vlog_is_on.cc
+
+// Sets the maximum log file size (in MB).
+DECLARE_int32(max_log_size);
+
+// Sets whether to avoid logging to the disk if the disk is full.
+DECLARE_bool(stop_logging_if_full_disk);
+
+#ifdef MUST_UNDEF_GFLAGS_DECLARE_MACROS
+#undef MUST_UNDEF_GFLAGS_DECLARE_MACROS
+#undef DECLARE_VARIABLE
+#undef DECLARE_bool
+#undef DECLARE_int32
+#undef DECLARE_string
+#endif
+
+// Log messages below the GOOGLE_STRIP_LOG level will be compiled away for
+// security reasons. See LOG(severity) below.
+
+// A few definitions of macros that don't generate much code. Since
+// LOG(INFO) and its ilk are used all over our code, it's
+// better to have compact code for these operations.
+
+#if GOOGLE_STRIP_LOG == 0
+#define COMPACT_GOOGLE_LOG_INFO google::LogMessage( \
+ __FILE__, __LINE__)
+#define LOG_TO_STRING_INFO(message) google::LogMessage( \
+ __FILE__, __LINE__, google::INFO, message)
+#else
+#define COMPACT_GOOGLE_LOG_INFO google::NullStream()
+#define LOG_TO_STRING_INFO(message) google::NullStream()
+#endif
+
+#if GOOGLE_STRIP_LOG <= 1
+#define COMPACT_GOOGLE_LOG_WARNING google::LogMessage( \
+ __FILE__, __LINE__, google::WARNING)
+#define LOG_TO_STRING_WARNING(message) google::LogMessage( \
+ __FILE__, __LINE__, google::WARNING, message)
+#else
+#define COMPACT_GOOGLE_LOG_WARNING google::NullStream()
+#define LOG_TO_STRING_WARNING(message) google::NullStream()
+#endif
+
+#if GOOGLE_STRIP_LOG <= 2
+#define COMPACT_GOOGLE_LOG_ERROR google::LogMessage( \
+ __FILE__, __LINE__, google::ERROR)
+#define LOG_TO_STRING_ERROR(message) google::LogMessage( \
+ __FILE__, __LINE__, google::ERROR, message)
+#else
+#define COMPACT_GOOGLE_LOG_ERROR google::NullStream()
+#define LOG_TO_STRING_ERROR(message) google::NullStream()
+#endif
+
+#if GOOGLE_STRIP_LOG <= 3
+#define COMPACT_GOOGLE_LOG_FATAL google::LogMessageFatal( \
+ __FILE__, __LINE__)
+#define LOG_TO_STRING_FATAL(message) google::LogMessage( \
+ __FILE__, __LINE__, google::FATAL, message)
+#else
+#define COMPACT_GOOGLE_LOG_FATAL google::NullStreamFatal()
+#define LOG_TO_STRING_FATAL(message) google::NullStreamFatal()
+#endif
+
+// For DFATAL, we want to use LogMessage (as opposed to
+// LogMessageFatal), to be consistent with the original behavior.
+#ifdef NDEBUG
+#define COMPACT_GOOGLE_LOG_DFATAL COMPACT_GOOGLE_LOG_ERROR
+#elif GOOGLE_STRIP_LOG <= 3
+#define COMPACT_GOOGLE_LOG_DFATAL google::LogMessage( \
+ __FILE__, __LINE__, google::FATAL)
+#else
+#define COMPACT_GOOGLE_LOG_DFATAL google::NullStreamFatal()
+#endif
+
+#define GOOGLE_LOG_INFO(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::INFO, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_INFO(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::INFO, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+#define GOOGLE_LOG_WARNING(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::WARNING, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_WARNING(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::WARNING, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+#define GOOGLE_LOG_ERROR(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::ERROR, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_ERROR(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::ERROR, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+#define GOOGLE_LOG_FATAL(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::FATAL, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_FATAL(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::FATAL, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+#define GOOGLE_LOG_DFATAL(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::DFATAL_LEVEL, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_DFATAL(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::DFATAL_LEVEL, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+
+#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
+// A very useful logging macro to log windows errors:
+#define LOG_SYSRESULT(result) \
+ if (FAILED(result)) { \
+ LPTSTR message = NULL; \
+ LPTSTR msg = reinterpret_cast<LPTSTR>(&message); \
+ DWORD message_length = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | \
+ FORMAT_MESSAGE_FROM_SYSTEM, \
+ 0, result, 0, msg, 100, NULL); \
+ if (message_length > 0) { \
+ google::LogMessage(__FILE__, __LINE__, ERROR, 0, \
+ &google::LogMessage::SendToLog).stream() << message; \
+ LocalFree(message); \
+ } \
+ }
+#endif
+
+// We use the preprocessor's merging operator, "##", so that, e.g.,
+// LOG(INFO) becomes the token GOOGLE_LOG_INFO. There's some funny
+// subtle difference between ostream member streaming functions (e.g.,
+// ostream::operator<<(int) and ostream non-member streaming functions
+// (e.g., ::operator<<(ostream&, string&)): it turns out that it's
+// impossible to stream something like a string directly to an unnamed
+// ostream. We employ a neat hack by calling the stream() member
+// function of LogMessage which seems to avoid the problem.
+#define LOG(severity) COMPACT_GOOGLE_LOG_ ## severity.stream()
+#define SYSLOG(severity) SYSLOG_ ## severity(0).stream()
+
+namespace google {
+
+// They need the definitions of integer types.
+#include "glog/log_severity.h"
+#include "glog/vlog_is_on.h"
+
+// Initialize google's logging library. You will see the program name
+// specified by argv0 in log outputs.
+GOOGLE_GLOG_DLL_DECL void InitGoogleLogging(const char* argv0);
+
+// Shutdown google's logging library.
+GOOGLE_GLOG_DLL_DECL void ShutdownGoogleLogging();
+
+// Install a function which will be called after LOG(FATAL).
+GOOGLE_GLOG_DLL_DECL void InstallFailureFunction(void (*fail_func)());
+
+class LogSink; // defined below
+
+// If a non-NULL sink pointer is given, we push this message to that sink.
+// For LOG_TO_SINK we then do normal LOG(severity) logging as well.
+// This is useful for capturing messages and passing/storing them
+// somewhere more specific than the global log of the process.
+// Argument types:
+// LogSink* sink;
+// LogSeverity severity;
+// The cast is to disambiguate NULL arguments.
+#define LOG_TO_SINK(sink, severity) \
+ google::LogMessage( \
+ __FILE__, __LINE__, \
+ google::severity, \
+ static_cast<google::LogSink*>(sink), true).stream()
+#define LOG_TO_SINK_BUT_NOT_TO_LOGFILE(sink, severity) \
+ google::LogMessage( \
+ __FILE__, __LINE__, \
+ google::severity, \
+ static_cast<google::LogSink*>(sink), false).stream()
+
+// If a non-NULL string pointer is given, we write this message to that string.
+// We then do normal LOG(severity) logging as well.
+// This is useful for capturing messages and storing them somewhere more
+// specific than the global log of the process.
+// Argument types:
+// string* message;
+// LogSeverity severity;
+// The cast is to disambiguate NULL arguments.
+// NOTE: LOG(severity) expands to LogMessage().stream() for the specified
+// severity.
+#define LOG_TO_STRING(severity, message) \
+ LOG_TO_STRING_##severity(static_cast<string*>(message)).stream()
+
+// If a non-NULL pointer is given, we push the message onto the end
+// of a vector of strings; otherwise, we report it with LOG(severity).
+// This is handy for capturing messages and perhaps passing them back
+// to the caller, rather than reporting them immediately.
+// Argument types:
+// LogSeverity severity;
+// vector<string> *outvec;
+// The cast is to disambiguate NULL arguments.
+#define LOG_STRING(severity, outvec) \
+ LOG_TO_STRING_##severity(static_cast<vector<string>*>(outvec)).stream()
+
+#define LOG_IF(severity, condition) \
+ !(condition) ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+#define SYSLOG_IF(severity, condition) \
+ !(condition) ? (void) 0 : google::LogMessageVoidify() & SYSLOG(severity)
+
+#define LOG_ASSERT(condition) \
+ LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition
+#define SYSLOG_ASSERT(condition) \
+ SYSLOG_IF(FATAL, !(condition)) << "Assert failed: " #condition
+
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode. Therefore, it is safe to do things like:
+// CHECK(fp->Write(x) == 4)
+#define CHECK(condition) \
+ LOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN(!(condition))) \
+ << "Check failed: " #condition " "
+
+// A container for a string pointer which can be evaluated to a bool -
+// true iff the pointer is non-NULL.
+struct CheckOpString {
+ CheckOpString(std::string* str) : str_(str) { }
+ // No destructor: if str_ is non-NULL, we're about to LOG(FATAL),
+ // so there's no point in cleaning up str_.
+ operator bool() const {
+ return GOOGLE_PREDICT_BRANCH_NOT_TAKEN(str_ != NULL);
+ }
+ std::string* str_;
+};
+
+// This function is overloaded for integral types so that static const
+// integral members declared in a class but not defined out-of-line can
+// be used as arguments to CHECK* macros. It's not encouraged though.
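+// (An illustrative case, with a hypothetical "S": given
+//   struct S { static const int kMax = 10; };  // no out-of-line definition
+// CHECK_LE(n, S::kMax) still links, because the integral overloads
+// below take their arguments by value rather than binding a reference
+// to kMax.)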
+template <class T>
+inline const T& GetReferenceableValue(const T& t) { return t; }
+inline char GetReferenceableValue(char t) { return t; }
+inline unsigned char GetReferenceableValue(unsigned char t) { return t; }
+inline signed char GetReferenceableValue(signed char t) { return t; }
+inline short GetReferenceableValue(short t) { return t; }
+inline unsigned short GetReferenceableValue(unsigned short t) { return t; }
+inline int GetReferenceableValue(int t) { return t; }
+inline unsigned int GetReferenceableValue(unsigned int t) { return t; }
+inline long GetReferenceableValue(long t) { return t; }
+inline unsigned long GetReferenceableValue(unsigned long t) { return t; }
+inline long long GetReferenceableValue(long long t) { return t; }
+inline unsigned long long GetReferenceableValue(unsigned long long t) {
+ return t;
+}
+
+// This is a dummy class to define the following operator.
+struct DummyClassToDefineOperator {};
+
+}
+
+// Define global operator<< to declare using ::operator<<.
+// This declaration will allow us to use CHECK macros for user
+// defined classes which have operator<< (e.g., stl_logging.h).
+inline std::ostream& operator<<(
+ std::ostream& out, const google::DummyClassToDefineOperator&) {
+ return out;
+}
+
+namespace google {
+
+// Build the error message string.
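+// (For example, MakeCheckOpString(3, 5, "a == b") produces
+// "a == b (3 vs. 5)".)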
+template<class t1, class t2>
+std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
+ // This means that we cannot use stl_logging if the compiler doesn't
+ // support using-declarations for operators.
+ // TODO(hamaji): Figure out a way to fix.
+#if 1
+ using ::operator<<;
+#endif
+ std::strstream ss;
+ ss << names << " (" << v1 << " vs. " << v2 << ")";
+ return new std::string(ss.str(), ss.pcount());
+}
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+#define DEFINE_CHECK_OP_IMPL(name, op) \
+ template <class t1, class t2> \
+ inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \
+ const char* names) { \
+ if (v1 op v2) return NULL; \
+ else return MakeCheckOpString(v1, v2, names); \
+ } \
+ inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
+ return Check##name##Impl<int, int>(v1, v2, names); \
+ }
+
+// Use _EQ, _NE, _LE, etc. in case the file including base/logging.h
+// provides its own #defines for the simpler names EQ, NE, LE, etc.
+// This happens if, for example, those are used as token names in a
+// yacc grammar.
+DEFINE_CHECK_OP_IMPL(_EQ, ==)
+DEFINE_CHECK_OP_IMPL(_NE, !=)
+DEFINE_CHECK_OP_IMPL(_LE, <=)
+DEFINE_CHECK_OP_IMPL(_LT, < )
+DEFINE_CHECK_OP_IMPL(_GE, >=)
+DEFINE_CHECK_OP_IMPL(_GT, > )
+#undef DEFINE_CHECK_OP_IMPL
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use CHECK_EQ et al below.
+
+#if defined(STATIC_ANALYSIS)
+// Only for static analysis tool to know that it is equivalent to assert
+#define CHECK_OP_LOG(name, op, val1, val2, log) CHECK((val1) op (val2))
+#elif !defined(NDEBUG)
+// In debug mode, avoid constructing CheckOpStrings if possible,
+// to reduce the overhead of CHECK statements by 2x.
+// Real DCHECK-heavy tests have seen 1.5x speedups.
+
+// The meaning of "string" might be different between now and
+// when this macro gets invoked (e.g., if someone is experimenting
+// with other string implementations that get defined after this
+// file is included). Save the current meaning now and use it
+// in the macro.
+typedef std::string _Check_string;
+#define CHECK_OP_LOG(name, op, val1, val2, log) \
+ while (google::_Check_string* _result = \
+ google::Check##name##Impl( \
+ google::GetReferenceableValue(val1), \
+ google::GetReferenceableValue(val2), \
+ #val1 " " #op " " #val2)) \
+ log(__FILE__, __LINE__, \
+ google::CheckOpString(_result)).stream()
+#else
+// In optimized mode, use CheckOpString to hint to compiler that
+// the while condition is unlikely.
+#define CHECK_OP_LOG(name, op, val1, val2, log) \
+ while (google::CheckOpString _result = \
+ google::Check##name##Impl( \
+ google::GetReferenceableValue(val1), \
+ google::GetReferenceableValue(val2), \
+ #val1 " " #op " " #val2)) \
+ log(__FILE__, __LINE__, _result).stream()
+#endif // STATIC_ANALYSIS, !NDEBUG
+
+#if GOOGLE_STRIP_LOG <= 3
+#define CHECK_OP(name, op, val1, val2) \
+ CHECK_OP_LOG(name, op, val1, val2, google::LogMessageFatal)
+#else
+#define CHECK_OP(name, op, val1, val2) \
+ CHECK_OP_LOG(name, op, val1, val2, google::NullStreamFatal)
+#endif // STRIP_LOG <= 3
+
+// Equality/Inequality checks - compare two values, and log a FATAL message
+// including the two values when the result is not as expected. The values
+// must have operator<<(ostream, ...) defined.
+//
+// You may append to the error message like so:
+// CHECK_NE(1, 2) << ": The world must be ending!";
+//
+// We are very careful to ensure that each argument is evaluated exactly
+// once, and that anything which is legal to pass as a function argument is
+// legal here. In particular, the arguments may be temporary expressions
+// which will end up being destroyed at the end of the apparent statement,
+// for example:
+// CHECK_EQ(string("abc")[1], 'b');
+//
+// WARNING: These don't compile correctly if one of the arguments is a pointer
+// and the other is NULL. To work around this, simply static_cast NULL to the
+// type of the desired pointer.
+
+#define CHECK_EQ(val1, val2) CHECK_OP(_EQ, ==, val1, val2)
+#define CHECK_NE(val1, val2) CHECK_OP(_NE, !=, val1, val2)
+#define CHECK_LE(val1, val2) CHECK_OP(_LE, <=, val1, val2)
+#define CHECK_LT(val1, val2) CHECK_OP(_LT, < , val1, val2)
+#define CHECK_GE(val1, val2) CHECK_OP(_GE, >=, val1, val2)
+#define CHECK_GT(val1, val2) CHECK_OP(_GT, > , val1, val2)
+
+// Check that the input is non-NULL. This is very useful in constructor
+// initializer lists; see the example below.
+
+#define CHECK_NOTNULL(val) \
+ google::CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
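+
+// Because CheckNotNull() returns its argument, CHECK_NOTNULL can wrap an
+// initializer directly. For example (Foo and Wrapper are hypothetical):
+//   class Wrapper {
+//    public:
+//     explicit Wrapper(Foo* foo) : foo_(CHECK_NOTNULL(foo)) {}
+//    private:
+//     Foo* foo_;
+//   };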
+
+// Helper functions for string comparisons.
+// To avoid bloat, the definitions are in logging.cc.
+#define DECLARE_CHECK_STROP_IMPL(func, expected) \
+ GOOGLE_GLOG_DLL_DECL std::string* Check##func##expected##Impl( \
+ const char* s1, const char* s2, const char* names);
+DECLARE_CHECK_STROP_IMPL(strcmp, true)
+DECLARE_CHECK_STROP_IMPL(strcmp, false)
+DECLARE_CHECK_STROP_IMPL(strcasecmp, true)
+DECLARE_CHECK_STROP_IMPL(strcasecmp, false)
+#undef DECLARE_CHECK_STROP_IMPL
+
+// Helper macro for string comparisons.
+// Don't use this macro directly in your code, use CHECK_STREQ et al below.
+#define CHECK_STROP(func, op, expected, s1, s2) \
+ while (google::CheckOpString _result = \
+ google::Check##func##expected##Impl((s1), (s2), \
+ #s1 " " #op " " #s2)) \
+ LOG(FATAL) << *_result.str_
+
+
+// String (char*) equality/inequality checks.
+// CASE versions are case-insensitive.
+//
+// Note that "s1" and "s2" may be temporary strings which are destroyed
+// by the compiler at the end of the current "full expression"
+// (e.g. CHECK_STREQ(Foo().c_str(), Bar().c_str())).
+
+#define CHECK_STREQ(s1, s2) CHECK_STROP(strcmp, ==, true, s1, s2)
+#define CHECK_STRNE(s1, s2) CHECK_STROP(strcmp, !=, false, s1, s2)
+#define CHECK_STRCASEEQ(s1, s2) CHECK_STROP(strcasecmp, ==, true, s1, s2)
+#define CHECK_STRCASENE(s1, s2) CHECK_STROP(strcasecmp, !=, false, s1, s2)
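+
+// For example (the argv check here is just an illustration):
+//   CHECK_STREQ(argv[1], "--help");
+//   CHECK_STRCASEEQ("Hello", "hELLO");  // passes: comparison is case-insensitive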
+
+#define CHECK_INDEX(I,A) CHECK(I < (sizeof(A)/sizeof(A[0])))
+#define CHECK_BOUND(B,A) CHECK(B <= (sizeof(A)/sizeof(A[0])))
+
+#define CHECK_DOUBLE_EQ(val1, val2) \
+ do { \
+ CHECK_LE((val1), (val2)+0.000000000000001L); \
+ CHECK_GE((val1), (val2)-0.000000000000001L); \
+ } while (0)
+
+#define CHECK_NEAR(val1, val2, margin) \
+ do { \
+ CHECK_LE((val1), (val2)+(margin)); \
+ CHECK_GE((val1), (val2)-(margin)); \
+ } while (0)
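+
+// For example (ComputePi() is a hypothetical function):
+//   CHECK_NEAR(ComputePi(), 3.14159, 1e-3);  // |computed - expected| <= 1e-3
+//   CHECK_DOUBLE_EQ(1.0 / 3.0 * 3.0, 1.0);   // fixed 1e-15 margin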
+
+// perror()..googly style!
+//
+// PLOG() and PLOG_IF() and PCHECK() behave exactly like their LOG* and
+// CHECK equivalents with the addition that they postpend a description
+// of the current state of errno to their output lines.
+
+#define PLOG(severity) GOOGLE_PLOG(severity, 0).stream()
+
+#define GOOGLE_PLOG(severity, counter) \
+ google::ErrnoLogMessage( \
+ __FILE__, __LINE__, google::severity, counter, \
+ &google::LogMessage::SendToLog)
+
+#define PLOG_IF(severity, condition) \
+ !(condition) ? (void) 0 : google::LogMessageVoidify() & PLOG(severity)
+
+// A CHECK() macro that postpends errno if the condition is false. E.g.
+//
+// if (poll(fds, nfds, timeout) == -1) { PCHECK(errno == EINTR); ... }
+#define PCHECK(condition) \
+ PLOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN(!(condition))) \
+ << "Check failed: " #condition " "
+
+// A CHECK() macro that lets you assert the success of a function that
+// returns -1 and sets errno in case of an error. E.g.
+//
+// CHECK_ERR(mkdir(path, 0700));
+//
+// or
+//
+// int fd = open(filename, flags); CHECK_ERR(fd) << ": open " << filename;
+#define CHECK_ERR(invocation) \
+PLOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN((invocation) == -1)) \
+ << #invocation
+
+// Use macro expansion to create, for each use of LOG_EVERY_N(), static
+// variables with the __LINE__ expansion as part of the variable name.
+#define LOG_EVERY_N_VARNAME(base, line) LOG_EVERY_N_VARNAME_CONCAT(base, line)
+#define LOG_EVERY_N_VARNAME_CONCAT(base, line) base ## line
+
+#define LOG_OCCURRENCES LOG_EVERY_N_VARNAME(occurrences_, __LINE__)
+#define LOG_OCCURRENCES_MOD_N LOG_EVERY_N_VARNAME(occurrences_mod_n_, __LINE__)
+
+#define SOME_KIND_OF_LOG_EVERY_N(severity, n, what_to_do) \
+ static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+ ++LOG_OCCURRENCES; \
+ if (++LOG_OCCURRENCES_MOD_N > n) LOG_OCCURRENCES_MOD_N -= n; \
+ if (LOG_OCCURRENCES_MOD_N == 1) \
+ google::LogMessage( \
+ __FILE__, __LINE__, google::severity, LOG_OCCURRENCES, \
+ &what_to_do).stream()
+
+#define SOME_KIND_OF_LOG_IF_EVERY_N(severity, condition, n, what_to_do) \
+ static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+ ++LOG_OCCURRENCES; \
+ if (condition && \
+ ((LOG_OCCURRENCES_MOD_N=(LOG_OCCURRENCES_MOD_N + 1) % n) == (1 % n))) \
+ google::LogMessage( \
+ __FILE__, __LINE__, google::severity, LOG_OCCURRENCES, \
+ &what_to_do).stream()
+
+#define SOME_KIND_OF_PLOG_EVERY_N(severity, n, what_to_do) \
+ static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+ ++LOG_OCCURRENCES; \
+ if (++LOG_OCCURRENCES_MOD_N > n) LOG_OCCURRENCES_MOD_N -= n; \
+ if (LOG_OCCURRENCES_MOD_N == 1) \
+ google::ErrnoLogMessage( \
+ __FILE__, __LINE__, google::severity, LOG_OCCURRENCES, \
+ &what_to_do).stream()
+
+#define SOME_KIND_OF_LOG_FIRST_N(severity, n, what_to_do) \
+ static int LOG_OCCURRENCES = 0; \
+ if (LOG_OCCURRENCES <= n) \
+ ++LOG_OCCURRENCES; \
+ if (LOG_OCCURRENCES <= n) \
+ google::LogMessage( \
+ __FILE__, __LINE__, google::severity, LOG_OCCURRENCES, \
+ &what_to_do).stream()
+
+namespace glog_internal_namespace_ {
+template <bool>
+struct CompileAssert {
+};
+struct CrashReason;
+} // namespace glog_internal_namespace_
+
+#define GOOGLE_GLOG_COMPILE_ASSERT(expr, msg) \
+ typedef google::glog_internal_namespace_::CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
+
+#define LOG_EVERY_N(severity, n) \
+ GOOGLE_GLOG_COMPILE_ASSERT(google::severity < \
+ google::NUM_SEVERITIES, \
+ INVALID_REQUESTED_LOG_SEVERITY); \
+ SOME_KIND_OF_LOG_EVERY_N(severity, (n), google::LogMessage::SendToLog)
+
+#define SYSLOG_EVERY_N(severity, n) \
+ SOME_KIND_OF_LOG_EVERY_N(severity, (n), google::LogMessage::SendToSyslogAndLog)
+
+#define PLOG_EVERY_N(severity, n) \
+ SOME_KIND_OF_PLOG_EVERY_N(severity, (n), google::LogMessage::SendToLog)
+
+#define LOG_FIRST_N(severity, n) \
+ SOME_KIND_OF_LOG_FIRST_N(severity, (n), google::LogMessage::SendToLog)
+
+#define LOG_IF_EVERY_N(severity, condition, n) \
+ SOME_KIND_OF_LOG_IF_EVERY_N(severity, (condition), (n), google::LogMessage::SendToLog)
+
+// We want the special COUNTER value available for LOG_EVERY_X()'ed messages.
+enum PRIVATE_Counter {COUNTER};
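+
+// For example (the message text is illustrative):
+//   LOG_EVERY_N(INFO, 10) << "Got the " << google::COUNTER << "th cookie";
+// logs on the 1st, 11th, 21st, ... executions of the line, with COUNTER
+// streaming the total number of times the line has been reached.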
+
+
+// Plus some debug-logging macros that get compiled to nothing for production
+
+#ifndef NDEBUG
+
+#define DLOG(severity) LOG(severity)
+#define DVLOG(verboselevel) VLOG(verboselevel)
+#define DLOG_IF(severity, condition) LOG_IF(severity, condition)
+#define DLOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n)
+#define DLOG_IF_EVERY_N(severity, condition, n) \
+ LOG_IF_EVERY_N(severity, condition, n)
+#define DLOG_ASSERT(condition) LOG_ASSERT(condition)
+
+// Debug-only checking. Not executed in NDEBUG mode.
+#define DCHECK(condition) CHECK(condition)
+#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
+#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
+#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
+#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
+#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
+#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
+#define DCHECK_NOTNULL(val) CHECK_NOTNULL(val)
+#define DCHECK_STREQ(str1, str2) CHECK_STREQ(str1, str2)
+#define DCHECK_STRCASEEQ(str1, str2) CHECK_STRCASEEQ(str1, str2)
+#define DCHECK_STRNE(str1, str2) CHECK_STRNE(str1, str2)
+#define DCHECK_STRCASENE(str1, str2) CHECK_STRCASENE(str1, str2)
+
+#else // NDEBUG
+
+#define DLOG(severity) \
+ true ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+
+#define DVLOG(verboselevel) \
+ (true || !VLOG_IS_ON(verboselevel)) ?\
+ (void) 0 : google::LogMessageVoidify() & LOG(INFO)
+
+#define DLOG_IF(severity, condition) \
+ (true || !(condition)) ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+
+#define DLOG_EVERY_N(severity, n) \
+ true ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+
+#define DLOG_IF_EVERY_N(severity, condition, n) \
+ (true || !(condition))? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+
+#define DLOG_ASSERT(condition) \
+ true ? (void) 0 : LOG_ASSERT(condition)
+
+#define DCHECK(condition) \
+ while (false) \
+ CHECK(condition)
+
+#define DCHECK_EQ(val1, val2) \
+ while (false) \
+ CHECK_EQ(val1, val2)
+
+#define DCHECK_NE(val1, val2) \
+ while (false) \
+ CHECK_NE(val1, val2)
+
+#define DCHECK_LE(val1, val2) \
+ while (false) \
+ CHECK_LE(val1, val2)
+
+#define DCHECK_LT(val1, val2) \
+ while (false) \
+ CHECK_LT(val1, val2)
+
+#define DCHECK_GE(val1, val2) \
+ while (false) \
+ CHECK_GE(val1, val2)
+
+#define DCHECK_GT(val1, val2) \
+ while (false) \
+ CHECK_GT(val1, val2)
+
+#define DCHECK_NOTNULL(val) (val)
+
+#define DCHECK_STREQ(str1, str2) \
+ while (false) \
+ CHECK_STREQ(str1, str2)
+
+#define DCHECK_STRCASEEQ(str1, str2) \
+ while (false) \
+ CHECK_STRCASEEQ(str1, str2)
+
+#define DCHECK_STRNE(str1, str2) \
+ while (false) \
+ CHECK_STRNE(str1, str2)
+
+#define DCHECK_STRCASENE(str1, str2) \
+ while (false) \
+ CHECK_STRCASENE(str1, str2)
+
+
+#endif // NDEBUG
+
+// Log only in verbose mode.
+
+#define VLOG(verboselevel) LOG_IF(INFO, VLOG_IS_ON(verboselevel))
+
+#define VLOG_IF(verboselevel, condition) \
+ LOG_IF(INFO, (condition) && VLOG_IS_ON(verboselevel))
+
+#define VLOG_EVERY_N(verboselevel, n) \
+ LOG_IF_EVERY_N(INFO, VLOG_IS_ON(verboselevel), n)
+
+#define VLOG_IF_EVERY_N(verboselevel, condition, n) \
+ LOG_IF_EVERY_N(INFO, (condition) && VLOG_IS_ON(verboselevel), n)
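+
+// For example (state is a hypothetical variable): when run with --v=1,
+//   VLOG(1) << "Detailed state: " << state;
+// is emitted at INFO severity, while VLOG(2) << ... stays silent unless
+// --v (or a matching --vmodule pattern) is raised to 2 or more.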
+
+//
+// This class more or less represents a particular log message. You
+// create an instance of LogMessage and then stream stuff to it.
+// When you finish streaming to it, ~LogMessage is called and the
+// full message gets streamed to the appropriate destination.
+//
+// You shouldn't actually use LogMessage's constructor to log things,
+// though. You should use the LOG() macro (and variants thereof)
+// above.
+class GOOGLE_GLOG_DLL_DECL LogMessage {
+public:
+ enum {
+ // Passing kNoLogPrefix for the line number disables the
+ // log-message prefix. Useful for using the LogMessage
+ // infrastructure as a printing utility. See also the --log_prefix
+ // flag for controlling the log-message prefix on an
+ // application-wide basis.
+ kNoLogPrefix = -1
+ };
+
+  // LogStream inherits from a non-DLL-exported class (std::ostrstream),
+  // and VC++ produces a warning for this situation.
+ // However, MSDN says "C4275 can be ignored in Microsoft Visual C++
+ // 2005 if you are deriving from a type in the Standard C++ Library"
+ // http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+ // Let's just ignore the warning.
+#ifdef _MSC_VER
+# pragma warning(disable: 4275)
+#endif
+ class GOOGLE_GLOG_DLL_DECL LogStream : public std::ostrstream {
+#ifdef _MSC_VER
+# pragma warning(default: 4275)
+#endif
+ public:
+ LogStream(char *buf, int len, int ctr)
+ : ostrstream(buf, len),
+ ctr_(ctr) {
+ self_ = this;
+ }
+
+ int ctr() const { return ctr_; }
+ void set_ctr(int ctr) { ctr_ = ctr; }
+ LogStream* self() const { return self_; }
+
+ private:
+ int ctr_; // Counter hack (for the LOG_EVERY_X() macro)
+ LogStream *self_; // Consistency check hack
+ };
+
+public:
+ // icc 8 requires this typedef to avoid an internal compiler error.
+ typedef void (LogMessage::*SendMethod)();
+
+ LogMessage(const char* file, int line, LogSeverity severity, int ctr,
+ SendMethod send_method);
+
+ // Two special constructors that generate reduced amounts of code at
+ // LOG call sites for common cases.
+
+ // Used for LOG(INFO): Implied are:
+ // severity = INFO, ctr = 0, send_method = &LogMessage::SendToLog.
+ //
+ // Using this constructor instead of the more complex constructor above
+ // saves 19 bytes per call site.
+ LogMessage(const char* file, int line);
+
+ // Used for LOG(severity) where severity != INFO. Implied
+ // are: ctr = 0, send_method = &LogMessage::SendToLog
+ //
+ // Using this constructor instead of the more complex constructor above
+ // saves 17 bytes per call site.
+ LogMessage(const char* file, int line, LogSeverity severity);
+
+ // Constructor to log this message to a specified sink (if not NULL).
+ // Implied are: ctr = 0, send_method = &LogMessage::SendToSinkAndLog if
+ // also_send_to_log is true, send_method = &LogMessage::SendToSink otherwise.
+ LogMessage(const char* file, int line, LogSeverity severity, LogSink* sink,
+ bool also_send_to_log);
+
+ // Constructor where we also give a vector<string> pointer
+ // for storing the messages (if the pointer is not NULL).
+ // Implied are: ctr = 0, send_method = &LogMessage::SaveOrSendToLog.
+ LogMessage(const char* file, int line, LogSeverity severity,
+ std::vector<std::string>* outvec);
+
+ // Constructor where we also give a string pointer for storing the
+ // message (if the pointer is not NULL). Implied are: ctr = 0,
+ // send_method = &LogMessage::WriteToStringAndLog.
+ LogMessage(const char* file, int line, LogSeverity severity,
+ std::string* message);
+
+ // A special constructor used for check failures
+ LogMessage(const char* file, int line, const CheckOpString& result);
+
+ ~LogMessage();
+
+ // Flush a buffered message to the sink set in the constructor. Always
+ // called by the destructor, it may also be called from elsewhere if
+ // needed. Only the first call is actioned; any later ones are ignored.
+ void Flush();
+
+ // An arbitrary limit on the length of a single log message. This
+ // is so that streaming can be done more efficiently.
+ static const size_t kMaxLogMessageLen;
+
+  // These should not be called directly outside of logging.*;
+  // they are only passed as SendMethod arguments to other LogMessage methods:
+ void SendToLog(); // Actually dispatch to the logs
+ void SendToSyslogAndLog(); // Actually dispatch to syslog and the logs
+
+ // Call abort() or similar to perform LOG(FATAL) crash.
+ static void Fail() __attribute__ ((noreturn));
+
+ std::ostream& stream() { return *(data_->stream_); }
+
+ int preserved_errno() const { return data_->preserved_errno_; }
+
+ // Must be called without the log_mutex held. (L < log_mutex)
+ static int64 num_messages(int severity);
+
+private:
+ // Fully internal SendMethod cases:
+ void SendToSinkAndLog(); // Send to sink if provided and dispatch to the logs
+ void SendToSink(); // Send to sink if provided, do nothing otherwise.
+
+ // Write to string if provided and dispatch to the logs.
+ void WriteToStringAndLog();
+
+ void SaveOrSendToLog(); // Save to stringvec if provided, else to logs
+
+ void Init(const char* file, int line, LogSeverity severity,
+ void (LogMessage::*send_method)());
+
+ // Used to fill in crash information during LOG(FATAL) failures.
+ void RecordCrashReason(glog_internal_namespace_::CrashReason* reason);
+
+ // Counts of messages sent at each priority:
+ static int64 num_messages_[NUM_SEVERITIES]; // under log_mutex
+
+ // We keep the data in a separate struct so that each instance of
+ // LogMessage uses less stack space.
+ struct GOOGLE_GLOG_DLL_DECL LogMessageData {
+    LogMessageData() {}
+
+ int preserved_errno_; // preserved errno
+ char* buf_;
+ char* message_text_; // Complete message text (points to selected buffer)
+ LogStream* stream_alloc_;
+ LogStream* stream_;
+ char severity_; // What level is this LogMessage logged at?
+    int line_;                 // line number of the logging call
+ void (LogMessage::*send_method_)(); // Call this in destructor to send
+ union { // At most one of these is used: union to keep the size low.
+ LogSink* sink_; // NULL or sink to send message to
+ std::vector<std::string>* outvec_; // NULL or vector to push message onto
+ std::string* message_; // NULL or string to write message into
+ };
+ time_t timestamp_; // Time of creation of LogMessage
+ struct ::tm tm_time_; // Time of creation of LogMessage
+ size_t num_prefix_chars_; // # of chars of prefix in this message
+ size_t num_chars_to_log_; // # of chars of msg to send to log
+ size_t num_chars_to_syslog_; // # of chars of msg to send to syslog
+ const char* basename_; // basename of file that called LOG
+ const char* fullname_; // fullname of file that called LOG
+ bool has_been_flushed_; // false => data has not been flushed
+ bool first_fatal_; // true => this was first fatal msg
+
+ ~LogMessageData();
+ private:
+ LogMessageData(const LogMessageData&);
+ void operator=(const LogMessageData&);
+ };
+
+ static LogMessageData fatal_msg_data_exclusive_;
+ static LogMessageData fatal_msg_data_shared_;
+
+ LogMessageData* allocated_;
+ LogMessageData* data_;
+
+ friend class LogDestination;
+
+ LogMessage(const LogMessage&);
+ void operator=(const LogMessage&);
+};
+
+// This class happens to be thread-hostile because all instances share
+// a single data buffer, but since it can only be created just before
+// the process dies, we don't worry so much.
+class GOOGLE_GLOG_DLL_DECL LogMessageFatal : public LogMessage {
+ public:
+ LogMessageFatal(const char* file, int line);
+ LogMessageFatal(const char* file, int line, const CheckOpString& result);
+ ~LogMessageFatal() __attribute__ ((noreturn));
+};
+
+// A non-macro interface to the log facility, useful
+// when the logging level is not a compile-time constant.
+inline void LogAtLevel(int const severity, std::string const &msg) {
+ LogMessage(__FILE__, __LINE__, severity).stream() << msg;
+}
+
+// A macro alternative to LogAtLevel. New code may want to use this
+// version, since it has two advantages: 1. it outputs the file name and
+// the line number where the macro appears, like other LOG macros;
+// 2. it can be used as a C++ stream.
+#define LOG_AT_LEVEL(severity) google::LogMessage(__FILE__, __LINE__, severity).stream()
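+
+// For example (saw_error and n_errors are hypothetical):
+//   LOG_AT_LEVEL(saw_error ? google::ERROR : google::INFO)
+//       << "Finished with " << n_errors << " error(s)";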
+
+// A small helper for CHECK_NOTNULL().
+template <typename T>
+T* CheckNotNull(const char *file, int line, const char *names, T* t) {
+ if (t == NULL) {
+ LogMessageFatal(file, line, new std::string(names));
+ }
+ return t;
+}
+
+// Allow folks to put a counter in the LOG_EVERY_X()'ed messages. This
+// only works if the ostream is a LogStream. If the ostream is not a
+// LogStream you'll get an assert saying as much at runtime.
+GOOGLE_GLOG_DLL_DECL std::ostream& operator<<(std::ostream &os,
+ const PRIVATE_Counter&);
+
+
+// Derived class for PLOG*() above.
+class GOOGLE_GLOG_DLL_DECL ErrnoLogMessage : public LogMessage {
+ public:
+
+ ErrnoLogMessage(const char* file, int line, LogSeverity severity, int ctr,
+ void (LogMessage::*send_method)());
+
+ // Postpends ": strerror(errno) [errno]".
+ ~ErrnoLogMessage();
+
+ private:
+ ErrnoLogMessage(const ErrnoLogMessage&);
+ void operator=(const ErrnoLogMessage&);
+};
+
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+
+class GOOGLE_GLOG_DLL_DECL LogMessageVoidify {
+ public:
+ LogMessageVoidify() { }
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(std::ostream&) { }
+};
+
+
+// Flushes all log files that contain messages that are at least of
+// the specified severity level. Thread-safe.
+GOOGLE_GLOG_DLL_DECL void FlushLogFiles(LogSeverity min_severity);
+
+// Flushes all log files that contain messages that are at least of
+// the specified severity level. Thread-hostile because it ignores
+// locking -- used for catastrophic failures.
+GOOGLE_GLOG_DLL_DECL void FlushLogFilesUnsafe(LogSeverity min_severity);
+
+//
+// Set the destination to which a particular severity level of log
+// messages is sent. If base_filename is "", it means "don't log this
+// severity". Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetLogDestination(LogSeverity severity,
+ const char* base_filename);
+
+//
+// Set the basename of the symlink to the latest log file at a given
+// severity. If symlink_basename is empty, do not make a symlink. If
+// you don't call this function, the symlink basename is the
+// invocation name of the program. Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetLogSymlink(LogSeverity severity,
+ const char* symlink_basename);
+
+//
+// Used to send logs to some other kind of destination.
+// Users should subclass LogSink and override send() to do whatever they want.
+// Implementations must be thread-safe because a shared instance will
+// be called from whichever thread ran the LOG(XXX) line.
+class GOOGLE_GLOG_DLL_DECL LogSink {
+ public:
+ virtual ~LogSink();
+
+  // Sink's logging logic (message_len excludes the trailing '\n').
+  // This method can't use LOG() or CHECK(), as the logging system's mutexes
+  // are held during this call.
+ virtual void send(LogSeverity severity, const char* full_filename,
+ const char* base_filename, int line,
+ const struct ::tm* tm_time,
+ const char* message, size_t message_len) = 0;
+
+ // Redefine this to implement waiting for
+ // the sink's logging logic to complete.
+ // It will be called after each send() returns,
+  // but before the LogMessage that issued it exits or crashes.
+ // By default this function does nothing.
+ // Using this function one can implement complex logic for send()
+  // that itself involves logging, and do all this without causing deadlocks
+  // or inconsistent rearrangement of log messages.
+ // E.g. if a LogSink has thread-specific actions, the send() method
+ // can simply add the message to a queue and wake up another thread that
+ // handles real logging while itself making some LOG() calls;
+ // WaitTillSent() can be implemented to wait for that logic to complete.
+ // See our unittest for an example.
+ virtual void WaitTillSent();
+
+ // Returns the normal text output of the log message.
+ // Can be useful to implement send().
+ static std::string ToString(LogSeverity severity, const char* file, int line,
+ const struct ::tm* tm_time,
+ const char* message, size_t message_len);
+};
+
+// Add or remove a LogSink as a consumer of logging data. Thread-safe.
+GOOGLE_GLOG_DLL_DECL void AddLogSink(LogSink *destination);
+GOOGLE_GLOG_DLL_DECL void RemoveLogSink(LogSink *destination);
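+
+// A minimal LogSink sketch (hypothetical; a real implementation must also
+// be thread-safe, which this single-vector version is not):
+//   class StringSink : public google::LogSink {
+//    public:
+//     virtual void send(google::LogSeverity severity, const char* full_filename,
+//                       const char* base_filename, int line,
+//                       const struct ::tm* tm_time,
+//                       const char* message, size_t message_len) {
+//       // May not LOG()/CHECK() here; just stash the formatted line.
+//       lines_.push_back(ToString(severity, base_filename, line,
+//                                 tm_time, message, message_len));
+//     }
+//     std::vector<std::string> lines_;
+//   };
+// Attach it with AddLogSink(&sink) and call RemoveLogSink(&sink) before
+// the sink is destroyed.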
+
+//
+// Specify an "extension" added to the filename specified via
+// SetLogDestination. This applies to all severity levels. It's
+// often used to append the port we're listening on to the logfile
+// name. Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetLogFilenameExtension(
+ const char* filename_extension);
+
+//
+// Make it so that all log messages of at least a particular severity
+// are logged to stderr (in addition to logging to the usual log
+// file(s)). Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetStderrLogging(LogSeverity min_severity);
+
+//
+// Make it so that all log messages go only to stderr. Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void LogToStderr();
+
+//
+// Make it so that all log messages of at least a particular severity are
+// logged via email to a list of addresses (in addition to logging to the
+// usual log file(s)). The list of addresses is just a string containing
+// the email addresses to send to (separated by spaces, say). Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetEmailLogging(LogSeverity min_severity,
+ const char* addresses);
+
+// A simple function that sends email. dest is a comma-separated
+// list of addresses. Thread-safe.
+GOOGLE_GLOG_DLL_DECL bool SendEmail(const char *dest,
+ const char *subject, const char *body);
+
+GOOGLE_GLOG_DLL_DECL const std::vector<std::string>& GetLoggingDirectories();
+
+// For tests only: Clear the internal [cached] list of logging directories to
+// force a refresh the next time GetLoggingDirectories is called.
+// Thread-hostile.
+void TestOnly_ClearLoggingDirectoriesList();
+
+// Returns a set of existing temporary directories, which will be a
+// subset of the directories returned by GetLoggingDirectories().
+// Thread-safe.
+GOOGLE_GLOG_DLL_DECL void GetExistingTempDirectories(
+ std::vector<std::string>* list);
+
+// Print any fatal message again -- useful to call from signal handler
+// so that the last thing in the output is the fatal message.
+// Thread-hostile, but a race is unlikely.
+GOOGLE_GLOG_DLL_DECL void ReprintFatalMessage();
+
+// Truncate a log file that may be the append-only output of multiple
+// processes and hence can't simply be renamed/reopened (typically a
+// stdout/stderr). If the file "path" is > "limit" bytes, copy the
+// last "keep" bytes to offset 0 and truncate the rest. Since we could
+// be racing with other writers, this approach has the potential to
+// lose very small amounts of data. For security, only follow symlinks
+// if the path is /proc/self/fd/*.
+GOOGLE_GLOG_DLL_DECL void TruncateLogFile(const char *path,
+ int64 limit, int64 keep);
+
+// Truncate stdout and stderr if they are over the value specified by
+// --max_log_size; keep the final 1MB. This function has the same
+// race condition as TruncateLogFile.
+GOOGLE_GLOG_DLL_DECL void TruncateStdoutStderr();
+
+// Return the string representation of the provided LogSeverity level.
+// Thread-safe.
+GOOGLE_GLOG_DLL_DECL const char* GetLogSeverityName(LogSeverity severity);
+
+// ---------------------------------------------------------------------
+// Implementation details that are not useful to most clients
+// ---------------------------------------------------------------------
+
+// A Logger is the interface used by logging modules to emit entries
+// to a log. A typical implementation will dump formatted data to a
+// sequence of files. We also provide interfaces that will forward
+// the data to another thread so that the invoker never blocks.
+// Implementations should be thread-safe since the logging system
+// will write to them from multiple threads.
+
+namespace base {
+
+class GOOGLE_GLOG_DLL_DECL Logger {
+ public:
+ virtual ~Logger();
+
+ // Writes "message[0,message_len-1]" corresponding to an event that
+ // occurred at "timestamp". If "force_flush" is true, the log file
+ // is flushed immediately.
+ //
+ // The input message has already been formatted as deemed
+ // appropriate by the higher level logging facility. For example,
+ // textual log messages already contain timestamps, and the
+ // file:linenumber header.
+ virtual void Write(bool force_flush,
+ time_t timestamp,
+ const char* message,
+ int message_len) = 0;
+
+ // Flush any buffered messages
+ virtual void Flush() = 0;
+
+ // Get the current LOG file size.
+ // The returned value is approximate since some
+ // logged data may not have been flushed to disk yet.
+ virtual uint32 LogSize() = 0;
+};
+
+// Get the logger for the specified severity level. The logger
+// remains the property of the logging module and should not be
+// deleted by the caller. Thread-safe.
+extern GOOGLE_GLOG_DLL_DECL Logger* GetLogger(LogSeverity level);
+
+// Set the logger for the specified severity level. The logger
+// becomes the property of the logging module and should not
+// be deleted by the caller. Thread-safe.
+extern GOOGLE_GLOG_DLL_DECL void SetLogger(LogSeverity level, Logger* logger);
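+
+// For example (MyLogger is a hypothetical Logger subclass): replacing the
+// INFO logger forwards all INFO-level output through your implementation,
+// and the logging module takes ownership of the new logger:
+//   google::base::Logger* old = google::base::GetLogger(google::INFO);
+//   google::base::SetLogger(google::INFO, new MyLogger(old));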
+
+}
+
+// glibc has traditionally implemented two incompatible versions of
+// strerror_r(). There is a poorly defined convention for picking the
+// version that we want, but it is not clear whether it even works with
+// all versions of glibc.
+// So, instead, we provide this wrapper that automatically detects the
+// version that is in use, and then implements POSIX semantics.
+// N.B. In addition to what POSIX says, we also guarantee that "buf" will
+// be set to an empty string if this function fails. This means that, in
+// most cases, you do not need to check the error code and you can directly
+// use the value of "buf". It will never have an undefined value.
+GOOGLE_GLOG_DLL_DECL int posix_strerror_r(int err, char *buf, size_t len);
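+
+// For example:
+//   char buf[100];
+//   posix_strerror_r(errno, buf, sizeof(buf));
+//   fprintf(stderr, "open() failed: %s\n", buf);  // buf is always well-defined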
+
+
+// A class for which we define operator<<, which does nothing.
+class GOOGLE_GLOG_DLL_DECL NullStream : public LogMessage::LogStream {
+ public:
+ // Initialize the LogStream so the messages can be written somewhere
+ // (they'll never be actually displayed). This will be needed if a
+ // NullStream& is implicitly converted to LogStream&, in which case
+ // the overloaded NullStream::operator<< will not be invoked.
+ NullStream() : LogMessage::LogStream(message_buffer_, 1, 0) { }
+ NullStream(const char* /*file*/, int /*line*/,
+ const CheckOpString& /*result*/) :
+ LogMessage::LogStream(message_buffer_, 1, 0) { }
+ NullStream &stream() { return *this; }
+ private:
+  // A very short buffer for messages (which we discard anyway). This
+  // will be needed if a NullStream& is converted to a LogStream& (e.g. as
+  // a result of a conditional expression).
+ char message_buffer_[2];
+};
+
+// Do nothing. This operator is inline, allowing the message to be
+// compiled away. The message will not be compiled away if we do
+// something like (flag ? LOG(INFO) : LOG(ERROR)) << message; when
+// STRIP_LOG=WARNING. In those cases, NullStream will be implicitly
+// converted to LogStream and the message will be computed and then
+// quietly discarded.
+template<class T>
+inline NullStream& operator<<(NullStream &str, const T &value) { return str; }
+
+// Similar to NullStream, but aborts the program (without stack
+// trace), like LogMessageFatal.
+class GOOGLE_GLOG_DLL_DECL NullStreamFatal : public NullStream {
+ public:
+ NullStreamFatal() { }
+ NullStreamFatal(const char* file, int line, const CheckOpString& result) :
+ NullStream(file, line, result) { }
+ __attribute__ ((noreturn)) ~NullStreamFatal() { _exit(1); }
+};
+
+// Install a signal handler that will dump signal information and a stack
+// trace when the program crashes on certain signals. We'll install the
+// signal handler for the following signals.
+//
+// SIGSEGV, SIGILL, SIGFPE, SIGABRT, SIGBUS, and SIGTERM.
+//
+// By default, the signal handler will write the failure dump to the
+// standard error. You can customize the destination by installing your
+// own writer function by InstallFailureWriter() below.
+//
+// Note on threading:
+//
+// The function should be called before threads are created, if you want
+// to use the failure signal handler for all threads. The stack trace
+// will be shown only for the thread that receives the signal. In other
+// words, stack traces of other threads won't be shown.
+GOOGLE_GLOG_DLL_DECL void InstallFailureSignalHandler();
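+
+// Typical usage (assuming the usual InitGoogleLogging() entry point from
+// this library), early in main() before spawning any threads:
+//   google::InitGoogleLogging(argv[0]);
+//   google::InstallFailureSignalHandler();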
+
+// Installs a function that is used for writing the failure dump. "data"
+// is the pointer to the beginning of a message to be written, and "size"
+// is the size of the message. You should not expect that the data is
+// '\0'-terminated.
+GOOGLE_GLOG_DLL_DECL void InstallFailureWriter(
+ void (*writer)(const char* data, int size));
+
+}
+
+#endif // _LOGGING_H_
diff --git a/extern/libmv/third_party/glog/src/glog/raw_logging.h b/extern/libmv/third_party/glog/src/glog/raw_logging.h
new file mode 100644
index 00000000000..9e9b3772f3b
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/glog/raw_logging.h
@@ -0,0 +1,185 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Maxim Lifantsev
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation and synchronization code.
+
+#ifndef BASE_RAW_LOGGING_H_
+#define BASE_RAW_LOGGING_H_
+
+#include <time.h>
+
+namespace google {
+
+#include "glog/log_severity.h"
+#include "glog/vlog_is_on.h"
+
+// Annoying stuff for windows -- makes sure clients can import these functions
+#ifndef GOOGLE_GLOG_DLL_DECL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllimport)
+# else
+# define GOOGLE_GLOG_DLL_DECL
+# endif
+#endif
+
+// This is similar to LOG(severity) << format... and VLOG(level) << format...,
+// but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+// memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+// RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// RAW_VLOG(3, "status is %i", status);
+// These will print almost-standard log lines like the following, to stderr only:
+// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+// I0821 211317 file.cc:142] RAW: status is 20
+#define RAW_LOG(severity, ...) \
+ do { \
+ switch (google::severity) { \
+ case 0: \
+ RAW_LOG_INFO(__VA_ARGS__); \
+ break; \
+ case 1: \
+ RAW_LOG_WARNING(__VA_ARGS__); \
+ break; \
+ case 2: \
+ RAW_LOG_ERROR(__VA_ARGS__); \
+ break; \
+ case 3: \
+ RAW_LOG_FATAL(__VA_ARGS__); \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+// The following STRIP_LOG testing is performed in the header file so that it's
+// possible to completely compile out the logging code and the log messages.
+#if STRIP_LOG == 0
+#define RAW_VLOG(verboselevel, ...) \
+ do { \
+ if (VLOG_IS_ON(verboselevel)) { \
+ RAW_LOG_INFO(__VA_ARGS__); \
+ } \
+ } while (0)
+#else
+#define RAW_VLOG(verboselevel, ...) RawLogStub__(0, __VA_ARGS__)
+#endif // STRIP_LOG == 0
+
+#if STRIP_LOG == 0
+#define RAW_LOG_INFO(...) google::RawLog__(google::INFO, \
+ __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define RAW_LOG_INFO(...) google::RawLogStub__(0, __VA_ARGS__)
+#endif // STRIP_LOG == 0
+
+#if STRIP_LOG <= 1
+#define RAW_LOG_WARNING(...) google::RawLog__(google::WARNING, \
+ __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define RAW_LOG_WARNING(...) google::RawLogStub__(0, __VA_ARGS__)
+#endif // STRIP_LOG <= 1
+
+#if STRIP_LOG <= 2
+#define RAW_LOG_ERROR(...) google::RawLog__(google::ERROR, \
+ __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define RAW_LOG_ERROR(...) google::RawLogStub__(0, __VA_ARGS__)
+#endif // STRIP_LOG <= 2
+
+#if STRIP_LOG <= 3
+#define RAW_LOG_FATAL(...) google::RawLog__(google::FATAL, \
+ __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define RAW_LOG_FATAL(...) \
+ do { \
+ google::RawLogStub__(0, __VA_ARGS__); \
+ exit(1); \
+ } while (0)
+#endif // STRIP_LOG <= 3
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// it uses only RAW_LOG, which does not allocate memory.
+// We deliberately do not provide an args list here, to encourage this usage:
+//   if (!cond)  RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when they are not needed.
+#define RAW_CHECK(condition, message) \
+ do { \
+ if (!(condition)) { \
+ RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+ } \
+ } while (0)
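+
+// For example (fd is a hypothetical file descriptor):
+//   RAW_CHECK(fd != -1, "open of /dev/zero failed");
+// Note that the message is a plain string, not a format with args.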
+
+// Debug versions of RAW_LOG and RAW_CHECK
+#ifndef NDEBUG
+
+#define RAW_DLOG(severity, ...) RAW_LOG(severity, __VA_ARGS__)
+#define RAW_DCHECK(condition, message) RAW_CHECK(condition, message)
+
+#else // NDEBUG
+
+#define RAW_DLOG(severity, ...) \
+ while (false) \
+ RAW_LOG(severity, __VA_ARGS__)
+#define RAW_DCHECK(condition, message) \
+ while (false) \
+ RAW_CHECK(condition, message)
+
+#endif // NDEBUG
+
+// Stub log function used to work around unused variable warnings when
+// building with STRIP_LOG > 0.
+static inline void RawLogStub__(int ignored, ...) {
+}
+
+// Helper function to implement RAW_LOG and RAW_VLOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+GOOGLE_GLOG_DLL_DECL void RawLog__(LogSeverity severity,
+ const char* file,
+ int line,
+ const char* format, ...)
+ __attribute__((__format__ (__printf__, 4, 5)));
+
+// Hack to propagate time information into this module so that
+// this module does not have to directly call localtime_r(),
+// which could allocate memory.
+GOOGLE_GLOG_DLL_DECL void RawLog__SetLastTime(const struct tm& t, int usecs);
+
+}
+
+#endif // BASE_RAW_LOGGING_H_
diff --git a/extern/libmv/third_party/glog/src/glog/vlog_is_on.h b/extern/libmv/third_party/glog/src/glog/vlog_is_on.h
new file mode 100644
index 00000000000..02b0b867097
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/glog/vlog_is_on.h
@@ -0,0 +1,129 @@
+// Copyright (c) 1999, 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Ray Sidney and many others
+//
+// Defines the VLOG_IS_ON macro that controls the variable-verbosity
+// conditional logging.
+//
+// It's used by VLOG and VLOG_IF in logging.h
+// and by RAW_VLOG in raw_logging.h to trigger the logging.
+//
+// It can also be used directly e.g. like this:
+// if (VLOG_IS_ON(2)) {
+// // do some logging preparation and logging
+// // that can't be accomplished e.g. via just VLOG(2) << ...;
+// }
+//
+// The truth value that VLOG_IS_ON(level) returns is determined by
+// the three verbosity level flags:
+// --v=<n> Gives the default maximal active V-logging level;
+// 0 is the default.
+// Normally positive values are used for V-logging levels.
+// --vmodule=<str> Gives the per-module maximal V-logging levels to override
+// the value given by --v.
+// E.g. "my_module=2,foo*=3" would change the logging level
+// for all code in source files "my_module.*" and "foo*.*"
+// ("-inl" suffixes are also disregarded for this matching).
+//
+// SetVLOGLevel helper function is provided to do limited dynamic control over
+// V-logging by overriding the per-module settings given via --vmodule flag.
+//
+// CAVEAT: --vmodule functionality is not available in non-gcc compilers.
+//
+
+#ifndef BASE_VLOG_IS_ON_H_
+#define BASE_VLOG_IS_ON_H_
+
+#include "glog/log_severity.h"
+
+// Annoying stuff for windows -- makes sure clients can import these functions
+#ifndef GOOGLE_GLOG_DLL_DECL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllimport)
+# else
+# define GOOGLE_GLOG_DLL_DECL
+# endif
+#endif
+
+#if defined(__GNUC__)
+// We emit an anonymous static int* variable at every VLOG_IS_ON(n) site.
+// (Normally) the first time every VLOG_IS_ON(n) site is hit,
+// we determine what variable will dynamically control logging at this site:
+// it's either FLAGS_v or an appropriate internal variable
+// matching the current source file that represents results of
+// parsing of --vmodule flag and/or SetVLOGLevel calls.
+#define VLOG_IS_ON(verboselevel) \
+ __extension__ \
+ ({ static google::int32* vlocal__ = &google::kLogSiteUninitialized; \
+ google::int32 verbose_level__ = (verboselevel); \
+ (*vlocal__ >= verbose_level__) && \
+ ((vlocal__ != &google::kLogSiteUninitialized) || \
+ (google::InitVLOG3__(&vlocal__, &FLAGS_v, \
+ __FILE__, verbose_level__))); })
+#else
+// GNU extensions not available, so we do not support --vmodule.
+// Dynamic value of FLAGS_v always controls the logging level.
+#define VLOG_IS_ON(verboselevel) (FLAGS_v >= (verboselevel))
+#endif
+
+// Set VLOG(_IS_ON) level for module_pattern to log_level.
+// This lets us dynamically control what is normally set by the --vmodule flag.
+// Returns the level that previously applied to module_pattern.
+// NOTE: To change the log level for VLOG(_IS_ON) sites
+// that have already executed after/during InitGoogleLogging,
+// one needs to supply the exact --vmodule pattern that applied to them.
+// (If no --vmodule pattern applied to them
+// the value of FLAGS_v will continue to control them.)
+extern GOOGLE_GLOG_DLL_DECL int SetVLOGLevel(const char* module_pattern,
+ int log_level);
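+
+// For example:
+//   google::SetVLOGLevel("my_module", 2);  // enable VLOG(2) in my_module.*
+//   int old_level = google::SetVLOGLevel("foo*", 1);  // previous level for "foo*"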
+
+// Various declarations needed for VLOG_IS_ON above: =========================
+
+// Special value used to indicate that a VLOG_IS_ON site has not been
+// initialized. We make this a large value, so the common-case check
+// of "*vlocal__ >= verbose_level__" in VLOG_IS_ON definition
+// passes in such cases and InitVLOG3__ is then triggered.
+extern google::int32 kLogSiteUninitialized;
+
+// Helper routine which determines the logging info for a particular VLOG site.
+//   site_flag is the address of the site-local pointer to the controlling
+//   verbosity level.
+//   site_default is the default to use for *site_flag.
+//   fname is the current source file name.
+//   verbose_level is the argument to VLOG_IS_ON.
+// We will return the return value for VLOG_IS_ON,
+// and if possible set *site_flag appropriately.
+extern GOOGLE_GLOG_DLL_DECL bool InitVLOG3__(
+ google::int32** site_flag,
+ google::int32* site_default,
+ const char* fname,
+ google::int32 verbose_level);
+
+#endif // BASE_VLOG_IS_ON_H_
diff --git a/extern/libmv/third_party/glog/src/logging.cc b/extern/libmv/third_party/glog/src/logging.cc
new file mode 100644
index 00000000000..1bb3867aa10
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/logging.cc
@@ -0,0 +1,1783 @@
+// Copyright (c) 1999, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#define _GNU_SOURCE 1 // needed for O_NOFOLLOW and pread()/pwrite()
+
+#include "utilities.h"
+
+#include <assert.h>
+#include <iomanip>
+#include <string>
+#ifdef HAVE_UNISTD_H
+# include <unistd.h> // For _exit.
+#endif
+#include <climits>
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_UTSNAME_H
+# include <sys/utsname.h> // For uname.
+#endif
+#include <fcntl.h>
+#include <cstdio>
+#include <iostream>
+#include <stdarg.h>
+#include <stdlib.h>
+#ifdef HAVE_PWD_H
+# include <pwd.h>
+#endif
+#ifdef HAVE_SYSLOG_H
+# include <syslog.h>
+#endif
+#include <vector>
+#include <errno.h> // for errno
+#include <sstream>
+#include "base/commandlineflags.h" // to get the program name
+#include <glog/logging.h>
+#include <glog/raw_logging.h>
+#include "base/googleinit.h"
+
+#ifdef HAVE_STACKTRACE
+# include "stacktrace.h"
+#endif
+
+using std::string;
+using std::vector;
+using std::ostrstream;
+using std::setw;
+using std::setfill;
+using std::hex;
+using std::dec;
+using std::min;
+using std::ostream;
+using std::ostringstream;
+using std::strstream;
+
+// There is no thread annotation support.
+#define EXCLUSIVE_LOCKS_REQUIRED(mu)
+
+static bool BoolFromEnv(const char *varname, bool defval) {
+ const char* const valstr = getenv(varname);
+ if (!valstr) {
+ return defval;
+ }
+ return memchr("tTyY1\0", valstr[0], 6) != NULL;
+}
+
+GLOG_DEFINE_bool(logtostderr, BoolFromEnv("GOOGLE_LOGTOSTDERR", false),
+ "log messages go to stderr instead of logfiles");
+GLOG_DEFINE_bool(alsologtostderr, BoolFromEnv("GOOGLE_ALSOLOGTOSTDERR", false),
+ "log messages go to stderr in addition to logfiles");
+#ifdef OS_LINUX
+GLOG_DEFINE_bool(drop_log_memory, true, "Drop in-memory buffers of log contents. "
+ "Logs can grow very quickly and they are rarely read before they "
+ "need to be evicted from memory. Instead, drop them from memory "
+ "as soon as they are flushed to disk.");
+_START_GOOGLE_NAMESPACE_
+namespace logging {
+static const int64 kPageSize = getpagesize();
+}
+_END_GOOGLE_NAMESPACE_
+#endif
+
+// By default, errors (including fatal errors) get logged to stderr as
+// well as the file.
+//
+// The default is ERROR instead of FATAL so that users can see problems
+// when they run a program without having to look in another file.
+DEFINE_int32(stderrthreshold,
+ GOOGLE_NAMESPACE::ERROR,
+ "log messages at or above this level are copied to stderr in "
+ "addition to logfiles. This flag obsoletes --alsologtostderr.");
+
+GLOG_DEFINE_string(alsologtoemail, "",
+ "log messages go to these email addresses "
+ "in addition to logfiles");
+GLOG_DEFINE_bool(log_prefix, true,
+ "Prepend the log prefix to the start of each log line");
+GLOG_DEFINE_int32(minloglevel, 0, "Messages logged at a lower level than this don't "
+ "actually get logged anywhere");
+GLOG_DEFINE_int32(logbuflevel, 0,
+ "Buffer log messages logged at this level or lower"
+ " (-1 means don't buffer; 0 means buffer INFO only;"
+ " ...)");
+GLOG_DEFINE_int32(logbufsecs, 30,
+ "Buffer log messages for at most this many seconds");
+GLOG_DEFINE_int32(logemaillevel, 999,
+ "Email log messages logged at this level or higher"
+ " (0 means email all; 3 means email FATAL only;"
+ " ...)");
+GLOG_DEFINE_string(logmailer, "/bin/mail",
+ "Mailer used to send logging email");
+
+// Compute the default value for --log_dir
+static const char* DefaultLogDir() {
+ const char* env;
+ env = getenv("GOOGLE_LOG_DIR");
+ if (env != NULL && env[0] != '\0') {
+ return env;
+ }
+ env = getenv("TEST_TMPDIR");
+ if (env != NULL && env[0] != '\0') {
+ return env;
+ }
+ return "";
+}
+
+GLOG_DEFINE_string(log_dir, DefaultLogDir(),
+ "If specified, logfiles are written into this directory instead "
+ "of the default logging directory.");
+GLOG_DEFINE_string(log_link, "", "Put additional links to the log "
+ "files in this directory");
+
+GLOG_DEFINE_int32(max_log_size, 1800,
+ "approx. maximum log file size (in MB). A value of 0 will "
+ "be silently overridden to 1.");
+
+GLOG_DEFINE_bool(stop_logging_if_full_disk, false,
+ "Stop attempting to log to disk if the disk is full.");
+
+GLOG_DEFINE_string(log_backtrace_at, "",
+ "Emit a backtrace when logging at file:linenum.");
+
+// TODO(hamaji): consider windows
+#define PATH_SEPARATOR '/'
+
+static void GetHostName(string* hostname) {
+#if defined(HAVE_SYS_UTSNAME_H)
+ struct utsname buf;
+ if (0 != uname(&buf)) {
+ // ensure null termination on failure
+ *buf.nodename = '\0';
+ }
+ *hostname = buf.nodename;
+#elif defined(OS_WINDOWS)
+ char buf[MAX_COMPUTERNAME_LENGTH + 1];
+ DWORD len = MAX_COMPUTERNAME_LENGTH + 1;
+ if (GetComputerNameA(buf, &len)) {
+ *hostname = buf;
+ } else {
+ hostname->clear();
+ }
+#else
+# warning There is no way to retrieve the host name.
+ *hostname = "(unknown)";
+#endif
+}
+
+_START_GOOGLE_NAMESPACE_
+
+// Safely get max_log_size, overriding to 1 if it somehow gets defined as 0
+static int32 MaxLogSize() {
+ return (FLAGS_max_log_size > 0 ? FLAGS_max_log_size : 1);
+}
+
+// A mutex that allows only one thread to log at a time, to keep things from
+// getting jumbled. Some other very uncommon logging operations (like
+// changing the destination file for log messages of a given severity) also
+// lock this mutex. Please be sure that anybody who might possibly need to
+// lock it does so.
+static Mutex log_mutex;
+
+// Number of messages sent at each severity. Under log_mutex.
+int64 LogMessage::num_messages_[NUM_SEVERITIES] = {0, 0, 0, 0};
+
+// Globally disable log writing (if disk is full)
+static bool stop_writing = false;
+
+const char*const LogSeverityNames[NUM_SEVERITIES] = {
+ "INFO", "WARNING", "ERROR", "FATAL"
+};
+
+// Has the user called SetExitOnDFatal(true)?
+static bool exit_on_dfatal = true;
+
+const char* GetLogSeverityName(LogSeverity severity) {
+ return LogSeverityNames[severity];
+}
+
+static bool SendEmailInternal(const char* dest, const char* subject,
+                              const char* body, bool use_logging);
+
+base::Logger::~Logger() {
+}
+
+namespace {
+
+// Encapsulates all file-system related state
+class LogFileObject : public base::Logger {
+ public:
+ LogFileObject(LogSeverity severity, const char* base_filename);
+ ~LogFileObject();
+
+ virtual void Write(bool force_flush, // Should we force a flush here?
+ time_t timestamp, // Timestamp for this entry
+ const char* message,
+ int message_len);
+
+ // Configuration options
+ void SetBasename(const char* basename);
+ void SetExtension(const char* ext);
+ void SetSymlinkBasename(const char* symlink_basename);
+
+ // Normal flushing routine
+ virtual void Flush();
+
+ // It is the actual file length for the system loggers,
+ // i.e., INFO, ERROR, etc.
+ virtual uint32 LogSize() {
+ MutexLock l(&lock_);
+ return file_length_;
+ }
+
+ // Internal flush routine. Exposed so that FlushLogFilesUnsafe()
+ // can avoid grabbing a lock. Usually Flush() calls it after
+ // acquiring lock_.
+ void FlushUnlocked();
+
+ private:
+ static const uint32 kRolloverAttemptFrequency = 0x20;
+
+ Mutex lock_;
+ bool base_filename_selected_;
+ string base_filename_;
+ string symlink_basename_;
+  string filename_extension_;     // option users can specify (e.g. to add a port number)
+ FILE* file_;
+ LogSeverity severity_;
+ uint32 bytes_since_flush_;
+ uint32 file_length_;
+ unsigned int rollover_attempt_;
+ int64 next_flush_time_; // cycle count at which to flush log
+
+ // Actually create a logfile using the value of base_filename_ and the
+ // supplied argument time_pid_string
+ // REQUIRES: lock_ is held
+ bool CreateLogfile(const char* time_pid_string);
+};
+
+} // namespace
+
+class LogDestination {
+ public:
+ friend class LogMessage;
+ friend void ReprintFatalMessage();
+ friend base::Logger* base::GetLogger(LogSeverity);
+ friend void base::SetLogger(LogSeverity, base::Logger*);
+
+ // These methods are just forwarded to by their global versions.
+ static void SetLogDestination(LogSeverity severity,
+ const char* base_filename);
+ static void SetLogSymlink(LogSeverity severity,
+ const char* symlink_basename);
+ static void AddLogSink(LogSink *destination);
+ static void RemoveLogSink(LogSink *destination);
+ static void SetLogFilenameExtension(const char* filename_extension);
+ static void SetStderrLogging(LogSeverity min_severity);
+ static void SetEmailLogging(LogSeverity min_severity, const char* addresses);
+ static void LogToStderr();
+ // Flush all log files that are at least at the given severity level
+ static void FlushLogFiles(int min_severity);
+ static void FlushLogFilesUnsafe(int min_severity);
+
+ // We set the maximum size of our packet to 1400 bytes, the logic
+ // being to prevent fragmentation; really this number is arbitrary.
+ static const int kNetworkBytes = 1400;
+
+ static const string& hostname();
+ private:
+
+ LogDestination(LogSeverity severity, const char* base_filename);
+ ~LogDestination() { }
+
+ // Take a log message of a particular severity and log it to stderr
+ // iff it's of a high enough severity to deserve it.
+ static void MaybeLogToStderr(LogSeverity severity, const char* message,
+ size_t len);
+
+ // Take a log message of a particular severity and log it to email
+ // iff it's of a high enough severity to deserve it.
+ static void MaybeLogToEmail(LogSeverity severity, const char* message,
+ size_t len);
+ // Take a log message of a particular severity and log it to a file
+ // iff the base filename is not "" (which means "don't log to me")
+ static void MaybeLogToLogfile(LogSeverity severity,
+ time_t timestamp,
+ const char* message, size_t len);
+ // Take a log message of a particular severity and log it to the file
+ // for that severity and also for all files with severity less than
+ // this severity.
+ static void LogToAllLogfiles(LogSeverity severity,
+ time_t timestamp,
+ const char* message, size_t len);
+
+ // Send logging info to all registered sinks.
+ static void LogToSinks(LogSeverity severity,
+ const char *full_filename,
+ const char *base_filename,
+ int line,
+ const struct ::tm* tm_time,
+ const char* message,
+ size_t message_len);
+
+ // Wait for all registered sinks via WaitTillSent
+ // including the optional one in "data".
+ static void WaitForSinks(LogMessage::LogMessageData* data);
+
+ static LogDestination* log_destination(LogSeverity severity);
+
+ LogFileObject fileobject_;
+ base::Logger* logger_; // Either &fileobject_, or wrapper around it
+
+ static LogDestination* log_destinations_[NUM_SEVERITIES];
+ static LogSeverity email_logging_severity_;
+ static string addresses_;
+ static string hostname_;
+
+ // arbitrary global logging destinations.
+ static vector<LogSink*>* sinks_;
+
+ // Protects the vector sinks_,
+ // but not the LogSink objects its elements reference.
+ static Mutex sink_mutex_;
+
+ // Disallow copying and assignment.
+ LogDestination(const LogDestination&);
+ LogDestination& operator=(const LogDestination&);
+};
+
+// Errors do not get logged to email by default.
+LogSeverity LogDestination::email_logging_severity_ = 99999;
+
+string LogDestination::addresses_;
+string LogDestination::hostname_;
+
+vector<LogSink*>* LogDestination::sinks_ = NULL;
+Mutex LogDestination::sink_mutex_;
+
+/* static */
+const string& LogDestination::hostname() {
+ if (hostname_.empty()) {
+ GetHostName(&hostname_);
+ if (hostname_.empty()) {
+ hostname_ = "(unknown)";
+ }
+ }
+ return hostname_;
+}
+
+LogDestination::LogDestination(LogSeverity severity,
+ const char* base_filename)
+ : fileobject_(severity, base_filename),
+ logger_(&fileobject_) {
+}
+
+inline void LogDestination::FlushLogFilesUnsafe(int min_severity) {
+ // assume we have the log_mutex or we simply don't care
+ // about it
+ for (int i = min_severity; i < NUM_SEVERITIES; i++) {
+ LogDestination* log = log_destination(i);
+ if (log != NULL) {
+ // Flush the base fileobject_ logger directly instead of going
+ // through any wrappers to reduce chance of deadlock.
+ log->fileobject_.FlushUnlocked();
+ }
+ }
+}
+
+inline void LogDestination::FlushLogFiles(int min_severity) {
+ // Prevent any subtle race conditions by wrapping a mutex lock around
+ // all this stuff.
+ MutexLock l(&log_mutex);
+ for (int i = min_severity; i < NUM_SEVERITIES; i++) {
+ LogDestination* log = log_destination(i);
+ if (log != NULL) {
+ log->logger_->Flush();
+ }
+ }
+}
+
+inline void LogDestination::SetLogDestination(LogSeverity severity,
+ const char* base_filename) {
+ assert(severity >= 0 && severity < NUM_SEVERITIES);
+ // Prevent any subtle race conditions by wrapping a mutex lock around
+ // all this stuff.
+ MutexLock l(&log_mutex);
+ log_destination(severity)->fileobject_.SetBasename(base_filename);
+}
+
+inline void LogDestination::SetLogSymlink(LogSeverity severity,
+ const char* symlink_basename) {
+ CHECK_GE(severity, 0);
+ CHECK_LT(severity, NUM_SEVERITIES);
+ MutexLock l(&log_mutex);
+ log_destination(severity)->fileobject_.SetSymlinkBasename(symlink_basename);
+}
+
+inline void LogDestination::AddLogSink(LogSink *destination) {
+ // Prevent any subtle race conditions by wrapping a mutex lock around
+ // all this stuff.
+ MutexLock l(&sink_mutex_);
+ if (!sinks_) sinks_ = new vector<LogSink*>;
+ sinks_->push_back(destination);
+}
+
+inline void LogDestination::RemoveLogSink(LogSink *destination) {
+ // Prevent any subtle race conditions by wrapping a mutex lock around
+ // all this stuff.
+ MutexLock l(&sink_mutex_);
+ // This doesn't keep the sinks in order, but who cares?
+ if (sinks_) {
+ for (int i = sinks_->size() - 1; i >= 0; i--) {
+ if ((*sinks_)[i] == destination) {
+ (*sinks_)[i] = (*sinks_)[sinks_->size() - 1];
+ sinks_->pop_back();
+ break;
+ }
+ }
+ }
+}
+
+inline void LogDestination::SetLogFilenameExtension(const char* ext) {
+ // Prevent any subtle race conditions by wrapping a mutex lock around
+ // all this stuff.
+ MutexLock l(&log_mutex);
+ for ( int severity = 0; severity < NUM_SEVERITIES; ++severity ) {
+ log_destination(severity)->fileobject_.SetExtension(ext);
+ }
+}
+
+inline void LogDestination::SetStderrLogging(LogSeverity min_severity) {
+ assert(min_severity >= 0 && min_severity < NUM_SEVERITIES);
+ // Prevent any subtle race conditions by wrapping a mutex lock around
+ // all this stuff.
+ MutexLock l(&log_mutex);
+ FLAGS_stderrthreshold = min_severity;
+}
+
+inline void LogDestination::LogToStderr() {
+ // *Don't* put this stuff in a mutex lock, since SetStderrLogging &
+ // SetLogDestination already do the locking!
+ SetStderrLogging(0); // thus everything is "also" logged to stderr
+ for ( int i = 0; i < NUM_SEVERITIES; ++i ) {
+ SetLogDestination(i, ""); // "" turns off logging to a logfile
+ }
+}
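+// Usage sketch (illustrative, not part of the original source), via the
+// global wrapper defined near the end of this file:
+//   google::InitGoogleLogging(argv[0]);
+//   google::LogToStderr();  // everything to stderr, no log files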
+
+inline void LogDestination::SetEmailLogging(LogSeverity min_severity,
+ const char* addresses) {
+ assert(min_severity >= 0 && min_severity < NUM_SEVERITIES);
+ // Prevent any subtle race conditions by wrapping a mutex lock around
+ // all this stuff.
+ MutexLock l(&log_mutex);
+ LogDestination::email_logging_severity_ = min_severity;
+ LogDestination::addresses_ = addresses;
+}
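+// Illustrative usage (hypothetical address, not part of the original
+// source): mail every ERROR-or-worse message through FLAGS_logmailer:
+//   google::SetEmailLogging(ERROR, "oncall@example.com");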
+
+static void WriteToStderr(const char* message, size_t len) {
+ // Avoid using cerr from this module since we may get called during
+ // exit code, and cerr may be partially or fully destroyed by then.
+ write(STDERR_FILENO, message, len);
+}
+
+inline void LogDestination::MaybeLogToStderr(LogSeverity severity,
+ const char* message, size_t len) {
+ if ((severity >= FLAGS_stderrthreshold) || FLAGS_alsologtostderr) {
+ WriteToStderr(message, len);
+#ifdef OS_WINDOWS
+ // On Windows, also output to the debugger
+ ::OutputDebugStringA(string(message,len).c_str());
+#endif
+ }
+}
+
+
+inline void LogDestination::MaybeLogToEmail(LogSeverity severity,
+ const char* message, size_t len) {
+ if (severity >= email_logging_severity_ ||
+ severity >= FLAGS_logemaillevel) {
+ string to(FLAGS_alsologtoemail);
+ if (!addresses_.empty()) {
+ if (!to.empty()) {
+ to += ",";
+ }
+ to += addresses_;
+ }
+ const string subject(string("[LOG] ") + LogSeverityNames[severity] + ": " +
+ glog_internal_namespace_::ProgramInvocationShortName());
+ string body(hostname());
+ body += "\n\n";
+ body.append(message, len);
+
+ // should NOT use SendEmail(). The caller of this function holds the
+ // log_mutex and SendEmail() calls LOG/VLOG which will block trying to
+ // acquire the log_mutex object. Use SendEmailInternal() and set
+ // use_logging to false.
+ SendEmailInternal(to.c_str(), subject.c_str(), body.c_str(), false);
+ }
+}
+
+
+inline void LogDestination::MaybeLogToLogfile(LogSeverity severity,
+ time_t timestamp,
+ const char* message,
+ size_t len) {
+ const bool should_flush = severity > FLAGS_logbuflevel;
+ LogDestination* destination = log_destination(severity);
+ destination->logger_->Write(should_flush, timestamp, message, len);
+}
+
+inline void LogDestination::LogToAllLogfiles(LogSeverity severity,
+ time_t timestamp,
+ const char* message,
+ size_t len) {
+
+ if ( FLAGS_logtostderr ) // global flag: never log to file
+ WriteToStderr(message, len);
+ else
+ for (int i = severity; i >= 0; --i)
+ LogDestination::MaybeLogToLogfile(i, timestamp, message, len);
+
+}
+
+inline void LogDestination::LogToSinks(LogSeverity severity,
+ const char *full_filename,
+ const char *base_filename,
+ int line,
+ const struct ::tm* tm_time,
+ const char* message,
+ size_t message_len) {
+ ReaderMutexLock l(&sink_mutex_);
+ if (sinks_) {
+ for (int i = sinks_->size() - 1; i >= 0; i--) {
+ (*sinks_)[i]->send(severity, full_filename, base_filename,
+ line, tm_time, message, message_len);
+ }
+ }
+}
+
+inline void LogDestination::WaitForSinks(LogMessage::LogMessageData* data) {
+ ReaderMutexLock l(&sink_mutex_);
+ if (sinks_) {
+ for (int i = sinks_->size() - 1; i >= 0; i--) {
+ (*sinks_)[i]->WaitTillSent();
+ }
+ }
+ const bool send_to_sink =
+ (data->send_method_ == &LogMessage::SendToSink) ||
+ (data->send_method_ == &LogMessage::SendToSinkAndLog);
+ if (send_to_sink && data->sink_ != NULL) {
+ data->sink_->WaitTillSent();
+ }
+}
+
+LogDestination* LogDestination::log_destinations_[NUM_SEVERITIES];
+
+inline LogDestination* LogDestination::log_destination(LogSeverity severity) {
+ assert(severity >= 0 && severity < NUM_SEVERITIES);
+ if (!log_destinations_[severity]) {
+ log_destinations_[severity] = new LogDestination(severity, NULL);
+ }
+ return log_destinations_[severity];
+}
+
+namespace {
+
+LogFileObject::LogFileObject(LogSeverity severity,
+ const char* base_filename)
+ : base_filename_selected_(base_filename != NULL),
+ base_filename_((base_filename != NULL) ? base_filename : ""),
+ symlink_basename_(glog_internal_namespace_::ProgramInvocationShortName()),
+ filename_extension_(),
+ file_(NULL),
+ severity_(severity),
+ bytes_since_flush_(0),
+ file_length_(0),
+ rollover_attempt_(kRolloverAttemptFrequency-1),
+ next_flush_time_(0) {
+ assert(severity >= 0);
+ assert(severity < NUM_SEVERITIES);
+}
+
+LogFileObject::~LogFileObject() {
+ MutexLock l(&lock_);
+ if (file_ != NULL) {
+ fclose(file_);
+ file_ = NULL;
+ }
+}
+
+void LogFileObject::SetBasename(const char* basename) {
+ MutexLock l(&lock_);
+ base_filename_selected_ = true;
+ if (base_filename_ != basename) {
+ // Get rid of old log file since we are changing names
+ if (file_ != NULL) {
+ fclose(file_);
+ file_ = NULL;
+ rollover_attempt_ = kRolloverAttemptFrequency-1;
+ }
+ base_filename_ = basename;
+ }
+}
+
+void LogFileObject::SetExtension(const char* ext) {
+ MutexLock l(&lock_);
+ if (filename_extension_ != ext) {
+ // Get rid of old log file since we are changing names
+ if (file_ != NULL) {
+ fclose(file_);
+ file_ = NULL;
+ rollover_attempt_ = kRolloverAttemptFrequency-1;
+ }
+ filename_extension_ = ext;
+ }
+}
+
+void LogFileObject::SetSymlinkBasename(const char* symlink_basename) {
+ MutexLock l(&lock_);
+ symlink_basename_ = symlink_basename;
+}
+
+void LogFileObject::Flush() {
+ MutexLock l(&lock_);
+ FlushUnlocked();
+}
+
+void LogFileObject::FlushUnlocked() {
+ if (file_ != NULL) {
+ fflush(file_);
+ bytes_since_flush_ = 0;
+ }
+ // Figure out when we are due for another flush.
+ const int64 next = (FLAGS_logbufsecs
+ * static_cast<int64>(1000000)); // in usec
+ next_flush_time_ = CycleClock_Now() + UsecToCycles(next);
+}
+
+bool LogFileObject::CreateLogfile(const char* time_pid_string) {
+ string string_filename = base_filename_+filename_extension_+
+ time_pid_string;
+ const char* filename = string_filename.c_str();
+ int fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0664);
+ if (fd == -1) return false;
+#ifdef HAVE_FCNTL
+ // Mark the file close-on-exec. We don't really care if this fails
+ fcntl(fd, F_SETFD, FD_CLOEXEC);
+#endif
+
+ file_ = fdopen(fd, "a"); // Make a FILE*.
+ if (file_ == NULL) { // Man, we're screwed!
+ close(fd);
+ unlink(filename); // Erase the half-baked evidence: an unusable log file
+ return false;
+ }
+
+ // We try to create a symlink called <program_name>.<severity>,
+ // which is easier to use. (Every time we create a new logfile,
+ // we destroy the old symlink and create a new one, so it always
+ // points to the latest logfile.) If it fails, we're sad but it's
+ // no error.
+ if (!symlink_basename_.empty()) {
+ // take directory from filename
+ const char* slash = strrchr(filename, PATH_SEPARATOR);
+ const string linkname =
+ symlink_basename_ + '.' + LogSeverityNames[severity_];
+ string linkpath;
+ if ( slash ) linkpath = string(filename, slash-filename+1); // get dirname
+ linkpath += linkname;
+ unlink(linkpath.c_str()); // delete old one if it exists
+
+ // We must have unistd.h.
+#ifdef HAVE_UNISTD_H
+ // Make the symlink be relative (in the same dir) so that if the
+ // entire log directory gets relocated the link is still valid.
+ const char *linkdest = slash ? (slash + 1) : filename;
+ symlink(linkdest, linkpath.c_str()); // silently ignore failures
+
+ // Make an additional link to the log file in a place specified by
+ // FLAGS_log_link, if indicated
+ if (!FLAGS_log_link.empty()) {
+ linkpath = FLAGS_log_link + "/" + linkname;
+ unlink(linkpath.c_str()); // delete old one if it exists
+ symlink(filename, linkpath.c_str()); // silently ignore failures
+ }
+#endif
+ }
+
+ return true; // Everything worked
+}
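+// Worked example (illustrative): with base_filename_
+// "/tmp/webserver.examplehost.root.log.INFO." and time_pid_string
+// "19990817-150000.4354", this creates
+// "/tmp/webserver.examplehost.root.log.INFO.19990817-150000.4354" plus a
+// relative symlink "/tmp/webserver.INFO" pointing at it.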
+
+void LogFileObject::Write(bool force_flush,
+ time_t timestamp,
+ const char* message,
+ int message_len) {
+ MutexLock l(&lock_);
+
+ // We don't log if the base_name_ is "" (which means "don't write")
+ if (base_filename_selected_ && base_filename_.empty()) {
+ return;
+ }
+
+ if (static_cast<int>(file_length_ >> 20) >= MaxLogSize()) {
+ if (file_ != NULL) fclose(file_);
+ file_ = NULL;
+ file_length_ = bytes_since_flush_ = 0;
+ rollover_attempt_ = kRolloverAttemptFrequency-1;
+ }
+
+ // If there's no destination file, make one before outputting
+ if (file_ == NULL) {
+ // Try to rollover the log file every 32 log messages. The only time
+ // this could matter would be when we have trouble creating the log
+ // file. If that happens, we'll lose lots of log messages, of course!
+ if (++rollover_attempt_ != kRolloverAttemptFrequency) return;
+ rollover_attempt_ = 0;
+
+ struct ::tm tm_time;
+ localtime_r(&timestamp, &tm_time);
+
+ // The logfile's filename will have the date/time & pid in it
+ char time_pid_string[256]; // More than enough chars for time, pid, \0
+ ostrstream time_pid_stream(time_pid_string, sizeof(time_pid_string));
+ time_pid_stream.fill('0');
+ time_pid_stream << 1900+tm_time.tm_year
+ << setw(2) << 1+tm_time.tm_mon
+ << setw(2) << tm_time.tm_mday
+ << '-'
+ << setw(2) << tm_time.tm_hour
+ << setw(2) << tm_time.tm_min
+ << setw(2) << tm_time.tm_sec
+ << '.'
+ << GetMainThreadPid()
+ << '\0';
+
+ if (base_filename_selected_) {
+ if (!CreateLogfile(time_pid_string)) {
+ perror("Could not create log file");
+ fprintf(stderr, "COULD NOT CREATE LOGFILE '%s'!\n", time_pid_string);
+ return;
+ }
+ } else {
+ // If no base filename for logs of this severity has been set, use a
+ // default base filename of
+ // "<program name>.<hostname>.<user name>.log.<severity level>.". So
+ // logfiles will have names like
+ // webserver.examplehost.root.log.INFO.19990817-150000.4354, where
+ // 19990817 is a date (1999 August 17), 150000 is a time (15:00:00),
+ // and 4354 is the pid of the logging process. The date & time reflect
+ // when the file was created for output.
+ //
+ // Where does the file get put? Successively try the directories
+ // "/tmp", and "."
+ string stripped_filename(
+ glog_internal_namespace_::ProgramInvocationShortName());
+ string hostname;
+ GetHostName(&hostname);
+
+ string uidname = MyUserName();
+ // We should not call CHECK() here because this function can be
+ // called after holding on to log_mutex. We don't want to
+ // attempt to hold on to the same mutex, and get into a
+ // deadlock. Simply use a name like invalid-user.
+ if (uidname.empty()) uidname = "invalid-user";
+
+ stripped_filename = stripped_filename+'.'+hostname+'.'
+ +uidname+".log."
+ +LogSeverityNames[severity_]+'.';
+ // We're going to (potentially) try to put logs in several different dirs
+ const vector<string> & log_dirs = GetLoggingDirectories();
+
+ // Go through the list of dirs, and try to create the log file in each
+ // until we succeed or run out of options
+ bool success = false;
+ for (vector<string>::const_iterator dir = log_dirs.begin();
+ dir != log_dirs.end();
+ ++dir) {
+ base_filename_ = *dir + "/" + stripped_filename;
+ if ( CreateLogfile(time_pid_string) ) {
+ success = true;
+ break;
+ }
+ }
+ // If we never succeeded, we have to give up
+ if ( success == false ) {
+ perror("Could not create logging file");
+ fprintf(stderr, "COULD NOT CREATE A LOGGINGFILE %s!", time_pid_string);
+ return;
+ }
+ }
+
+ // Write a header message into the log file
+ char file_header_string[512]; // Enough chars for time and binary info
+ ostrstream file_header_stream(file_header_string,
+ sizeof(file_header_string));
+ file_header_stream.fill('0');
+ file_header_stream << "Log file created at: "
+ << 1900+tm_time.tm_year << '/'
+ << setw(2) << 1+tm_time.tm_mon << '/'
+ << setw(2) << tm_time.tm_mday
+ << ' '
+ << setw(2) << tm_time.tm_hour << ':'
+ << setw(2) << tm_time.tm_min << ':'
+ << setw(2) << tm_time.tm_sec << '\n'
+ << "Running on machine: "
+ << LogDestination::hostname() << '\n'
+ << "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu "
+ << "threadid file:line] msg" << '\n'
+ << '\0';
+ int header_len = strlen(file_header_string);
+ fwrite(file_header_string, 1, header_len, file_);
+ file_length_ += header_len;
+ bytes_since_flush_ += header_len;
+ }
+
+ // Write to LOG file
+ if ( !stop_writing ) {
+ // fwrite() doesn't return an error when the disk is full: for
+ // messages shorter than 4096 bytes it still returns the full
+ // message length. Only for messages longer than 4096 bytes does
+ // it return a short count (4096), thereby indicating an error,
+ // hence the errno/ENOSPC check below.
+ errno = 0;
+ fwrite(message, 1, message_len, file_);
+ if ( FLAGS_stop_logging_if_full_disk &&
+ errno == ENOSPC ) { // disk full, stop writing to disk
+ stop_writing = true; // until the disk is free again
+ return;
+ } else {
+ file_length_ += message_len;
+ bytes_since_flush_ += message_len;
+ }
+ } else {
+ if ( CycleClock_Now() >= next_flush_time_ )
+ stop_writing = false; // check to see if disk has free space.
+ return; // no need to flush
+ }
+
+ // See important msgs *now*. Also, flush logs at least every 10^6 chars,
+ // or every "FLAGS_logbufsecs" seconds.
+ if ( force_flush ||
+ (bytes_since_flush_ >= 1000000) ||
+ (CycleClock_Now() >= next_flush_time_) ) {
+ FlushUnlocked();
+#ifdef OS_LINUX
+ if (FLAGS_drop_log_memory) {
+ if (file_length_ >= logging::kPageSize) {
+ // don't evict the most recent page
+ uint32 len = file_length_ & ~(logging::kPageSize - 1);
+ posix_fadvise(fileno(file_), 0, len, POSIX_FADV_DONTNEED);
+ }
+ }
+#endif
+ }
+}
+
+} // namespace
+
+// An arbitrary limit on the length of a single log message. This
+// is so that streaming can be done more efficiently.
+const size_t LogMessage::kMaxLogMessageLen = 30000;
+
+// Static log data space to avoid alloc failures in a LOG(FATAL)
+//
+// Since multiple threads may call LOG(FATAL), and we want to preserve
+// the data from the first call, we allocate two sets of space. One
+// for exclusive use by the first thread, and one for shared use by
+// all other threads.
+static Mutex fatal_msg_lock;
+static CrashReason crash_reason;
+static bool fatal_msg_exclusive = true;
+static char fatal_msg_buf_exclusive[LogMessage::kMaxLogMessageLen+1];
+static char fatal_msg_buf_shared[LogMessage::kMaxLogMessageLen+1];
+static LogMessage::LogStream fatal_msg_stream_exclusive(
+ fatal_msg_buf_exclusive, LogMessage::kMaxLogMessageLen, 0);
+static LogMessage::LogStream fatal_msg_stream_shared(
+ fatal_msg_buf_shared, LogMessage::kMaxLogMessageLen, 0);
+LogMessage::LogMessageData LogMessage::fatal_msg_data_exclusive_;
+LogMessage::LogMessageData LogMessage::fatal_msg_data_shared_;
+
+LogMessage::LogMessageData::~LogMessageData() {
+ delete[] buf_;
+ delete stream_alloc_;
+}
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
+ int ctr, void (LogMessage::*send_method)()) {
+ Init(file, line, severity, send_method);
+ data_->stream_->set_ctr(ctr);
+}
+
+LogMessage::LogMessage(const char* file, int line,
+ const CheckOpString& result) {
+ Init(file, line, FATAL, &LogMessage::SendToLog);
+ stream() << "Check failed: " << (*result.str_) << " ";
+}
+
+LogMessage::LogMessage(const char* file, int line) {
+ Init(file, line, INFO, &LogMessage::SendToLog);
+}
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity) {
+ Init(file, line, severity, &LogMessage::SendToLog);
+}
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
+ LogSink* sink, bool also_send_to_log) {
+ Init(file, line, severity, also_send_to_log ? &LogMessage::SendToSinkAndLog :
+ &LogMessage::SendToSink);
+ data_->sink_ = sink; // override Init()'s setting to NULL
+}
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
+ vector<string> *outvec) {
+ Init(file, line, severity, &LogMessage::SaveOrSendToLog);
+ data_->outvec_ = outvec; // override Init()'s setting to NULL
+}
+
+LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
+ string *message) {
+ Init(file, line, severity, &LogMessage::WriteToStringAndLog);
+ data_->message_ = message; // override Init()'s setting to NULL
+}
+
+void LogMessage::Init(const char* file,
+ int line,
+ LogSeverity severity,
+ void (LogMessage::*send_method)()) {
+ allocated_ = NULL;
+ if (severity != FATAL || !exit_on_dfatal) {
+ allocated_ = new LogMessageData();
+ data_ = allocated_;
+ data_->buf_ = new char[kMaxLogMessageLen+1];
+ data_->message_text_ = data_->buf_;
+ data_->stream_alloc_ =
+ new LogStream(data_->message_text_, kMaxLogMessageLen, 0);
+ data_->stream_ = data_->stream_alloc_;
+ data_->first_fatal_ = false;
+ } else {
+ MutexLock l(&fatal_msg_lock);
+ if (fatal_msg_exclusive) {
+ fatal_msg_exclusive = false;
+ data_ = &fatal_msg_data_exclusive_;
+ data_->message_text_ = fatal_msg_buf_exclusive;
+ data_->stream_ = &fatal_msg_stream_exclusive;
+ data_->first_fatal_ = true;
+ } else {
+ data_ = &fatal_msg_data_shared_;
+ data_->message_text_ = fatal_msg_buf_shared;
+ data_->stream_ = &fatal_msg_stream_shared;
+ data_->first_fatal_ = false;
+ }
+ data_->stream_alloc_ = NULL;
+ }
+
+ stream().fill('0');
+ data_->preserved_errno_ = errno;
+ data_->severity_ = severity;
+ data_->line_ = line;
+ data_->send_method_ = send_method;
+ data_->sink_ = NULL;
+ data_->outvec_ = NULL;
+ WallTime now = WallTime_Now();
+ data_->timestamp_ = static_cast<time_t>(now);
+ localtime_r(&data_->timestamp_, &data_->tm_time_);
+ int usecs = static_cast<int>((now - data_->timestamp_) * 1000000);
+ RawLog__SetLastTime(data_->tm_time_, usecs);
+
+ data_->num_chars_to_log_ = 0;
+ data_->num_chars_to_syslog_ = 0;
+ data_->basename_ = const_basename(file);
+ data_->fullname_ = file;
+ data_->has_been_flushed_ = false;
+
+ // If specified, prepend a prefix to each line. For example:
+ // I1018 160715 f5d4fbb0 logging.cc:1153]
+ // (log level, GMT month, date, time, thread_id, file basename, line)
+ // We exclude the thread_id for the default thread.
+ if (FLAGS_log_prefix && (line != kNoLogPrefix)) {
+ stream() << LogSeverityNames[severity][0]
+ << setw(2) << 1+data_->tm_time_.tm_mon
+ << setw(2) << data_->tm_time_.tm_mday
+ << ' '
+ << setw(2) << data_->tm_time_.tm_hour << ':'
+ << setw(2) << data_->tm_time_.tm_min << ':'
+ << setw(2) << data_->tm_time_.tm_sec << "."
+ << setw(6) << usecs
+ << ' '
+ << setfill(' ') << setw(5)
+ << static_cast<unsigned int>(GetTID()) << setfill('0')
+ << ' '
+ << data_->basename_ << ':' << data_->line_ << "] ";
+ }
+ data_->num_prefix_chars_ = data_->stream_->pcount();
+
+ if (!FLAGS_log_backtrace_at.empty()) {
+ char fileline[128];
+ snprintf(fileline, sizeof(fileline), "%s:%d", data_->basename_, line);
+#ifdef HAVE_STACKTRACE
+ if (!strcmp(FLAGS_log_backtrace_at.c_str(), fileline)) {
+ string stacktrace;
+ DumpStackTraceToString(&stacktrace);
+ stream() << " (stacktrace:\n" << stacktrace << ") ";
+ }
+#endif
+ }
+}
+
+LogMessage::~LogMessage() {
+ Flush();
+ delete allocated_;
+}
+
+// Flush buffered message, called by the destructor, or any other function
+// that needs to synchronize the log.
+void LogMessage::Flush() {
+ if (data_->has_been_flushed_ || data_->severity_ < FLAGS_minloglevel)
+ return;
+
+ data_->num_chars_to_log_ = data_->stream_->pcount();
+ data_->num_chars_to_syslog_ =
+ data_->num_chars_to_log_ - data_->num_prefix_chars_;
+
+ // Do we need to add a \n to the end of this message?
+ bool append_newline =
+ (data_->message_text_[data_->num_chars_to_log_-1] != '\n');
+ char original_final_char = '\0';
+
+ // If we do need to add a \n, we'll do it by violating the memory of the
+ // ostrstream buffer. This is quick, and we'll make sure to undo our
+ // modification before anything else is done with the ostrstream. It
+ // would be preferable not to do things this way, but it seems to be
+ // the best way to deal with this.
+ if (append_newline) {
+ original_final_char = data_->message_text_[data_->num_chars_to_log_];
+ data_->message_text_[data_->num_chars_to_log_++] = '\n';
+ }
+
+ // Prevent any subtle race conditions by wrapping a mutex lock around
+ // the actual logging action per se.
+ {
+ MutexLock l(&log_mutex);
+ (this->*(data_->send_method_))();
+ ++num_messages_[static_cast<int>(data_->severity_)];
+ }
+ LogDestination::WaitForSinks(data_);
+
+ if (append_newline) {
+ // Fix the ostrstream back how it was before we screwed with it.
+ // It's 99.44% certain that we don't need to worry about doing this.
+ data_->message_text_[data_->num_chars_to_log_-1] = original_final_char;
+ }
+
+ // If errno was already set before we enter the logging call, we'll
+ // set it back to that value when we return from the logging call.
+ // It happens often that we log an error message after a syscall
+ // failure, which can potentially set the errno to some other
+ // values. We would like to preserve the original errno.
+ if (data_->preserved_errno_ != 0) {
+ errno = data_->preserved_errno_;
+ }
+
+ // Note that this message is now safely logged. If we're asked to flush
+ // again, as a result of destruction, say, we'll do nothing on future calls.
+ data_->has_been_flushed_ = true;
+}
+
+// Copy of first FATAL log message so that we can print it out again
+// after all the stack traces. To preserve legacy behavior, we don't
+// use fatal_msg_buf_exclusive.
+static time_t fatal_time;
+static char fatal_message[256];
+
+void ReprintFatalMessage() {
+ if (fatal_message[0]) {
+ const int n = strlen(fatal_message);
+ if (!FLAGS_logtostderr) {
+ // Also write to stderr
+ WriteToStderr(fatal_message, n);
+ }
+ LogDestination::LogToAllLogfiles(ERROR, fatal_time, fatal_message, n);
+ }
+}
+
+// L >= log_mutex (callers must hold the log_mutex).
+void LogMessage::SendToLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
+ static bool already_warned_before_initgoogle = false;
+
+ log_mutex.AssertHeld();
+
+ RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
+ data_->message_text_[data_->num_chars_to_log_-1] == '\n', "");
+
+ // Messages of a given severity get logged to lower severity logs, too
+
+ if (!already_warned_before_initgoogle && !IsGoogleLoggingInitialized()) {
+ const char w[] = "WARNING: Logging before InitGoogleLogging() is "
+ "written to STDERR\n";
+ WriteToStderr(w, strlen(w));
+ already_warned_before_initgoogle = true;
+ }
+
+ // global flag: never log to file if set. Also -- don't log to a
+ // file if we haven't parsed the command line flags to get the
+ // program name.
+ if (FLAGS_logtostderr || !IsGoogleLoggingInitialized()) {
+ WriteToStderr(data_->message_text_, data_->num_chars_to_log_);
+
+ // this could be protected by a flag if necessary.
+ LogDestination::LogToSinks(data_->severity_,
+ data_->fullname_, data_->basename_,
+ data_->line_, &data_->tm_time_,
+ data_->message_text_ + data_->num_prefix_chars_,
+ (data_->num_chars_to_log_ -
+ data_->num_prefix_chars_ - 1));
+ } else {
+
+ // log this message to all log files of severity <= severity_
+ LogDestination::LogToAllLogfiles(data_->severity_, data_->timestamp_,
+ data_->message_text_,
+ data_->num_chars_to_log_);
+
+ LogDestination::MaybeLogToStderr(data_->severity_, data_->message_text_,
+ data_->num_chars_to_log_);
+ LogDestination::MaybeLogToEmail(data_->severity_, data_->message_text_,
+ data_->num_chars_to_log_);
+ LogDestination::LogToSinks(data_->severity_,
+ data_->fullname_, data_->basename_,
+ data_->line_, &data_->tm_time_,
+ data_->message_text_ + data_->num_prefix_chars_,
+ (data_->num_chars_to_log_
+ - data_->num_prefix_chars_ - 1));
+ // NOTE: -1 removes trailing \n
+ }
+
+ // If we log a FATAL message, flush all the log destinations, then toss
+ // a signal for others to catch. We leave the logs in a state that
+ // someone else can use them (as long as they flush afterwards)
+ if (data_->severity_ == FATAL && exit_on_dfatal) {
+ if (data_->first_fatal_) {
+ // Store crash information so that it is accessible from within signal
+ // handlers that may be invoked later.
+ RecordCrashReason(&crash_reason);
+ SetCrashReason(&crash_reason);
+
+ // Store shortened fatal message for other logs and GWQ status
+ const int copy = min<int>(data_->num_chars_to_log_,
+ sizeof(fatal_message)-1);
+ memcpy(fatal_message, data_->message_text_, copy);
+ fatal_message[copy] = '\0';
+ fatal_time = data_->timestamp_;
+ }
+
+ if (!FLAGS_logtostderr) {
+ for (int i = 0; i < NUM_SEVERITIES; ++i) {
+ if ( LogDestination::log_destinations_[i] )
+ LogDestination::log_destinations_[i]->logger_->Write(true, 0, "", 0);
+ }
+ }
+
+ // release the lock that our caller (directly or indirectly)
+ // LogMessage::~LogMessage() grabbed so that signal handlers
+ // can use the logging facility. Alternatively, we could add
+ // an entire unsafe logging interface to bypass locking
+ // for signal handlers but this seems simpler.
+ log_mutex.Unlock();
+ LogDestination::WaitForSinks(data_);
+
+ const char* message = "*** Check failure stack trace: ***\n";
+ write(STDERR_FILENO, message, strlen(message));
+ Fail();
+ }
+}
+
+void LogMessage::RecordCrashReason(
+ glog_internal_namespace_::CrashReason* reason) {
+ reason->filename = fatal_msg_data_exclusive_.fullname_;
+ reason->line_number = fatal_msg_data_exclusive_.line_;
+ reason->message = fatal_msg_buf_exclusive +
+ fatal_msg_data_exclusive_.num_prefix_chars_;
+#ifdef HAVE_STACKTRACE
+ // Retrieve the stack trace, omitting the logging frames that got us here.
+ reason->depth = GetStackTrace(reason->stack, ARRAYSIZE(reason->stack), 4);
+#else
+ reason->depth = 0;
+#endif
+}
+
+static void logging_fail() {
+// #if defined(_DEBUG) && defined(_MSC_VER)
+// doesn't work on my laptop (sergey)
+#if 0
+ // When debugging on windows, avoid the obnoxious dialog and make
+ // it possible to continue past a LOG(FATAL) in the debugger
+ _asm int 3
+#else
+ abort();
+#endif
+}
+
+#ifdef HAVE___ATTRIBUTE__
+GOOGLE_GLOG_DLL_DECL
+void (*g_logging_fail_func)() __attribute__((noreturn)) = &logging_fail;
+#else
+GOOGLE_GLOG_DLL_DECL void (*g_logging_fail_func)() = &logging_fail;
+#endif
+
+void InstallFailureFunction(void (*fail_func)()) {
+ g_logging_fail_func = fail_func;
+}
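+// Illustrative usage (sketch, not part of the original source): install a
+// custom handler so LOG(FATAL) can flush application state before dying.
+// The handler must not return:
+//   static void MyFailure() { /* flush state */ abort(); }
+//   google::InstallFailureFunction(&MyFailure);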
+
+void LogMessage::Fail() {
+ g_logging_fail_func();
+}
+
+// L >= log_mutex (callers must hold the log_mutex).
+void LogMessage::SendToSink() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
+ if (data_->sink_ != NULL) {
+ RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
+ data_->message_text_[data_->num_chars_to_log_-1] == '\n', "");
+ data_->sink_->send(data_->severity_, data_->fullname_, data_->basename_,
+ data_->line_, &data_->tm_time_,
+ data_->message_text_ + data_->num_prefix_chars_,
+ (data_->num_chars_to_log_ -
+ data_->num_prefix_chars_ - 1));
+ }
+}
+
+// L >= log_mutex (callers must hold the log_mutex).
+void LogMessage::SendToSinkAndLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
+ SendToSink();
+ SendToLog();
+}
+
+// L >= log_mutex (callers must hold the log_mutex).
+void LogMessage::SaveOrSendToLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
+ if (data_->outvec_ != NULL) {
+ RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
+ data_->message_text_[data_->num_chars_to_log_-1] == '\n', "");
+ // Omit prefix of message and trailing newline when recording in outvec_.
+ const char *start = data_->message_text_ + data_->num_prefix_chars_;
+ int len = data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1;
+ data_->outvec_->push_back(string(start, len));
+ } else {
+ SendToLog();
+ }
+}
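+// Illustrative usage via the LOG_STRING macro from <glog/logging.h>
+// (sketch): with a non-NULL vector the message is captured rather than
+// sent to the log files:
+//   std::vector<std::string> errors;
+//   LOG_STRING(ERROR, &errors) << "disk full";  // appended to `errors`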
+
+void LogMessage::WriteToStringAndLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
+ if (data_->message_ != NULL) {
+ RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
+ data_->message_text_[data_->num_chars_to_log_-1] == '\n', "");
+ // Omit prefix of message and trailing newline when writing to message_.
+ const char *start = data_->message_text_ + data_->num_prefix_chars_;
+ int len = data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1;
+ data_->message_->assign(start, len);
+ }
+ SendToLog();
+}
+
+// L >= log_mutex (callers must hold the log_mutex).
+void LogMessage::SendToSyslogAndLog() {
+#ifdef HAVE_SYSLOG_H
+ // Before any calls to syslog(), make a single call to openlog()
+ static bool openlog_already_called = false;
+ if (!openlog_already_called) {
+ openlog(glog_internal_namespace_::ProgramInvocationShortName(),
+ LOG_CONS | LOG_NDELAY | LOG_PID,
+ LOG_USER);
+ openlog_already_called = true;
+ }
+
+ // This array maps Google severity levels to syslog levels
+ const int SEVERITY_TO_LEVEL[] = { LOG_INFO, LOG_WARNING, LOG_ERR, LOG_EMERG };
+ syslog(LOG_USER | SEVERITY_TO_LEVEL[static_cast<int>(data_->severity_)], "%.*s",
+ int(data_->num_chars_to_syslog_),
+ data_->message_text_ + data_->num_prefix_chars_);
+ SendToLog();
+#else
+ LOG(ERROR) << "No syslog support: message=" << data_->message_text_;
+#endif
+}
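+// Illustrative usage (sketch, assuming the SYSLOG macro from
+// <glog/logging.h>):
+//   SYSLOG(ERROR) << "also forwarded to syslogd";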
+
+base::Logger* base::GetLogger(LogSeverity severity) {
+ MutexLock l(&log_mutex);
+ return LogDestination::log_destination(severity)->logger_;
+}
+
+void base::SetLogger(LogSeverity severity, base::Logger* logger) {
+ MutexLock l(&log_mutex);
+ LogDestination::log_destination(severity)->logger_ = logger;
+}
+
+// L < log_mutex. Acquires and releases mutex_.
+int64 LogMessage::num_messages(int severity) {
+ MutexLock l(&log_mutex);
+ return num_messages_[severity];
+}
+
+// Output the COUNTER value. This is only valid if ostream is a
+// LogStream.
+ostream& operator<<(ostream &os, const PRIVATE_Counter&) {
+ LogMessage::LogStream *log = dynamic_cast<LogMessage::LogStream*>(&os);
+ CHECK(log == log->self());
+ os << log->ctr();
+ return os;
+}
+
+ErrnoLogMessage::ErrnoLogMessage(const char* file, int line,
+ LogSeverity severity, int ctr,
+ void (LogMessage::*send_method)())
+ : LogMessage(file, line, severity, ctr, send_method) {
+}
+
+ErrnoLogMessage::~ErrnoLogMessage() {
+ // Don't access errno directly because it may have been altered
+ // while streaming the message.
+ char buf[100];
+ posix_strerror_r(preserved_errno(), buf, sizeof(buf));
+ stream() << ": " << buf << " [" << preserved_errno() << "]";
+}
+
+void FlushLogFiles(LogSeverity min_severity) {
+ LogDestination::FlushLogFiles(min_severity);
+}
+
+void FlushLogFilesUnsafe(LogSeverity min_severity) {
+ LogDestination::FlushLogFilesUnsafe(min_severity);
+}
+
+void SetLogDestination(LogSeverity severity, const char* base_filename) {
+ LogDestination::SetLogDestination(severity, base_filename);
+}
+
+void SetLogSymlink(LogSeverity severity, const char* symlink_basename) {
+ LogDestination::SetLogSymlink(severity, symlink_basename);
+}
+
+LogSink::~LogSink() {
+}
+
+void LogSink::WaitTillSent() {
+ // noop default
+}
+
+string LogSink::ToString(LogSeverity severity, const char* file, int line,
+ const struct ::tm* tm_time,
+ const char* message, size_t message_len) {
+ ostringstream stream(string(message, message_len));
+ stream.fill('0');
+
+ // FIXME(jrvb): Updating this to use the correct value for usecs
+ // requires changing the signature for both this method and
+ // LogSink::send(). This change needs to be done in a separate CL
+ // so subclasses of LogSink can be updated at the same time.
+ int usecs = 0;
+
+ stream << LogSeverityNames[severity][0]
+ << setw(2) << 1+tm_time->tm_mon
+ << setw(2) << tm_time->tm_mday
+ << ' '
+ << setw(2) << tm_time->tm_hour << ':'
+ << setw(2) << tm_time->tm_min << ':'
+ << setw(2) << tm_time->tm_sec << '.'
+ << setw(6) << usecs
+ << ' '
+ << setfill(' ') << setw(5) << GetTID() << setfill('0')
+ << ' '
+ << file << ':' << line << "] ";
+
+ stream << string(message, message_len);
+ return stream.str();
+}
+
+void AddLogSink(LogSink *destination) {
+ LogDestination::AddLogSink(destination);
+}
+
+void RemoveLogSink(LogSink *destination) {
+ LogDestination::RemoveLogSink(destination);
+}
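+// Illustrative LogSink subclass (sketch, not part of the original source).
+// send() runs while log_mutex is held, so it must stay cheap and must not
+// call LOG() itself:
+//   class StderrSink : public google::LogSink {
+//    public:
+//     virtual void send(google::LogSeverity severity, const char* /*full*/,
+//                       const char* base, int line, const struct ::tm* tm,
+//                       const char* message, size_t len) {
+//       fwrite(message, 1, len, stderr);
+//     }
+//   };
+//   static StderrSink sink;
+//   google::AddLogSink(&sink);   // and RemoveLogSink(&sink) before exit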
+
+void SetLogFilenameExtension(const char* ext) {
+ LogDestination::SetLogFilenameExtension(ext);
+}
+
+void SetStderrLogging(LogSeverity min_severity) {
+ LogDestination::SetStderrLogging(min_severity);
+}
+
+void SetEmailLogging(LogSeverity min_severity, const char* addresses) {
+ LogDestination::SetEmailLogging(min_severity, addresses);
+}
+
+void LogToStderr() {
+ LogDestination::LogToStderr();
+}
+
+namespace base {
+namespace internal {
+
+bool GetExitOnDFatal() {
+ MutexLock l(&log_mutex);
+ return exit_on_dfatal;
+}
+
+// Determines whether we exit the program for a LOG(DFATAL) message in
+// debug mode. It does this by skipping the call to Fail/FailQuietly.
+// This is intended for testing only.
+//
+// This can have some effects on LOG(FATAL) as well. Failure messages
+// are always allocated (rather than sharing a buffer), the crash
+// reason is not recorded, the "gwq" status message is not updated,
+// and the stack trace is not recorded. The LOG(FATAL) *will* still
+// exit the program. Since this function is used only in testing,
+// these differences are acceptable.
+void SetExitOnDFatal(bool value) {
+ MutexLock l(&log_mutex);
+ exit_on_dfatal = value;
+}
+
+} // namespace internal
+} // namespace base
+
+// use_logging controls whether the logging functions LOG/VLOG are used
+// to log errors. It should be set to false when the caller holds the
+// log_mutex.
+static bool SendEmailInternal(const char* dest, const char* subject,
+ const char* body, bool use_logging) {
+ if (dest && *dest) {
+ if ( use_logging ) {
+ VLOG(1) << "Trying to send TITLE:" << subject
+ << " BODY:" << body << " to " << dest;
+ } else {
+ fprintf(stderr, "Trying to send TITLE: %s BODY: %s to %s\n",
+ subject, body, dest);
+ }
+
+ string cmd =
+ FLAGS_logmailer + " -s\"" + subject + "\" " + dest;
+ FILE* pipe = popen(cmd.c_str(), "w");
+ if (pipe != NULL) {
+ // Add the body if we have one
+ if (body)
+ fwrite(body, sizeof(char), strlen(body), pipe);
+ bool ok = pclose(pipe) != -1;
+ if ( !ok ) {
+ if ( use_logging ) {
+ char buf[100];
+ posix_strerror_r(errno, buf, sizeof(buf));
+ LOG(ERROR) << "Problems sending mail to " << dest << ": " << buf;
+ } else {
+ char buf[100];
+ posix_strerror_r(errno, buf, sizeof(buf));
+ fprintf(stderr, "Problems sending mail to %s: %s\n", dest, buf);
+ }
+ }
+ return ok;
+ } else {
+ if ( use_logging ) {
+ LOG(ERROR) << "Unable to send mail to " << dest;
+ } else {
+ fprintf(stderr, "Unable to send mail to %s\n", dest);
+ }
+ }
+ }
+ return false;
+}
+
+bool SendEmail(const char* dest, const char* subject, const char* body) {
+ return SendEmailInternal(dest, subject, body, true);
+}
+
+static void GetTempDirectories(vector<string>* list) {
+ list->clear();
+#ifdef OS_WINDOWS
+ // On windows we'll try to find a directory in this order:
+ // C:/Documents & Settings/whomever/TEMP (or whatever GetTempPath() is)
+ // C:/TMP/
+ // C:/TEMP/
+ // C:/WINDOWS/ or C:/WINNT/
+ // .
+ char tmp[MAX_PATH];
+ if (GetTempPathA(MAX_PATH, tmp))
+ list->push_back(tmp);
+ list->push_back("C:\\tmp\\");
+ list->push_back("C:\\temp\\");
+#else
+ // Directories, in order of preference. If we find a dir that
+ // exists, we stop adding other less-preferred dirs
+ const char * candidates[] = {
+ // Non-null only during unittest/regtest
+ getenv("TEST_TMPDIR"),
+
+ // Explicitly-supplied temp dirs
+ getenv("TMPDIR"), getenv("TMP"),
+
+ // If all else fails
+ "/tmp",
+ };
+
+ for (int i = 0; i < ARRAYSIZE(candidates); i++) {
+ const char *d = candidates[i];
+ if (!d) continue; // Empty env var
+
+ // Make sure we don't surprise anyone who's expecting a '/'
+ string dstr = d;
+ if (dstr[dstr.size() - 1] != '/') {
+ dstr += "/";
+ }
+ list->push_back(dstr);
+
+ struct stat statbuf;
+ if (!stat(d, &statbuf) && S_ISDIR(statbuf.st_mode)) {
+ // We found a dir that exists - we're done.
+ return;
+ }
+ }
+
+#endif
+}
+
+static vector<string>* logging_directories_list;
+
+const vector<string>& GetLoggingDirectories() {
+ // Not strictly thread-safe but we're called early in InitGoogle().
+ if (logging_directories_list == NULL) {
+ logging_directories_list = new vector<string>;
+
+ if ( !FLAGS_log_dir.empty() ) {
+ // A dir was specified, we should use it
+ logging_directories_list->push_back(FLAGS_log_dir.c_str());
+ } else {
+ GetTempDirectories(logging_directories_list);
+#ifdef OS_WINDOWS
+ char tmp[MAX_PATH];
+ if (GetWindowsDirectoryA(tmp, MAX_PATH))
+ logging_directories_list->push_back(tmp);
+ logging_directories_list->push_back(".\\");
+#else
+ logging_directories_list->push_back("./");
+#endif
+ }
+ }
+ return *logging_directories_list;
+}
+
+void TestOnly_ClearLoggingDirectoriesList() {
+ fprintf(stderr, "TestOnly_ClearLoggingDirectoriesList should only be "
+ "called from test code.\n");
+ delete logging_directories_list;
+ logging_directories_list = NULL;
+}
+
+void GetExistingTempDirectories(vector<string>* list) {
+ GetTempDirectories(list);
+ vector<string>::iterator i_dir = list->begin();
+ while( i_dir != list->end() ) {
+ // A zero mode argument to access() tests for existence; the F_OK
+ // constant is not defined on Windows.
+ if ( access(i_dir->c_str(), 0) ) {
+ i_dir = list->erase(i_dir);
+ } else {
+ ++i_dir;
+ }
+ }
+}
+
+void TruncateLogFile(const char *path, int64 limit, int64 keep) {
+#ifdef HAVE_UNISTD_H
+ struct stat statbuf;
+ const int kCopyBlockSize = 8 << 10;
+ char copybuf[kCopyBlockSize];
+ int64 read_offset, write_offset;
+ // Don't follow symlinks unless they're our own fd symlinks in /proc
+ int flags = O_RDWR;
+ const char *procfd_prefix = "/proc/self/fd/";
+ if (strncmp(procfd_prefix, path, strlen(procfd_prefix))) flags |= O_NOFOLLOW;
+
+ int fd = open(path, flags);
+ if (fd == -1) {
+ if (errno == EFBIG) {
+ // The log file in question has got too big for us to open. The
+ // real fix for this would be to compile logging.cc (or probably
+ // all of base/...) with -D_FILE_OFFSET_BITS=64 but that's
+ // rather scary.
+ // Instead just truncate the file to something we can manage
+ if (truncate(path, 0) == -1) {
+ PLOG(ERROR) << "Unable to truncate " << path;
+ } else {
+ LOG(ERROR) << "Truncated " << path << " due to EFBIG error";
+ }
+ } else {
+ PLOG(ERROR) << "Unable to open " << path;
+ }
+ return;
+ }
+
+ if (fstat(fd, &statbuf) == -1) {
+ PLOG(ERROR) << "Unable to fstat()";
+ goto out_close_fd;
+ }
+
+ // See if the path refers to a regular file bigger than the
+ // specified limit
+ if (!S_ISREG(statbuf.st_mode)) goto out_close_fd;
+ if (statbuf.st_size <= limit) goto out_close_fd;
+ if (statbuf.st_size <= keep) goto out_close_fd;
+
+ // This log file is too large - we need to truncate it
+ LOG(INFO) << "Truncating " << path << " to " << keep << " bytes";
+
+ // Copy the last "keep" bytes of the file to the beginning of the file
+ read_offset = statbuf.st_size - keep;
+ write_offset = 0;
+ int bytesin, bytesout;
+ while ((bytesin = pread(fd, copybuf, sizeof(copybuf), read_offset)) > 0) {
+ bytesout = pwrite(fd, copybuf, bytesin, write_offset);
+ if (bytesout == -1) {
+ PLOG(ERROR) << "Unable to write to " << path;
+ break;
+ } else if (bytesout != bytesin) {
+ LOG(ERROR) << "Expected to write " << bytesin << ", wrote " << bytesout;
+ }
+ read_offset += bytesin;
+ write_offset += bytesout;
+ }
+ if (bytesin == -1) PLOG(ERROR) << "Unable to read from " << path;
+
+ // Truncate the remainder of the file. If someone else writes to the
+ // end of the file after our last read() above, we lose their latest
+ // data. Too bad ...
+ if (ftruncate(fd, write_offset) == -1) {
+ PLOG(ERROR) << "Unable to truncate " << path;
+ }
+
+ out_close_fd:
+ close(fd);
+#else
+ LOG(ERROR) << "No log truncation support.";
+#endif
+}
+
+void TruncateStdoutStderr() {
+#ifdef HAVE_UNISTD_H
+ int64 limit = MaxLogSize() << 20;
+ int64 keep = 1 << 20;
+ TruncateLogFile("/proc/self/fd/1", limit, keep);
+ TruncateLogFile("/proc/self/fd/2", limit, keep);
+#else
+ LOG(ERROR) << "No log truncation support.";
+#endif
+}
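+// Worked example (illustrative): with --max_log_size=100, limit is
+// 100 << 20 bytes (100MB) and keep is 1MB, so a 150MB stdout log is
+// rewritten in place to retain only its final 1MB.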
+
+
+// Helper functions for string comparisons.
+#define DEFINE_CHECK_STROP_IMPL(name, func, expected) \
+ string* Check##func##expected##Impl(const char* s1, const char* s2, \
+ const char* names) { \
+ bool equal = s1 == s2 || (s1 && s2 && !func(s1, s2)); \
+ if (equal == expected) return NULL; \
+ else { \
+ strstream ss; \
+ if (!s1) s1 = ""; \
+ if (!s2) s2 = ""; \
+ ss << #name " failed: " << names << " (" << s1 << " vs. " << s2 << ")"; \
+ return new string(ss.str(), ss.pcount()); \
+ } \
+ }
+DEFINE_CHECK_STROP_IMPL(CHECK_STREQ, strcmp, true)
+DEFINE_CHECK_STROP_IMPL(CHECK_STRNE, strcmp, false)
+DEFINE_CHECK_STROP_IMPL(CHECK_STRCASEEQ, strcasecmp, true)
+DEFINE_CHECK_STROP_IMPL(CHECK_STRCASENE, strcasecmp, false)
+#undef DEFINE_CHECK_STROP_IMPL
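+// Illustrative use of the helpers generated above, via the CHECK_STREQ
+// family declared in <glog/logging.h> (hypothetical variables):
+//   CHECK_STREQ(mode, "rw");        // FATAL, printing both strings
+//   CHECK_STRCASENE(user, "root");  // case-insensitive inequality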
+
+int posix_strerror_r(int err, char *buf, size_t len) {
+ // Sanity check input parameters
+ if (buf == NULL || len <= 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ // Reset buf and errno, and try calling whatever version of strerror_r()
+ // is implemented by glibc
+ buf[0] = '\000';
+ int old_errno = errno;
+ errno = 0;
+ char *rc = reinterpret_cast<char *>(strerror_r(err, buf, len));
+
+ // Both versions set errno on failure
+ if (errno) {
+ // Should already be there, but better safe than sorry
+ buf[0] = '\000';
+ return -1;
+ }
+ errno = old_errno;
+
+ // POSIX is vague about whether the string will be terminated, although
+ // is indirectly implies that typically ERANGE will be returned, instead
+ // of truncating the string. This is different from the GNU implementation.
+ // We play it safe by always terminating the string explicitly.
+ buf[len-1] = '\000';
+
+ // If the function succeeded, we can use its exit code to determine the
+ // semantics implemented by glibc
+ if (!rc) {
+ return 0;
+ } else {
+ // GNU semantics detected
+ if (rc == buf) {
+ return 0;
+ } else {
+ buf[0] = '\000';
+#if defined(OS_MACOSX) || defined(OS_FREEBSD) || defined(OS_OPENBSD)
+ if (reinterpret_cast<intptr_t>(rc) < sys_nerr) {
+ // This means an error on MacOSX or FreeBSD.
+ return -1;
+ }
+#endif
+ strncat(buf, rc, len-1);
+ return 0;
+ }
+ }
+}
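+// Usage sketch (mirrors ErrnoLogMessage above): the wrapper hides which
+// strerror_r() variant libc provides and always NUL-terminates:
+//   char buf[100];
+//   posix_strerror_r(ENOENT, buf, sizeof(buf));  // e.g. "No such file..."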
+
+LogMessageFatal::LogMessageFatal(const char* file, int line) :
+ LogMessage(file, line, FATAL) {}
+
+LogMessageFatal::LogMessageFatal(const char* file, int line,
+ const CheckOpString& result) :
+ LogMessage(file, line, result) {}
+
+LogMessageFatal::~LogMessageFatal() {
+ Flush();
+ LogMessage::Fail();
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/raw_logging.cc b/extern/libmv/third_party/glog/src/raw_logging.cc
new file mode 100644
index 00000000000..b179a1eded4
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/raw_logging.cc
@@ -0,0 +1,172 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Maxim Lifantsev
+//
+// logging_unittest.cc covers the functionality herein
+
+#include "utilities.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <errno.h>
+#ifdef HAVE_UNISTD_H
+# include <unistd.h> // for close() and write()
+#endif
+#include <fcntl.h> // for open()
+#include <time.h>
+#include "config.h"
+#include <glog/logging.h> // To pick up flag settings etc.
+#include <glog/raw_logging.h>
+#include "base/commandlineflags.h"
+
+#ifdef HAVE_STACKTRACE
+# include "stacktrace.h"
+#endif
+
+#if defined(HAVE_SYSCALL_H)
+#include <syscall.h> // for syscall()
+#elif defined(HAVE_SYS_SYSCALL_H)
+#include <sys/syscall.h> // for syscall()
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+#if defined(HAVE_SYSCALL_H) || defined(HAVE_SYS_SYSCALL_H)
+# define safe_write(fd, s, len) syscall(SYS_write, fd, s, len)
+#else
+ // Not so safe, but what can you do?
+# define safe_write(fd, s, len) write(fd, s, len)
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+// Data for RawLog__ below. We simply pick up the latest
+// time data created by a normal log message to avoid calling
+// localtime_r which can allocate memory.
+static struct ::tm last_tm_time_for_raw_log;
+static int last_usecs_for_raw_log;
+
+void RawLog__SetLastTime(const struct ::tm& t, int usecs) {
+ memcpy(&last_tm_time_for_raw_log, &t, sizeof(last_tm_time_for_raw_log));
+ last_usecs_for_raw_log = usecs;
+}
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+// If this becomes a problem we should reimplement a subset of vsnprintf
+// that does not need locks and malloc.
+
+// Helper for RawLog__ below.
+// *DoRawLog writes to *buf of *size and moves them past the written portion.
+// It returns true iff there was no overflow or error.
+static bool DoRawLog(char** buf, int* size, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ int n = vsnprintf(*buf, *size, format, ap);
+ va_end(ap);
+ if (n < 0 || n > *size) return false;
+ *size -= n;
+ *buf += n;
+ return true;
+}
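+// Note (added comment): vsnprintf() returns the length the output would
+// have had given unlimited space, so n == *size also means the final
+// character was lost to the terminating NUL; the check above only flags
+// n > *size as overflow.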
+
+// Helper for RawLog__ below.
+inline static bool VADoRawLog(char** buf, int* size,
+ const char* format, va_list ap) {
+ int n = vsnprintf(*buf, *size, format, ap);
+ if (n < 0 || n > *size) return false;
+ *size -= n;
+ *buf += n;
+ return true;
+}
+
+static const int kLogBufSize = 3000;
+static bool crashed = false;
+static CrashReason crash_reason;
+static char crash_buf[kLogBufSize + 1] = { 0 }; // Will end in '\0'
+
+void RawLog__(LogSeverity severity, const char* file, int line,
+ const char* format, ...) {
+ if (!(FLAGS_logtostderr || severity >= FLAGS_stderrthreshold ||
+ FLAGS_alsologtostderr || !IsGoogleLoggingInitialized())) {
+ return; // this stderr log message is suppressed
+ }
+ // can't call localtime_r here: it can allocate
+ struct ::tm& t = last_tm_time_for_raw_log;
+ char buffer[kLogBufSize];
+ char* buf = buffer;
+ int size = sizeof(buffer);
+
+ // NOTE: this format should match the specification in base/logging.h
+ DoRawLog(&buf, &size, "%c%02d%02d %02d:%02d:%02d.%06d %5u %s:%d] RAW: ",
+ LogSeverityNames[severity][0],
+ 1 + t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec,
+ last_usecs_for_raw_log,
+ static_cast<unsigned int>(GetTID()),
+ const_basename(const_cast<char *>(file)), line);
+
+ // Record the position and size of the buffer after the prefix
+ const char* msg_start = buf;
+ const int msg_size = size;
+
+ va_list ap;
+ va_start(ap, format);
+ bool no_chop = VADoRawLog(&buf, &size, format, ap);
+ va_end(ap);
+ if (no_chop) {
+ DoRawLog(&buf, &size, "\n");
+ } else {
+ DoRawLog(&buf, &size, "RAW_LOG ERROR: The Message was too long!\n");
+ }
+ // We make a raw syscall to write directly to the stderr file descriptor,
+ // avoiding FILE buffering (to avoid invoking malloc()), and bypassing
+ // libc (to side-step any libc interception).
+ // We write just once to avoid races with other invocations of RawLog__.
+ safe_write(STDERR_FILENO, buffer, strlen(buffer));
+ if (severity == FATAL) {
+ if (!sync_val_compare_and_swap(&crashed, false, true)) {
+ crash_reason.filename = file;
+ crash_reason.line_number = line;
+ memcpy(crash_buf, msg_start, msg_size); // Don't include prefix
+ crash_reason.message = crash_buf;
+#ifdef HAVE_STACKTRACE
+ crash_reason.depth =
+ GetStackTrace(crash_reason.stack, ARRAYSIZE(crash_reason.stack), 1);
+#else
+ crash_reason.depth = 0;
+#endif
+ SetCrashReason(&crash_reason);
+ }
+ LogMessage::Fail(); // abort()
+ }
+}
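+// Illustrative usage of the raw interface (async-signal-safe; see
+// <glog/raw_logging.h>), which avoids allocation and stream locks:
+//   RAW_LOG(ERROR, "open() failed, errno=%d", errno);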
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/signalhandler.cc b/extern/libmv/third_party/glog/src/signalhandler.cc
new file mode 100644
index 00000000000..9fc91b3390d
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/signalhandler.cc
@@ -0,0 +1,348 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+//
+// Implementation of InstallFailureSignalHandler().
+
+#include "utilities.h"
+#include "stacktrace.h"
+#include "symbolize.h"
+#include "glog/logging.h"
+
+#include <signal.h>
+#include <time.h>
+#ifdef HAVE_UCONTEXT_H
+# include <ucontext.h>
+#endif
+#ifdef HAVE_SYS_UCONTEXT_H
+# include <sys/ucontext.h>
+#endif
+#include <algorithm>
+
+_START_GOOGLE_NAMESPACE_
+
+namespace {
+
+// We'll install the failure signal handler for these signals. We could
+// use strsignal() to get signal names, but we don't use it to avoid
+// introducing yet another #ifdef complication.
+//
+// The list should be synced with the comment in signalhandler.h.
+const struct {
+ int number;
+ const char *name;
+} kFailureSignals[] = {
+ { SIGSEGV, "SIGSEGV" },
+ { SIGILL, "SIGILL" },
+ { SIGFPE, "SIGFPE" },
+ { SIGABRT, "SIGABRT" },
+ { SIGBUS, "SIGBUS" },
+ { SIGTERM, "SIGTERM" },
+};
+
+// Returns the program counter from signal context, NULL if unknown.
+void* GetPC(void* ucontext_in_void) {
+#if (defined(HAVE_UCONTEXT_H) || defined(HAVE_SYS_UCONTEXT_H)) && defined(PC_FROM_UCONTEXT)
+ if (ucontext_in_void != NULL) {
+ ucontext_t *context = reinterpret_cast<ucontext_t *>(ucontext_in_void);
+ return (void*)context->PC_FROM_UCONTEXT;
+ }
+#endif
+ return NULL;
+}
+
+// This class is used for formatting error messages. We don't use
+// printf() as it's not async-signal-safe.
+class MinimalFormatter {
+ public:
+ MinimalFormatter(char *buffer, int size)
+ : buffer_(buffer),
+ cursor_(buffer),
+ end_(buffer + size) {
+ }
+
+ // Returns the number of bytes written in the buffer.
+ int num_bytes_written() const { return cursor_ - buffer_; }
+
+ // Appends string from "str" and updates the internal cursor.
+ void AppendString(const char* str) {
+ int i = 0;
+ while (str[i] != '\0' && cursor_ + i < end_) {
+ cursor_[i] = str[i];
+ ++i;
+ }
+ cursor_ += i;
+ }
+
+ // Formats "number" in "radix" and updates the internal cursor.
+  // Lowercase letters ('a', 'b', ...) are used for digits greater than nine.
+ void AppendUint64(uint64 number, int radix) {
+ int i = 0;
+ while (cursor_ + i < end_) {
+ const int tmp = number % radix;
+ number /= radix;
+ cursor_[i] = (tmp < 10 ? '0' + tmp : 'a' + tmp - 10);
+ ++i;
+ if (number == 0) {
+ break;
+ }
+ }
+ // Reverse the bytes written.
+ std::reverse(cursor_, cursor_ + i);
+ cursor_ += i;
+ }
+
+ // Formats "number" as hexadecimal number, and updates the internal
+ // cursor. Padding will be added in front if needed.
+ void AppendHexWithPadding(uint64 number, int width) {
+ char* start = cursor_;
+ AppendString("0x");
+ AppendUint64(number, 16);
+ // Move to right and add padding in front if needed.
+ if (cursor_ < start + width) {
+ const int64 delta = start + width - cursor_;
+ std::copy(start, cursor_, start + delta);
+ std::fill(start, start + delta, ' ');
+ cursor_ = start + width;
+ }
+ }
+
+ private:
+ char *buffer_;
+ char *cursor_;
+ const char * const end_;
+};
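+
+// A typical call sequence, for illustration only (the buffer size and
+// values here are arbitrary):
+//   char buf[64];
+//   MinimalFormatter formatter(buf, sizeof(buf));
+//   formatter.AppendString("pc: ");
+//   formatter.AppendHexWithPadding(0xdeadbeef, 10);
+//   write(STDERR_FILENO, buf, formatter.num_bytes_written());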
+
+// Writes the given data with the size to the standard error.
+void WriteToStderr(const char* data, int size) {
+ write(STDERR_FILENO, data, size);
+}
+
+// The writer function can be changed by InstallFailureWriter().
+void (*g_failure_writer)(const char* data, int size) = WriteToStderr;
+
+// Dumps time information. We don't dump human-readable time information
+// as localtime() is not guaranteed to be async signal safe.
+void DumpTimeInfo() {
+ time_t time_in_sec = time(NULL);
+ char buf[256]; // Big enough for time info.
+ MinimalFormatter formatter(buf, sizeof(buf));
+ formatter.AppendString("*** Aborted at ");
+ formatter.AppendUint64(time_in_sec, 10);
+ formatter.AppendString(" (unix time)");
+ formatter.AppendString(" try \"date -d @");
+ formatter.AppendUint64(time_in_sec, 10);
+ formatter.AppendString("\" if you are using GNU date ***\n");
+ g_failure_writer(buf, formatter.num_bytes_written());
+}
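+
+// For illustration, the emitted line looks roughly like this (the
+// timestamp is, of course, just an example):
+//   *** Aborted at 1329887131 (unix time) try "date -d @1329887131" if
+//   you are using GNU date ***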
+
+// Dumps information about the signal to STDERR.
+void DumpSignalInfo(int signal_number, siginfo_t *siginfo) {
+ // Get the signal name.
+ const char* signal_name = NULL;
+ for (int i = 0; i < ARRAYSIZE(kFailureSignals); ++i) {
+ if (signal_number == kFailureSignals[i].number) {
+ signal_name = kFailureSignals[i].name;
+ }
+ }
+
+ char buf[256]; // Big enough for signal info.
+ MinimalFormatter formatter(buf, sizeof(buf));
+
+ formatter.AppendString("*** ");
+ if (signal_name) {
+ formatter.AppendString(signal_name);
+ } else {
+ // Use the signal number if the name is unknown. The signal name
+ // should be known, but just in case.
+ formatter.AppendString("Signal ");
+ formatter.AppendUint64(signal_number, 10);
+ }
+ formatter.AppendString(" (@0x");
+ formatter.AppendUint64(reinterpret_cast<uintptr_t>(siginfo->si_addr), 16);
+ formatter.AppendString(")");
+ formatter.AppendString(" received by PID ");
+ formatter.AppendUint64(getpid(), 10);
+ formatter.AppendString(" (TID 0x");
+  // We assume pthread_t is an integral number or a pointer, rather
+  // than a complex struct. In some environments pthread_self()
+  // returns a uint64, while in others it returns a pointer. Hence we
+  // use a C-style cast here, rather than reinterpret/static_cast, to
+  // support both kinds of environments.
+ formatter.AppendUint64((uintptr_t)pthread_self(), 16);
+ formatter.AppendString(") ");
+ // Only linux has the PID of the signal sender in si_pid.
+#ifdef OS_LINUX
+ formatter.AppendString("from PID ");
+ formatter.AppendUint64(siginfo->si_pid, 10);
+ formatter.AppendString("; ");
+#endif
+ formatter.AppendString("stack trace: ***\n");
+ g_failure_writer(buf, formatter.num_bytes_written());
+}
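+
+// For illustration, a typical line produced here looks like this (all
+// of the values are hypothetical):
+//   *** SIGSEGV (@0x0) received by PID 12345 (TID 0x7f9e8b7fe700)
+//   from PID 0; stack trace: ***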
+
+// Dumps information about the stack frame to STDERR.
+void DumpStackFrameInfo(const char* prefix, void* pc) {
+ // Get the symbol name.
+ const char *symbol = "(unknown)";
+ char symbolized[1024]; // Big enough for a sane symbol.
+  // Symbolize the address just before pc, because pc (often a return
+  // address) may point one past the call and land in the next function.
+ if (Symbolize(reinterpret_cast<char *>(pc) - 1,
+ symbolized, sizeof(symbolized))) {
+ symbol = symbolized;
+ }
+
+ char buf[1024]; // Big enough for stack frame info.
+ MinimalFormatter formatter(buf, sizeof(buf));
+
+ formatter.AppendString(prefix);
+ formatter.AppendString("@ ");
+ const int width = 2 * sizeof(void*) + 2; // + 2 for "0x".
+ formatter.AppendHexWithPadding(reinterpret_cast<uintptr_t>(pc), width);
+ formatter.AppendString(" ");
+ formatter.AppendString(symbol);
+ formatter.AppendString("\n");
+ g_failure_writer(buf, formatter.num_bytes_written());
+}
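+
+// For illustration, a frame line produced here looks like this (the
+// address and symbol are hypothetical):
+//   @     0x7f9e8c01b2e0 MyNamespace::Crash()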
+
+// Invoke the default signal handler.
+void InvokeDefaultSignalHandler(int signal_number) {
+ struct sigaction sig_action;
+ memset(&sig_action, 0, sizeof(sig_action));
+ sigemptyset(&sig_action.sa_mask);
+ sig_action.sa_handler = SIG_DFL;
+ sigaction(signal_number, &sig_action, NULL);
+ kill(getpid(), signal_number);
+}
+
+// This variable is used for protecting FailureSignalHandler() from
+// dumping stuff while another thread is doing it. Our policy is to let
+// the first thread dump stuff and let other threads wait.
+// See also comments in FailureSignalHandler().
+static pthread_t* g_entered_thread_id_pointer = NULL;
+
+// Dumps signal and stack frame information, and invokes the default
+// signal handler once our job is done.
+void FailureSignalHandler(int signal_number,
+ siginfo_t *signal_info,
+ void *ucontext) {
+ // First check if we've already entered the function. We use an atomic
+ // compare and swap operation for platforms that support it. For other
+ // platforms, we use a naive method that could lead to a subtle race.
+
+ // We assume pthread_self() is async signal safe, though it's not
+ // officially guaranteed.
+ pthread_t my_thread_id = pthread_self();
+  // NOTE: We could simply use pthread_t rather than pthread_t* for this,
+  // if pthread_self() were guaranteed to return a non-zero value for
+  // thread ids, but there is no such guarantee. We need to distinguish
+  // the old value (the value returned from sync_val_compare_and_swap)
+  // from the original value (in this case NULL).
+ pthread_t* old_thread_id_pointer =
+ glog_internal_namespace_::sync_val_compare_and_swap(
+ &g_entered_thread_id_pointer,
+ static_cast<pthread_t*>(NULL),
+ &my_thread_id);
+ if (old_thread_id_pointer != NULL) {
+ // We've already entered the signal handler. What should we do?
+ if (pthread_equal(my_thread_id, *g_entered_thread_id_pointer)) {
+    // It looks like the current thread is reentering the signal handler.
+    // Something must be going wrong (maybe we are reentering via another
+    // type of signal?). Kill ourselves with the default signal handler.
+ InvokeDefaultSignalHandler(signal_number);
+ }
+ // Another thread is dumping stuff. Let's wait until that thread
+ // finishes the job and kills the process.
+ while (true) {
+ sleep(1);
+ }
+ }
+ // This is the first time we enter the signal handler. We are going to
+ // do some interesting stuff from here.
+ // TODO(satorux): We might want to set timeout here using alarm(), but
+ // mixing alarm() and sleep() can be a bad idea.
+
+ // First dump time info.
+ DumpTimeInfo();
+
+ // Get the program counter from ucontext.
+ void *pc = GetPC(ucontext);
+ DumpStackFrameInfo("PC: ", pc);
+
+#ifdef HAVE_STACKTRACE
+ // Get the stack traces.
+ void *stack[32];
+ // +1 to exclude this function.
+ const int depth = GetStackTrace(stack, ARRAYSIZE(stack), 1);
+ DumpSignalInfo(signal_number, signal_info);
+ // Dump the stack traces.
+ for (int i = 0; i < depth; ++i) {
+ DumpStackFrameInfo(" ", stack[i]);
+ }
+#endif
+
+ // *** TRANSITION ***
+ //
+ // BEFORE this point, all code must be async-termination-safe!
+ // (See WARNING above.)
+ //
+ // AFTER this point, we do unsafe things, like using LOG()!
+ // The process could be terminated or hung at any time. We try to
+ // do more useful things first and riskier things later.
+
+ // Flush the logs before we do anything in case 'anything'
+ // causes problems.
+ FlushLogFilesUnsafe(0);
+
+  // Kill ourselves with the default signal handler.
+ InvokeDefaultSignalHandler(signal_number);
+}
+
+} // namespace
+
+void InstallFailureSignalHandler() {
+ // Build the sigaction struct.
+ struct sigaction sig_action;
+ memset(&sig_action, 0, sizeof(sig_action));
+ sigemptyset(&sig_action.sa_mask);
+ sig_action.sa_flags |= SA_SIGINFO;
+ sig_action.sa_sigaction = &FailureSignalHandler;
+
+ for (int i = 0; i < ARRAYSIZE(kFailureSignals); ++i) {
+ CHECK_ERR(sigaction(kFailureSignals[i].number, &sig_action, NULL));
+ }
+}
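+
+// For illustration, a program would typically install the handler once,
+// early in main() (InitGoogleLogging shown only for context):
+//   int main(int argc, char* argv[]) {
+//     google::InitGoogleLogging(argv[0]);
+//     google::InstallFailureSignalHandler();
+//     ...
+//   }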
+
+void InstallFailureWriter(void (*writer)(const char* data, int size)) {
+ g_failure_writer = writer;
+}
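+
+// Any function matching the (const char*, int) signature can be
+// installed. A hypothetical writer that also copies output to a
+// pre-opened descriptor:
+//   void MyFailureWriter(const char* data, int size) {
+//     write(STDERR_FILENO, data, size);
+//     write(g_crash_log_fd, data, size);  // g_crash_log_fd: hypothetical
+//   }
+//   InstallFailureWriter(&MyFailureWriter);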
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/stacktrace.h b/extern/libmv/third_party/glog/src/stacktrace.h
new file mode 100644
index 00000000000..8c3e8fe8f8d
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/stacktrace.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2000 - 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Routines to extract the current stack trace. These functions are
+// thread-safe.
+
+#ifndef BASE_STACKTRACE_H_
+#define BASE_STACKTRACE_H_
+
+#include "config.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// This is similar to the GetStackFrames routine, except that it returns
+// the stack trace only, and not the stack frame sizes as well.
+// Example:
+// main() { foo(); }
+// foo() { bar(); }
+// bar() {
+// void* result[10];
+//      int depth = GetStackTrace(result, 10, 1);
+// }
+//
+// This produces:
+// result[0] foo
+// result[1] main
+// .... ...
+//
+// "result" must not be NULL.
+extern int GetStackTrace(void** result, int max_depth, int skip_count);
+
+_END_GOOGLE_NAMESPACE_
+
+#endif // BASE_STACKTRACE_H_
diff --git a/extern/libmv/third_party/glog/src/stacktrace_generic-inl.h b/extern/libmv/third_party/glog/src/stacktrace_generic-inl.h
new file mode 100644
index 00000000000..fad81d3e3f4
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/stacktrace_generic-inl.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2000 - 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Portable implementation - just use glibc
+//
+// Note: The glibc implementation may cause a call to malloc.
+// This can cause a deadlock in HeapProfiler.
+#include <execinfo.h>
+#include <string.h>
+#include "stacktrace.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// If you change this function, also change GetStackFrames below.
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+ static const int kStackLength = 64;
+ void * stack[kStackLength];
+ int size;
+
+ size = backtrace(stack, kStackLength);
+ skip_count++; // we want to skip the current frame as well
+ int result_count = size - skip_count;
+ if (result_count < 0)
+ result_count = 0;
+ if (result_count > max_depth)
+ result_count = max_depth;
+ for (int i = 0; i < result_count; i++)
+ result[i] = stack[i + skip_count];
+
+ return result_count;
+}
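+
+// For example, if backtrace() fills 10 frames and the caller passed
+// skip_count == 1 (2 after the increment above), at most 8 frames are
+// copied into "result".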
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/stacktrace_libunwind-inl.h b/extern/libmv/third_party/glog/src/stacktrace_libunwind-inl.h
new file mode 100644
index 00000000000..0dc14c6506e
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/stacktrace_libunwind-inl.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2005 - 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Arun Sharma
+//
+// Produce stack trace using libunwind
+
+#include "utilities.h"
+
+extern "C" {
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+}
+#include "glog/raw_logging.h"
+#include "stacktrace.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// Sometimes, we can try to get a stack trace from within a stack
+// trace, because libunwind can call mmap (maybe indirectly via an
+// internal mmap-based memory allocator), and that mmap gets trapped
+// and causes a stack-trace request. If we were to try to honor that
+// recursive request, we'd end up with infinite recursion or deadlock.
+// Luckily, it's safe to ignore those subsequent traces. In such
+// cases, we return 0 to indicate the situation.
+static bool g_now_entering = false;
+
+// If you change this function, also change GetStackFrames below.
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+ void *ip;
+ int n = 0;
+ unw_cursor_t cursor;
+ unw_context_t uc;
+
+ if (sync_val_compare_and_swap(&g_now_entering, false, true)) {
+ return 0;
+ }
+
+ unw_getcontext(&uc);
+ RAW_CHECK(unw_init_local(&cursor, &uc) >= 0, "unw_init_local failed");
+ skip_count++; // Do not include the "GetStackTrace" frame
+
+ while (n < max_depth) {
+ int ret = unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip);
+ if (ret < 0)
+ break;
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n++] = ip;
+ }
+ ret = unw_step(&cursor);
+ if (ret <= 0)
+ break;
+ }
+
+ g_now_entering = false;
+ return n;
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/stacktrace_powerpc-inl.h b/extern/libmv/third_party/glog/src/stacktrace_powerpc-inl.h
new file mode 100644
index 00000000000..1090ddedbc7
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/stacktrace_powerpc-inl.h
@@ -0,0 +1,130 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Craig Silverstein
+//
+// Produce stack trace. I'm guessing (hoping!) the code is much like
+// for x86. For apple machines, at least, it seems to be; see
+// http://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html
+// http://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK
+// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882
+
+#include <stdio.h>
+#include <stdint.h> // for uintptr_t
+#include "stacktrace.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return NULL if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING>
+static void **NextStackFrame(void **old_sp) {
+ void **new_sp = (void **) *old_sp;
+
+ // Check that the transition from frame pointer old_sp to frame
+ // pointer new_sp isn't clearly bogus
+ if (STRICT_UNWINDING) {
+    // With the stack growing downwards, older stack frames must be
+    // at a greater address than the current one.
+ if (new_sp <= old_sp) return NULL;
+ // Assume stack frames larger than 100,000 bytes are bogus.
+ if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return NULL;
+ } else {
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_sp == old_sp) return NULL;
+    // And allow frames up to about 1MB.
+ if ((new_sp > old_sp)
+ && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return NULL;
+ }
+ if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return NULL;
+ return new_sp;
+}
+
+// This ensures that GetStackTrace sets up the Link Register properly.
+void StacktracePowerPCDummyFunction() __attribute__((noinline));
+void StacktracePowerPCDummyFunction() { __asm__ volatile(""); }
+
+// If you change this function, also change GetStackFrames below.
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+ void **sp;
+ // Apple OS X uses an old version of gnu as -- both Darwin 7.9.0 (Panther)
+ // and Darwin 8.8.1 (Tiger) use as 1.38. This means we have to use a
+ // different asm syntax. I don't know quite the best way to discriminate
+ // systems using the old as from the new one; I've gone with __APPLE__.
+#ifdef __APPLE__
+ __asm__ volatile ("mr %0,r1" : "=r" (sp));
+#else
+ __asm__ volatile ("mr %0,1" : "=r" (sp));
+#endif
+
+  // On PowerPC, the "Link Register" or "Link Record" (LR) is a stack
+ // entry that holds the return address of the subroutine call (what
+ // instruction we run after our function finishes). This is the
+ // same as the stack-pointer of our parent routine, which is what we
+ // want here. While the compiler will always(?) set up LR for
+ // subroutine calls, it may not for leaf functions (such as this one).
+ // This routine forces the compiler (at least gcc) to push it anyway.
+ StacktracePowerPCDummyFunction();
+
+ // The LR save area is used by the callee, so the top entry is bogus.
+ skip_count++;
+
+ int n = 0;
+ while (sp && n < max_depth) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ // PowerPC has 3 main ABIs, which say where in the stack the
+      // Link Register is. For DARWIN and AIX (used by Apple and
+      // Linux ppc64), it's in sp[2]. For SYSV (used by Linux ppc),
+ // it's in sp[1].
+#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
+ result[n++] = *(sp+2);
+#elif defined(_CALL_SYSV)
+ result[n++] = *(sp+1);
+#elif defined(__APPLE__) || (defined(__linux) && defined(__PPC64__))
+ // This check is in case the compiler doesn't define _CALL_AIX/etc.
+ result[n++] = *(sp+2);
+#elif defined(__linux)
+ // This check is in case the compiler doesn't define _CALL_SYSV.
+ result[n++] = *(sp+1);
+#else
+#error Need to specify the PPC ABI for your architecture.
+#endif
+ }
+ // Use strict unwinding rules.
+ sp = NextStackFrame<true>(sp);
+ }
+ return n;
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/stacktrace_x86-inl.h b/extern/libmv/third_party/glog/src/stacktrace_x86-inl.h
new file mode 100644
index 00000000000..cfd31f783e3
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/stacktrace_x86-inl.h
@@ -0,0 +1,139 @@
+// Copyright (c) 2000 - 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Produce stack trace
+
+#include <stdint.h> // for uintptr_t
+
+#include "utilities.h" // for OS_* macros
+
+#if !defined(OS_WINDOWS)
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
+#include <stdio.h> // for NULL
+#include "stacktrace.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return NULL if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template<bool STRICT_UNWINDING>
+static void **NextStackFrame(void **old_sp) {
+ void **new_sp = (void **) *old_sp;
+
+ // Check that the transition from frame pointer old_sp to frame
+ // pointer new_sp isn't clearly bogus
+ if (STRICT_UNWINDING) {
+    // With the stack growing downwards, older stack frames must be
+    // at a greater address than the current one.
+ if (new_sp <= old_sp) return NULL;
+ // Assume stack frames larger than 100,000 bytes are bogus.
+ if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return NULL;
+ } else {
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_sp == old_sp) return NULL;
+    // And allow frames up to about 1MB.
+ if ((new_sp > old_sp)
+ && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return NULL;
+ }
+ if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return NULL;
+#ifdef __i386__
+  // In 32-bit code (possibly running on a 64-bit machine), the stack
+  // pointer can be very close to 0xffffffff, so we explicitly check
+  // for a pointer into the last two pages of the address space.
+ if ((uintptr_t)new_sp >= 0xffffe000) return NULL;
+#endif
+#if !defined(OS_WINDOWS)
+ if (!STRICT_UNWINDING) {
+ // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
+ // on AMD-based machines with VDSO-enabled kernels.
+    // Make an extra sanity check to ensure new_sp is readable.
+ // Note: NextStackFrame<false>() is only called while the program
+ // is already on its last leg, so it's ok to be slow here.
+ static int page_size = getpagesize();
+ void *new_sp_aligned = (void *)((uintptr_t)new_sp & ~(page_size - 1));
+ if (msync(new_sp_aligned, page_size, MS_ASYNC) == -1)
+ return NULL;
+ }
+#endif
+ return new_sp;
+}
+
+// If you change this function, also change GetStackFrames below.
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+ void **sp;
+#ifdef __i386__
+ // Stack frame format:
+ // sp[0] pointer to previous frame
+ // sp[1] caller address
+ // sp[2] first argument
+ // ...
+ sp = (void **)&result - 2;
+#endif
+
+#ifdef __x86_64__
+ // __builtin_frame_address(0) can return the wrong address on gcc-4.1.0-k8
+ unsigned long rbp;
+ // Move the value of the register %rbp into the local variable rbp.
+ // We need 'volatile' to prevent this instruction from getting moved
+ // around during optimization to before function prologue is done.
+ // An alternative way to achieve this
+ // would be (before this __asm__ instruction) to call Noop() defined as
+ // static void Noop() __attribute__ ((noinline)); // prevent inlining
+ // static void Noop() { asm(""); } // prevent optimizing-away
+ __asm__ volatile ("mov %%rbp, %0" : "=r" (rbp));
+ // Arguments are passed in registers on x86-64, so we can't just
+ // offset from &result
+ sp = (void **) rbp;
+#endif
+
+ int n = 0;
+ while (sp && n < max_depth) {
+ if (*(sp+1) == (void *)0) {
+ // In 64-bit code, we often see a frame that
+ // points to itself and has a return address of 0.
+ break;
+ }
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n++] = *(sp+1);
+ }
+ // Use strict unwinding rules.
+ sp = NextStackFrame<true>(sp);
+ }
+ return n;
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/stacktrace_x86_64-inl.h b/extern/libmv/third_party/glog/src/stacktrace_x86_64-inl.h
new file mode 100644
index 00000000000..f7d1dca85bc
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/stacktrace_x86_64-inl.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2005 - 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Arun Sharma
+//
+// Produce stack trace using libgcc
+
+extern "C" {
+#include <stdlib.h> // for NULL
+#include <unwind.h> // ABI defined unwinder
+}
+#include "stacktrace.h"
+
+_START_GOOGLE_NAMESPACE_
+
+typedef struct {
+ void **result;
+ int max_depth;
+ int skip_count;
+ int count;
+} trace_arg_t;
+
+
+// Workaround for the malloc() in _Unwind_Backtrace() issue.
+static _Unwind_Reason_Code nop_backtrace(struct _Unwind_Context *uc, void *opq) {
+ return _URC_NO_REASON;
+}
+
+
+// This code is not considered ready to run until
+// static initializers run so that we are guaranteed
+// that any malloc-related initialization is done.
+static bool ready_to_run = false;
+class StackTraceInit {
+ public:
+ StackTraceInit() {
+ // Extra call to force initialization
+ _Unwind_Backtrace(nop_backtrace, NULL);
+ ready_to_run = true;
+ }
+};
+
+static StackTraceInit module_initializer; // Force initialization
+
+static _Unwind_Reason_Code GetOneFrame(struct _Unwind_Context *uc, void *opq) {
+ trace_arg_t *targ = (trace_arg_t *) opq;
+
+ if (targ->skip_count > 0) {
+ targ->skip_count--;
+ } else {
+ targ->result[targ->count++] = (void *) _Unwind_GetIP(uc);
+ }
+
+ if (targ->count == targ->max_depth)
+ return _URC_END_OF_STACK;
+
+ return _URC_NO_REASON;
+}
+
+// If you change this function, also change GetStackFrames below.
+int GetStackTrace(void** result, int max_depth, int skip_count) {
+ if (!ready_to_run)
+ return 0;
+
+ trace_arg_t targ;
+
+ skip_count += 1; // Do not include the "GetStackTrace" frame
+
+ targ.result = result;
+ targ.max_depth = max_depth;
+ targ.skip_count = skip_count;
+ targ.count = 0;
+
+ _Unwind_Backtrace(GetOneFrame, &targ);
+
+ return targ.count;
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/symbolize.cc b/extern/libmv/third_party/glog/src/symbolize.cc
new file mode 100644
index 00000000000..3465de6c6fe
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/symbolize.cc
@@ -0,0 +1,681 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+// Stack-footprint reduction work done by Raksit Ashok
+//
+// Implementation note:
+//
+// We don't use the heap, only the stack. We want to reduce the
+// stack consumption so that the symbolizer can run on small stacks.
+//
+// Here are some numbers collected with GCC 4.1.0 on x86:
+// - sizeof(Elf32_Sym) = 16
+// - sizeof(Elf32_Shdr) = 40
+// - sizeof(Elf64_Sym) = 24
+// - sizeof(Elf64_Shdr) = 64
+//
+// This implementation is intended to be async-signal-safe but uses
+// some functions which are not guaranteed to be so, such as memchr()
+// and memmove(). We assume they are async-signal-safe.
+//
+
+#include "utilities.h"
+
+#if defined(HAVE_SYMBOLIZE)
+
+#include <limits>
+
+#include "symbolize.h"
+#include "demangle.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// We don't use assert() since it's not guaranteed to be
+// async-signal-safe. Instead we define a minimal assertion
+// macro. So far, we don't need pretty printing for __FILE__, etc.
+
+// A wrapper for abort() to make it callable in ? :.
+static int AssertFail() {
+ abort();
+ return 0; // Should not reach.
+}
+
+#define SAFE_ASSERT(expr) ((expr) ? 0 : AssertFail())
+
+static SymbolizeCallback g_symbolize_callback = NULL;
+void InstallSymbolizeCallback(SymbolizeCallback callback) {
+ g_symbolize_callback = callback;
+}
+
+// This function wraps the Demangle function to provide an interface
+// where the input symbol is demangled in-place.
+// To keep stack consumption low, we would like this function to not
+// get inlined.
+static ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size) {
+ char demangled[256]; // Big enough for sane demangled symbols.
+ if (Demangle(out, demangled, sizeof(demangled))) {
+ // Demangling succeeded. Copy to out if the space allows.
+ int len = strlen(demangled);
+ if (len + 1 <= out_size) { // +1 for '\0'.
+ SAFE_ASSERT(len < sizeof(demangled));
+ memmove(out, demangled, len + 1);
+ }
+ }
+}
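+
+// For example, a GCC-mangled symbol such as "_ZN3Foo3BarEv" would be
+// rewritten in place as "Foo::Bar()".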
+
+_END_GOOGLE_NAMESPACE_
+
+#if defined(__ELF__)
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <link.h> // For ElfW() macro.
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "symbolize.h"
+#include "config.h"
+#include "glog/raw_logging.h"
+
+// Re-runs fn until it doesn't cause EINTR.
+#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
+
+_START_GOOGLE_NAMESPACE_
+
+// Read up to "count" bytes from file descriptor "fd" into the buffer
+// starting at "buf" while handling short reads and EINTR. On
+// success, return the number of bytes read. Otherwise, return -1.
+static ssize_t ReadPersistent(const int fd, void *buf, const size_t count) {
+ SAFE_ASSERT(fd >= 0);
+ SAFE_ASSERT(count >= 0 && count <= std::numeric_limits<ssize_t>::max());
+ char *buf0 = reinterpret_cast<char *>(buf);
+ ssize_t num_bytes = 0;
+ while (num_bytes < count) {
+ ssize_t len;
+ NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
+ if (len < 0) { // There was an error other than EINTR.
+ return -1;
+ }
+ if (len == 0) { // Reached EOF.
+ break;
+ }
+ num_bytes += len;
+ }
+ SAFE_ASSERT(num_bytes <= count);
+ return num_bytes;
+}
+
+// Read up to "count" bytes from "offset" in the file pointed to by file
+// descriptor "fd" into the buffer starting at "buf". On success,
+// return the number of bytes read. Otherwise, return -1.
+static ssize_t ReadFromOffset(const int fd, void *buf,
+ const size_t count, const off_t offset) {
+ off_t off = lseek(fd, offset, SEEK_SET);
+ if (off == (off_t)-1) {
+ return -1;
+ }
+ return ReadPersistent(fd, buf, count);
+}
+
+// Try reading exactly "count" bytes from "offset" bytes in a file
+// pointed to by "fd" into the buffer starting at "buf" while handling
+// short reads and EINTR. On success, return true. Otherwise, return
+// false.
+static bool ReadFromOffsetExact(const int fd, void *buf,
+ const size_t count, const off_t offset) {
+ ssize_t len = ReadFromOffset(fd, buf, count, offset);
+ return len == count;
+}
+
+// Returns elf_header.e_type if the file pointed to by fd is an ELF binary.
+static int FileGetElfType(const int fd) {
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return -1;
+ }
+ if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
+ return -1;
+ }
+ return elf_header.e_type;
+}
+
+// Read the section headers in the given ELF binary, and if a section
+// of the specified type is found, set the output to this section header
+// and return true. Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ATTRIBUTE_NOINLINE bool
+GetSectionHeaderByType(const int fd, ElfW(Half) sh_num, const off_t sh_offset,
+ ElfW(Word) type, ElfW(Shdr) *out) {
+ // Read at most 16 section headers at a time to save read calls.
+ ElfW(Shdr) buf[16];
+ for (int i = 0; i < sh_num;) {
+ const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
+ const ssize_t num_bytes_to_read =
+ (sizeof(buf) > num_bytes_left) ? num_bytes_left : sizeof(buf);
+ const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
+ sh_offset + i * sizeof(buf[0]));
+ SAFE_ASSERT(len % sizeof(buf[0]) == 0);
+ const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+ SAFE_ASSERT(num_headers_in_buf <= sizeof(buf) / sizeof(buf[0]));
+ for (int j = 0; j < num_headers_in_buf; ++j) {
+ if (buf[j].sh_type == type) {
+ *out = buf[j];
+ return true;
+ }
+ }
+ i += num_headers_in_buf;
+ }
+ return false;
+}
+
+// There is no particular reason to limit section name to 63 characters,
+// but there has (as yet) been no need for anything longer either.
+const int kMaxSectionNameLen = 64;
+
+// name_len should include terminating '\0'.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ ElfW(Shdr) *out) {
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return false;
+ }
+
+ ElfW(Shdr) shstrtab;
+ off_t shstrtab_offset = (elf_header.e_shoff +
+ elf_header.e_shentsize * elf_header.e_shstrndx);
+ if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+ return false;
+ }
+
+ for (int i = 0; i < elf_header.e_shnum; ++i) {
+ off_t section_header_offset = (elf_header.e_shoff +
+ elf_header.e_shentsize * i);
+ if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+ return false;
+ }
+ char header_name[kMaxSectionNameLen];
+ if (sizeof(header_name) < name_len) {
+ RAW_LOG(WARNING, "Section name '%s' is too long (%"PRIuS"); "
+ "section will not be found (even if present).", name, name_len);
+ // No point in even trying.
+ return false;
+ }
+ off_t name_offset = shstrtab.sh_offset + out->sh_name;
+ ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
+ if (n_read == -1) {
+ return false;
+ } else if (n_read != name_len) {
+ // Short read -- name could be at end of file.
+ continue;
+ }
+ if (memcmp(header_name, name, name_len) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Iterate over the symbols in the given symbol table and look for the
+// symbol containing "pc". On success, return true and write the symbol
+// name to "out". Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ATTRIBUTE_NOINLINE bool
+FindSymbol(uint64_t pc, const int fd, char *out, int out_size,
+ uint64_t symbol_offset, const ElfW(Shdr) *strtab,
+ const ElfW(Shdr) *symtab) {
+ if (symtab == NULL) {
+ return false;
+ }
+ const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+ for (int i = 0; i < num_symbols;) {
+ off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+
+ // If we are reading Elf64_Sym's, we want to limit this array to
+ // 32 elements (to keep stack consumption low), otherwise we can
+ // have a 64 element Elf32_Sym array.
+#if __WORDSIZE == 64
+#define NUM_SYMBOLS 32
+#else
+#define NUM_SYMBOLS 64
+#endif
+
+ // Read at most NUM_SYMBOLS symbols at once to save read() calls.
+ ElfW(Sym) buf[NUM_SYMBOLS];
+ const ssize_t len = ReadFromOffset(fd, &buf, sizeof(buf), offset);
+ SAFE_ASSERT(len % sizeof(buf[0]) == 0);
+ const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+ SAFE_ASSERT(num_symbols_in_buf <= sizeof(buf)/sizeof(buf[0]));
+ for (int j = 0; j < num_symbols_in_buf; ++j) {
+ const ElfW(Sym)& symbol = buf[j];
+ uint64_t start_address = symbol.st_value;
+ start_address += symbol_offset;
+ uint64_t end_address = start_address + symbol.st_size;
+ if (symbol.st_value != 0 && // Skip null value symbols.
+ symbol.st_shndx != 0 && // Skip undefined symbols.
+ start_address <= pc && pc < end_address) {
+ ssize_t len1 = ReadFromOffset(fd, out, out_size,
+ strtab->sh_offset + symbol.st_name);
+ if (len1 <= 0 || memchr(out, '\0', out_size) == NULL) {
+ return false;
+ }
+ return true; // Obtained the symbol name.
+ }
+ }
+ i += num_symbols_in_buf;
+ }
+ return false;
+}
+
+// Get the symbol name of "pc" from the file pointed to by "fd". Process
+// both regular and dynamic symbol tables if necessary. On success,
+// write the symbol name to "out" and return true. Otherwise, return
+// false.
+static bool GetSymbolFromObjectFile(const int fd, uint64_t pc,
+ char *out, int out_size,
+ uint64_t map_start_address) {
+ // Read the ELF header.
+ ElfW(Ehdr) elf_header;
+ if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+ return false;
+ }
+
+ uint64_t symbol_offset = 0;
+ if (elf_header.e_type == ET_DYN) { // DSO needs offset adjustment.
+ symbol_offset = map_start_address;
+ }
+
+ ElfW(Shdr) symtab, strtab;
+
+ // Consult a regular symbol table first.
+ if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+ SHT_SYMTAB, &symtab)) {
+ return false;
+ }
+ if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+ symtab.sh_link * sizeof(symtab))) {
+ return false;
+ }
+ if (FindSymbol(pc, fd, out, out_size, symbol_offset,
+ &strtab, &symtab)) {
+ return true; // Found the symbol in a regular symbol table.
+ }
+
+ // If the symbol is not found, then consult a dynamic symbol table.
+ if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+ SHT_DYNSYM, &symtab)) {
+ return false;
+ }
+ if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+ symtab.sh_link * sizeof(symtab))) {
+ return false;
+ }
+ if (FindSymbol(pc, fd, out, out_size, symbol_offset,
+ &strtab, &symtab)) {
+ return true; // Found the symbol in a dynamic symbol table.
+ }
+
+ return false;
+}
+
+namespace {
+// Thin wrapper around a file descriptor so that the file descriptor
+// gets closed for sure.
+struct FileDescriptor {
+ const int fd_;
+ explicit FileDescriptor(int fd) : fd_(fd) {}
+ ~FileDescriptor() {
+ if (fd_ >= 0) {
+ NO_INTR(close(fd_));
+ }
+ }
+ int get() { return fd_; }
+
+ private:
+ explicit FileDescriptor(const FileDescriptor&);
+ void operator=(const FileDescriptor&);
+};
+
+// Helper class for reading lines from file.
+//
+// Note: we don't use ProcMapsIterator since the object is big (it has
+// a 5k array member) and uses async-unsafe functions such as sscanf()
+// and snprintf().
+class LineReader {
+ public:
+ explicit LineReader(int fd, char *buf, int buf_len) : fd_(fd),
+ buf_(buf), buf_len_(buf_len), bol_(buf), eol_(buf), eod_(buf) {
+ }
+
+ // Read '\n'-terminated line from file. On success, modify "bol"
+ // and "eol", then return true. Otherwise, return false.
+ //
+ // Note: if the last line doesn't end with '\n', the line will be
+  // dropped. This is intentional, to keep the code simple.
+ bool ReadLine(const char **bol, const char **eol) {
+ if (BufferIsEmpty()) { // First time.
+ const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_);
+ if (num_bytes <= 0) { // EOF or error.
+ return false;
+ }
+ eod_ = buf_ + num_bytes;
+ bol_ = buf_;
+ } else {
+ bol_ = eol_ + 1; // Advance to the next line in the buffer.
+ SAFE_ASSERT(bol_ <= eod_); // "bol_" can point to "eod_".
+ if (!HasCompleteLine()) {
+ const int incomplete_line_length = eod_ - bol_;
+ // Move the trailing incomplete line to the beginning.
+ memmove(buf_, bol_, incomplete_line_length);
+ // Read text from file and append it.
+ char * const append_pos = buf_ + incomplete_line_length;
+ const int capacity_left = buf_len_ - incomplete_line_length;
+ const ssize_t num_bytes = ReadPersistent(fd_, append_pos,
+ capacity_left);
+ if (num_bytes <= 0) { // EOF or error.
+ return false;
+ }
+ eod_ = append_pos + num_bytes;
+ bol_ = buf_;
+ }
+ }
+ eol_ = FindLineFeed();
+ if (eol_ == NULL) { // '\n' not found. Malformed line.
+ return false;
+ }
+ *eol_ = '\0'; // Replace '\n' with '\0'.
+
+ *bol = bol_;
+ *eol = eol_;
+ return true;
+ }
+
+ // Beginning of line.
+ const char *bol() {
+ return bol_;
+ }
+
+ // End of line.
+ const char *eol() {
+ return eol_;
+ }
+
+ private:
+ explicit LineReader(const LineReader&);
+ void operator=(const LineReader&);
+
+ char *FindLineFeed() {
+ return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
+ }
+
+ bool BufferIsEmpty() {
+ return buf_ == eod_;
+ }
+
+ bool HasCompleteLine() {
+ return !BufferIsEmpty() && FindLineFeed() != NULL;
+ }
+
+ const int fd_;
+ char * const buf_;
+ const int buf_len_;
+ char *bol_;
+ char *eol_;
+ const char *eod_; // End of data in "buf_".
+};
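+
+// Illustrative use of LineReader (the buffer size is arbitrary):
+//   char buf[1024];
+//   LineReader reader(fd, buf, sizeof(buf));
+//   const char *bol, *eol;
+//   while (reader.ReadLine(&bol, &eol)) {
+//     // Parse the line in [bol, eol); it is now '\0'-terminated.
+//   }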
+} // namespace
+
+// Place the hex number read from "start" into "*hex". The pointer to
+// the first non-hex character or "end" is returned.
+static char *GetHex(const char *start, const char *end, uint64_t *hex) {
+ *hex = 0;
+ const char *p;
+ for (p = start; p < end; ++p) {
+ int ch = *p;
+ if ((ch >= '0' && ch <= '9') ||
+ (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) {
+ *hex = (*hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
+ } else { // Encountered the first non-hex character.
+ break;
+ }
+ }
+ SAFE_ASSERT(p <= end);
+ return const_cast<char *>(p);
+}
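+
+// For example, given the text "08048000-0804c000", GetHex() stores
+// 0x08048000 in *hex and returns a pointer to the '-' character.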
+
+// Search for the object file (from /proc/self/maps) that contains
+// the specified pc. If found, open this file and return the file handle,
+// and also set start_address to the start address of where this object
+// file is mapped to in memory. Otherwise, return -1.
+static ATTRIBUTE_NOINLINE int
+OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
+ uint64_t &start_address) {
+ int object_fd;
+
+ // Open /proc/self/maps.
+ int maps_fd;
+ NO_INTR(maps_fd = open("/proc/self/maps", O_RDONLY));
+ FileDescriptor wrapped_maps_fd(maps_fd);
+ if (wrapped_maps_fd.get() < 0) {
+ return -1;
+ }
+
+ // Iterate over maps and look for the map containing the pc. Then
+ // look into the symbol tables inside.
+ char buf[1024]; // Big enough for line of sane /proc/self/maps
+ LineReader reader(wrapped_maps_fd.get(), buf, sizeof(buf));
+ while (true) {
+ const char *cursor;
+ const char *eol;
+ if (!reader.ReadLine(&cursor, &eol)) { // EOF or malformed line.
+ return -1;
+ }
+
+ // Start parsing line in /proc/self/maps. Here is an example:
+ //
+ // 08048000-0804c000 r-xp 00000000 08:01 2142121 /bin/cat
+ //
+ // We want start address (08048000), end address (0804c000), flags
+ // (r-xp) and file name (/bin/cat).
+
+ // Read start address.
+ cursor = GetHex(cursor, eol, &start_address);
+ if (cursor == eol || *cursor != '-') {
+ return -1; // Malformed line.
+ }
+ ++cursor; // Skip '-'.
+
+ // Read end address.
+ uint64_t end_address;
+ cursor = GetHex(cursor, eol, &end_address);
+ if (cursor == eol || *cursor != ' ') {
+ return -1; // Malformed line.
+ }
+ ++cursor; // Skip ' '.
+
+ // Check start and end addresses.
+ if (!(start_address <= pc && pc < end_address)) {
+ continue; // We skip this map. PC isn't in this map.
+ }
+
+ // Read flags. Skip flags until we encounter a space or eol.
+ const char * const flags_start = cursor;
+ while (cursor < eol && *cursor != ' ') {
+ ++cursor;
+ }
+    // We expect at least four letters for flags (e.g. "r-xp").
+ if (cursor == eol || cursor < flags_start + 4) {
+ return -1; // Malformed line.
+ }
+
+ // Check flags. We are only interested in "r-x" maps.
+ if (memcmp(flags_start, "r-x", 3) != 0) { // Not a "r-x" map.
+ continue; // We skip this map.
+ }
+ ++cursor; // Skip ' '.
+
+ // Skip to file name. "cursor" now points to file offset. We need to
+ // skip at least three spaces for file offset, dev, and inode.
+ int num_spaces = 0;
+ while (cursor < eol) {
+ if (*cursor == ' ') {
+ ++num_spaces;
+ } else if (num_spaces >= 3) {
+ // The first non-space character after skipping three spaces
+ // is the beginning of the file name.
+ break;
+ }
+ ++cursor;
+ }
+ if (cursor == eol) {
+ return -1; // Malformed line.
+ }
+
+ // Finally, "cursor" now points to file name of our interest.
+ NO_INTR(object_fd = open(cursor, O_RDONLY));
+ if (object_fd < 0) {
+ return -1;
+ }
+ return object_fd;
+ }
+}
+
+// The implementation of our symbolization routine. If it
+// successfully finds the symbol containing "pc" and obtains the
+// symbol name, returns true and writes the symbol name to "out".
+// Otherwise, returns false. If a callback function is installed via
+// InstallSymbolizeCallback(), it is also invoked from this function,
+// and "out" is used as its output.
+// To keep stack consumption low, we would like this function to not
+// get inlined.
+static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
+ int out_size) {
+ uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
+ uint64_t start_address = 0;
+
+ int object_fd = OpenObjectFileContainingPcAndGetStartAddress(pc0,
+ start_address);
+ if (object_fd == -1) {
+ return false;
+ }
+ FileDescriptor wrapped_object_fd(object_fd);
+ int elf_type = FileGetElfType(wrapped_object_fd.get());
+ if (elf_type == -1) {
+ return false;
+ }
+ if (g_symbolize_callback) {
+    // Run the callback if it's installed.
+ // Note: relocation (and much of the rest of this code) will be
+ // wrong for prelinked shared libraries and PIE executables.
+ uint64 relocation = (elf_type == ET_DYN) ? start_address : 0;
+ int num_bytes_written = g_symbolize_callback(wrapped_object_fd.get(),
+ pc, out, out_size,
+ relocation);
+ if (num_bytes_written > 0) {
+ out += num_bytes_written;
+ out_size -= num_bytes_written;
+ }
+ }
+ if (!GetSymbolFromObjectFile(wrapped_object_fd.get(), pc0,
+ out, out_size, start_address)) {
+ return false;
+ }
+
+ // Symbolization succeeded. Now we try to demangle the symbol.
+ DemangleInplace(out, out_size);
+ return true;
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#elif defined(OS_MACOSX) && defined(HAVE_DLADDR)
+
+#include <dlfcn.h>
+#include <string.h>
+
+_START_GOOGLE_NAMESPACE_
+
+static ATTRIBUTE_NOINLINE bool SymbolizeAndDemangle(void *pc, char *out,
+ int out_size) {
+ Dl_info info;
+ if (dladdr(pc, &info)) {
+    // dli_sname can be NULL if no symbol matches the address.
+    if (info.dli_sname != NULL && strlen(info.dli_sname) < out_size) {
+ strcpy(out, info.dli_sname);
+ // Symbolization succeeded. Now we try to demangle the symbol.
+ DemangleInplace(out, out_size);
+ return true;
+ }
+ }
+ return false;
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#else
+# error BUG: HAVE_SYMBOLIZE was wrongly set
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+bool Symbolize(void *pc, char *out, int out_size) {
+ SAFE_ASSERT(out_size >= 0);
+ return SymbolizeAndDemangle(pc, out, out_size);
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#else /* HAVE_SYMBOLIZE */
+
+#include <assert.h>
+
+#include "config.h"
+
+_START_GOOGLE_NAMESPACE_
+
+// TODO: Support other environments.
+bool Symbolize(void *pc, char *out, int out_size) {
+ assert(0);
+ return false;
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#endif
diff --git a/extern/libmv/third_party/glog/src/symbolize.h b/extern/libmv/third_party/glog/src/symbolize.h
new file mode 100644
index 00000000000..1ebe4dd94a2
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/symbolize.h
@@ -0,0 +1,116 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Satoru Takabayashi
+//
+// This library provides the Symbolize() function, which maps program
+// counters to their corresponding symbol names on Linux platforms.
+// This library has a minimal implementation of an ELF symbol table
+// reader (i.e. it doesn't depend on libelf, etc.).
+//
+// The algorithm used in Symbolize() is as follows.
+//
+// 1. Go through a list of maps in /proc/self/maps and find the map
+// containing the program counter.
+//
+// 2. Open the mapped file and find a regular symbol table inside.
+// Iterate over symbols in the symbol table and look for the symbol
+// containing the program counter. If such a symbol is found,
+// obtain the symbol name, and demangle the symbol if possible.
+// If the symbol isn't found in the regular symbol table (binary is
+// stripped), try the same thing with a dynamic symbol table.
+//
+// Note that Symbolize() is originally implemented to be used in
+// FailureSignalHandler() in base/google.cc. Hence it doesn't use
+// malloc() or other unsafe operations. It should be both
+// thread-safe and async-signal-safe.
+
+#ifndef BASE_SYMBOLIZE_H_
+#define BASE_SYMBOLIZE_H_
+
+#include "utilities.h"
+#include "config.h"
+#include "glog/logging.h"
+
+#ifdef HAVE_SYMBOLIZE
+
+#if defined(__ELF__) // defined by gcc on Linux
+#include <elf.h>
+#include <link.h> // For ElfW() macro.
+
+// If there is no ElfW macro, define it ourselves.
+#ifndef ElfW
+# if SIZEOF_VOID_P == 4
+# define ElfW(type) Elf32_##type
+# elif SIZEOF_VOID_P == 8
+# define ElfW(type) Elf64_##type
+# else
+# error "Unknown sizeof(void *)"
+# endif
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+// Gets the section header for the given name, if it exists. Returns true on
+// success. Otherwise, returns false.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+ ElfW(Shdr) *out);
+
+_END_GOOGLE_NAMESPACE_
+
+#endif /* __ELF__ */
+
+_START_GOOGLE_NAMESPACE_
+
+// Installs a callback function, which will be called right before a symbol name
+// is printed. The callback is intended to be used for showing a file name and a
+// line number preceding a symbol name.
+// "fd" is a file descriptor of the object file containing the program
+// counter "pc". The callback function should write output to "out"
+// and return the size of the output written. On error, the callback
+// function should return -1.
+typedef int (*SymbolizeCallback)(int fd, void *pc, char *out, size_t out_size,
+ uint64 relocation);
+void InstallSymbolizeCallback(SymbolizeCallback callback);
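+// A minimal illustrative sketch (hypothetical callback, not part of glog):
+//
+//   int MyPrefixCallback(int fd, void *pc, char *out, size_t out_size,
+//                        uint64 relocation) {
+//     // A real callback would look up fd/pc and emit "file:line " here;
+//     // returning -1 reports an error to the symbolizer.
+//     return snprintf(out, out_size, "(no file info) ");
+//   }
+//   ...
+//   InstallSymbolizeCallback(&MyPrefixCallback);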
+
+_END_GOOGLE_NAMESPACE_
+
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+// Symbolizes a program counter. On success, returns true and writes the
+// symbol name to "out". The symbol name is demangled if possible
+// (supports symbols generated by GCC 3.x or newer). Otherwise,
+// returns false.
+bool Symbolize(void *pc, char *out, int out_size);
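+// Usage sketch (illustrative; SomeFunction stands in for any function):
+//   char symbol[1024];
+//   if (Symbolize(reinterpret_cast<void*>(&SomeFunction),
+//                 symbol, sizeof(symbol))) {
+//     // "symbol" now holds the (demangled, if possible) name.
+//   }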
+
+_END_GOOGLE_NAMESPACE_
+
+#endif // BASE_SYMBOLIZE_H_
diff --git a/extern/libmv/third_party/glog/src/utilities.cc b/extern/libmv/third_party/glog/src/utilities.cc
new file mode 100644
index 00000000000..e97d4f237ec
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/utilities.cc
@@ -0,0 +1,335 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Shinichiro Hamaji
+
+#include "utilities.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <signal.h>
+#ifdef HAVE_SYS_TIME_H
+# include <sys/time.h>
+#endif
+#include <time.h>
+#if defined(HAVE_SYSCALL_H)
+#include <syscall.h> // for syscall()
+#elif defined(HAVE_SYS_SYSCALL_H)
+#include <sys/syscall.h> // for syscall()
+#endif
+#ifdef HAVE_SYSLOG_H
+# include <syslog.h>
+#endif
+
+#include "base/googleinit.h"
+
+using std::string;
+
+_START_GOOGLE_NAMESPACE_
+
+static const char* g_program_invocation_short_name = NULL;
+static pthread_t g_main_thread_id;
+
+_END_GOOGLE_NAMESPACE_
+
+// The following APIs are all internal.
+#ifdef HAVE_STACKTRACE
+
+#include "stacktrace.h"
+#include "symbolize.h"
+#include "base/commandlineflags.h"
+
+GLOG_DEFINE_bool(symbolize_stacktrace, true,
+ "Symbolize the stack trace in the tombstone");
+
+_START_GOOGLE_NAMESPACE_
+
+typedef void DebugWriter(const char*, void*);
+
+// The %p field width for printf() functions is two characters per byte.
+// For some environments, add two extra bytes for the leading "0x".
+static const int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
+
+static void DebugWriteToStderr(const char* data, void *unused) {
+ // This one is signal-safe.
+ write(STDERR_FILENO, data, strlen(data));
+}
+
+void DebugWriteToString(const char* data, void *arg) {
+ reinterpret_cast<string*>(arg)->append(data);
+}
+
+#ifdef HAVE_SYMBOLIZE
+// Print a program counter and its symbol name.
+static void DumpPCAndSymbol(DebugWriter *writerfn, void *arg, void *pc,
+ const char * const prefix) {
+ char tmp[1024];
+ const char *symbol = "(unknown)";
+ // Symbolizes the previous address of pc because pc may be in the
+ // next function. The overrun happens when the function ends with
+ // a call to a function annotated noreturn (e.g. CHECK).
+ if (Symbolize(reinterpret_cast<char *>(pc) - 1, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ snprintf(buf, sizeof(buf), "%s@ %*p %s\n",
+ prefix, kPrintfPointerFieldWidth, pc, symbol);
+ writerfn(buf, arg);
+}
+#endif
+
+static void DumpPC(DebugWriter *writerfn, void *arg, void *pc,
+ const char * const prefix) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%s@ %*p\n",
+ prefix, kPrintfPointerFieldWidth, pc);
+ writerfn(buf, arg);
+}
+
+// Dump current stack trace as directed by writerfn
+static void DumpStackTrace(int skip_count, DebugWriter *writerfn, void *arg) {
+ // Print stack trace
+ void* stack[32];
+ int depth = GetStackTrace(stack, ARRAYSIZE(stack), skip_count+1);
+ for (int i = 0; i < depth; i++) {
+#if defined(HAVE_SYMBOLIZE)
+ if (FLAGS_symbolize_stacktrace) {
+ DumpPCAndSymbol(writerfn, arg, stack[i], " ");
+ } else {
+ DumpPC(writerfn, arg, stack[i], " ");
+ }
+#else
+ DumpPC(writerfn, arg, stack[i], " ");
+#endif
+ }
+}
+
+static void DumpStackTraceAndExit() {
+ DumpStackTrace(1, DebugWriteToStderr, NULL);
+
+ // Set the default signal handler for SIGABRT, to avoid invoking our
+ // own signal handler installed by InstallFailedSignalHandler().
+ struct sigaction sig_action;
+ memset(&sig_action, 0, sizeof(sig_action));
+ sigemptyset(&sig_action.sa_mask);
+ sig_action.sa_handler = SIG_DFL;
+ sigaction(SIGABRT, &sig_action, NULL);
+
+ abort();
+}
+
+_END_GOOGLE_NAMESPACE_
+
+#endif // HAVE_STACKTRACE
+
+_START_GOOGLE_NAMESPACE_
+
+namespace glog_internal_namespace_ {
+
+const char* ProgramInvocationShortName() {
+ if (g_program_invocation_short_name != NULL) {
+ return g_program_invocation_short_name;
+ } else {
+    // TODO(hamaji): Use /proc/self/cmdline and so on?
+ return "UNKNOWN";
+ }
+}
+
+bool IsGoogleLoggingInitialized() {
+ return g_program_invocation_short_name != NULL;
+}
+
+bool is_default_thread() {
+ if (g_program_invocation_short_name == NULL) {
+ // InitGoogleLogging() not yet called, so unlikely to be in a different
+ // thread
+ return true;
+ } else {
+ return pthread_equal(pthread_self(), g_main_thread_id);
+ }
+}
+
+#ifdef OS_WINDOWS
+struct timeval {
+ long tv_sec, tv_usec;
+};
+
+// Based on: http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/os_win32.c&q=GetSystemTimeAsFileTime%20license:bsd
+// See COPYING for copyright information.
+static int gettimeofday(struct timeval *tv, void* tz) {
+#define EPOCHFILETIME (116444736000000000ULL)
+ FILETIME ft;
+ LARGE_INTEGER li;
+ uint64 tt;
+
+ GetSystemTimeAsFileTime(&ft);
+ li.LowPart = ft.dwLowDateTime;
+ li.HighPart = ft.dwHighDateTime;
+ tt = (li.QuadPart - EPOCHFILETIME) / 10;
+ tv->tv_sec = tt / 1000000;
+ tv->tv_usec = tt % 1000000;
+
+ return 0;
+}
+#endif
+
+int64 CycleClock_Now() {
+  // TODO(hamaji): temporary implementation - it might be too slow.
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return static_cast<int64>(tv.tv_sec) * 1000000 + tv.tv_usec;
+}
+
+int64 UsecToCycles(int64 usec) {
+ return usec;
+}
+
+WallTime WallTime_Now() {
+  // Now, cycle clock is returning microseconds since the epoch.
+ return CycleClock_Now() * 0.000001;
+}
+
+static int32 g_main_thread_pid = getpid();
+int32 GetMainThreadPid() {
+ return g_main_thread_pid;
+}
+
+pid_t GetTID() {
+  // On Linux, FreeBSD, and OS X, we try to use gettid().
+#if defined OS_LINUX || defined OS_FREEBSD || defined OS_MACOSX
+#ifndef __NR_gettid
+#ifdef OS_MACOSX
+#define __NR_gettid SYS_gettid
+#elif ! defined __i386__
+#error "Must define __NR_gettid for non-x86 platforms"
+#else
+#define __NR_gettid 224
+#endif
+#endif
+ static bool lacks_gettid = false;
+ if (!lacks_gettid) {
+ pid_t tid = syscall(__NR_gettid);
+ if (tid != -1) {
+ return tid;
+ }
+ // Technically, this variable has to be volatile, but there is a small
+ // performance penalty in accessing volatile variables and there should
+ // not be any serious adverse effect if a thread does not immediately see
+ // the value change to "true".
+ lacks_gettid = true;
+ }
+#endif  // OS_LINUX || OS_FREEBSD || OS_MACOSX
+
+ // If gettid() could not be used, we use one of the following.
+#if defined OS_LINUX
+ return getpid(); // Linux: getpid returns thread ID when gettid is absent
+#elif defined OS_WINDOWS || defined OS_CYGWIN
+ return GetCurrentThreadId();
+#else
+ // If none of the techniques above worked, we use pthread_self().
+ return (pid_t)(uintptr_t)pthread_self();
+#endif
+}
+
+const char* const_basename(const char* filepath) {
+ const char* base = strrchr(filepath, '/');
+#ifdef OS_WINDOWS // Look for either path separator in Windows
+ if (!base)
+ base = strrchr(filepath, '\\');
+#endif
+ return base ? (base+1) : filepath;
+}
+
+static string g_my_user_name;
+const string& MyUserName() {
+ return g_my_user_name;
+}
+static void MyUserNameInitializer() {
+ // TODO(hamaji): Probably this is not portable.
+#if defined(OS_WINDOWS)
+ const char* user = getenv("USERNAME");
+#else
+ const char* user = getenv("USER");
+#endif
+ if (user != NULL) {
+ g_my_user_name = user;
+ } else {
+ g_my_user_name = "invalid-user";
+ }
+}
+REGISTER_MODULE_INITIALIZER(utilities, MyUserNameInitializer());
+
+#ifdef HAVE_STACKTRACE
+void DumpStackTraceToString(string* stacktrace) {
+ DumpStackTrace(1, DebugWriteToString, stacktrace);
+}
+#endif
+
+// We use an atomic operation to prevent problems with calling CrashReason
+// from inside the Mutex implementation (potentially through RAW_CHECK).
+static const CrashReason* g_reason = 0;
+
+void SetCrashReason(const CrashReason* r) {
+ sync_val_compare_and_swap(&g_reason,
+ reinterpret_cast<const CrashReason*>(0),
+ r);
+}
+
+} // namespace glog_internal_namespace_
+
+void InitGoogleLogging(const char* argv0) {
+ CHECK(!IsGoogleLoggingInitialized())
+ << "You called InitGoogleLogging() twice!";
+ const char* slash = strrchr(argv0, '/');
+#ifdef OS_WINDOWS
+ if (!slash) slash = strrchr(argv0, '\\');
+#endif
+ g_program_invocation_short_name = slash ? slash + 1 : argv0;
+ g_main_thread_id = pthread_self();
+
+#ifdef HAVE_STACKTRACE
+ InstallFailureFunction(&DumpStackTraceAndExit);
+#endif
+}
+
+void ShutdownGoogleLogging() {
+ CHECK(IsGoogleLoggingInitialized())
+ << "You called ShutdownGoogleLogging() without InitGoogleLogging() first!";
+#ifdef HAVE_SYSLOG_H
+ closelog();
+#endif
+}
+
+_END_GOOGLE_NAMESPACE_
+
+// Make an implementation of stacktrace compiled.
+#ifdef STACKTRACE_H
+# include STACKTRACE_H
+#endif
diff --git a/extern/libmv/third_party/glog/src/utilities.h b/extern/libmv/third_party/glog/src/utilities.h
new file mode 100644
index 00000000000..2d4e99e595e
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/utilities.h
@@ -0,0 +1,222 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Shinichiro Hamaji
+//
+// Define utilities for glog internal usage.
+
+#ifndef UTILITIES_H__
+#define UTILITIES_H__
+
+#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__)
+# define OS_WINDOWS
+#elif defined(__CYGWIN__) || defined(__CYGWIN32__)
+# define OS_CYGWIN
+#elif defined(linux) || defined(__linux) || defined(__linux__)
+# define OS_LINUX
+#elif defined(macintosh) || defined(__APPLE__) || defined(__APPLE_CC__)
+# define OS_MACOSX
+#elif defined(__FreeBSD__)
+# define OS_FREEBSD
+#elif defined(__NetBSD__)
+# define OS_NETBSD
+#elif defined(__OpenBSD__)
+# define OS_OPENBSD
+#else
+// TODO(hamaji): Add other platforms.
+#endif
+
+// printf macros for size_t, in the style of inttypes.h
+#ifdef _LP64
+#define __PRIS_PREFIX "z"
+#else
+#define __PRIS_PREFIX
+#endif
+
+// Use these macros after a % in a printf format string
+// to get correct 32/64 bit behavior, like this:
+// size_t size = records.size();
+// printf("%"PRIuS"\n", size);
+
+#define PRIdS __PRIS_PREFIX "d"
+#define PRIxS __PRIS_PREFIX "x"
+#define PRIuS __PRIS_PREFIX "u"
+#define PRIXS __PRIS_PREFIX "X"
+#define PRIoS __PRIS_PREFIX "o"
+
+#include "base/mutex.h" // This must go first so we get _XOPEN_SOURCE
+
+#include <string>
+
+#if defined(OS_WINDOWS)
+# include "port.h"
+#endif
+
+#include "config.h"
+#include <glog/logging.h>
+
+// There are three different ways we can try to get the stack trace:
+//
+// 1) The libunwind library. This is still in development, and as a
+// separate library adds a new dependency, but doesn't need a frame
+// pointer. It also doesn't call malloc.
+//
+// 2) Our hand-coded stack-unwinder. This depends on a certain stack
+// layout, which is used by gcc (and those systems using a
+// gcc-compatible ABI) on x86 systems, at least since gcc 2.95.
+// It uses the frame pointer to do its work.
+//
+// 3) The gdb unwinder -- also the one used by the c++ exception code.
+// It's obviously well-tested, but has a fatal flaw: it can call
+// malloc() from the unwinder. This is a problem because we're
+// trying to use the unwinder to instrument malloc().
+//
+// Note: if you add a new implementation here, make sure it works
+// correctly when GetStackTrace() is called with max_depth == 0.
+// Some code may do that.
+
+#if defined(HAVE_LIB_UNWIND)
+# define STACKTRACE_H "stacktrace_libunwind-inl.h"
+#elif !defined(NO_FRAME_POINTER)
+# if defined(__i386__) && __GNUC__ >= 2
+# define STACKTRACE_H "stacktrace_x86-inl.h"
+# elif defined(__x86_64__) && __GNUC__ >= 2
+# define STACKTRACE_H "stacktrace_x86_64-inl.h"
+# elif (defined(__ppc__) || defined(__PPC__)) && __GNUC__ >= 2
+# define STACKTRACE_H "stacktrace_powerpc-inl.h"
+# endif
+#endif
+
+#if !defined(STACKTRACE_H) && defined(HAVE_EXECINFO_H)
+# define STACKTRACE_H "stacktrace_generic-inl.h"
+#endif
+
+#if defined(STACKTRACE_H)
+# define HAVE_STACKTRACE
+#endif
+
+// defined by gcc
+#if defined(__ELF__) && defined(OS_LINUX)
+# define HAVE_SYMBOLIZE
+#elif defined(OS_MACOSX) && defined(HAVE_DLADDR)
+// Use dladdr to symbolize.
+# define HAVE_SYMBOLIZE
+#endif
+
+#ifndef ARRAYSIZE
+// There is a better way, but this is good enough for our purpose.
+# define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+#endif
+
+_START_GOOGLE_NAMESPACE_
+
+namespace glog_internal_namespace_ {
+
+#ifdef HAVE___ATTRIBUTE__
+# define ATTRIBUTE_NOINLINE __attribute__ ((noinline))
+# define HAVE_ATTRIBUTE_NOINLINE
+#else
+# define ATTRIBUTE_NOINLINE
+#endif
+
+const char* ProgramInvocationShortName();
+
+bool IsGoogleLoggingInitialized();
+
+bool is_default_thread();
+
+int64 CycleClock_Now();
+
+int64 UsecToCycles(int64 usec);
+
+typedef double WallTime;
+WallTime WallTime_Now();
+
+int32 GetMainThreadPid();
+
+pid_t GetTID();
+
+const std::string& MyUserName();
+
+// Get the part of filepath after the last path separator.
+// (Doesn't modify filepath, contrary to basename() in libgen.h.)
+const char* const_basename(const char* filepath);
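+// For example, const_basename("foo/bar/baz.cc") returns "baz.cc", and
+// const_basename("baz.cc") returns the input pointer unchanged.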
+
+// Wrapper of __sync_val_compare_and_swap. If the GCC extension isn't
+// defined, we try the CPU-specific logic (we only support x86 and
+// x86_64 for now) first, then use a naive implementation, which has a
+// race condition.
+template<typename T>
+inline T sync_val_compare_and_swap(T* ptr, T oldval, T newval) {
+#if defined(HAVE___SYNC_VAL_COMPARE_AND_SWAP)
+ return __sync_val_compare_and_swap(ptr, oldval, newval);
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+ T ret;
+ __asm__ __volatile__("lock; cmpxchg %1, (%2);"
+ :"=a"(ret)
+                       // GCC may produce %sil or %dil for
+                       // constraint "r", but some of Apple's gas
+                       // doesn't know the 8-bit registers.
+ // We use "q" to avoid these registers.
+ :"q"(newval), "q"(ptr), "a"(oldval)
+ :"memory", "cc");
+ return ret;
+#else
+ T ret = *ptr;
+ if (ret == oldval) {
+ *ptr = newval;
+ }
+ return ret;
+#endif
+}
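+// Usage sketch (illustrative): set a pointer exactly once, as
+// SetCrashReason() does in utilities.cc:
+//   static const char* g_val = 0;
+//   sync_val_compare_and_swap(&g_val,
+//                             static_cast<const char*>(0),
+//                             static_cast<const char*>("first wins"));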
+
+void DumpStackTraceToString(std::string* stacktrace);
+
+struct CrashReason {
+ CrashReason() : filename(0), line_number(0), message(0), depth(0) {}
+
+ const char* filename;
+ int line_number;
+ const char* message;
+
+ // We'll also store a bit of stack trace context at the time of crash as
+ // it may not be available later on.
+ void* stack[32];
+ int depth;
+};
+
+void SetCrashReason(const CrashReason* r);
+
+} // namespace glog_internal_namespace_
+
+_END_GOOGLE_NAMESPACE_
+
+using namespace GOOGLE_NAMESPACE::glog_internal_namespace_;
+
+#endif // UTILITIES_H__
diff --git a/extern/libmv/third_party/glog/src/vlog_is_on.cc b/extern/libmv/third_party/glog/src/vlog_is_on.cc
new file mode 100644
index 00000000000..ed88514dce5
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/vlog_is_on.cc
@@ -0,0 +1,249 @@
+// Copyright (c) 1999, 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Ray Sidney and many others
+//
+// Broken out from logging.cc by Soren Lassen
+// logging_unittest.cc covers the functionality herein
+
+#include "utilities.h"
+
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <cstdio>
+#include <string>
+#include "base/commandlineflags.h"
+#include <glog/logging.h>
+#include <glog/raw_logging.h>
+#include "base/googleinit.h"
+
+// glog doesn't have annotation support, so make this a no-op.
+#define ANNOTATE_BENIGN_RACE(address, description)
+
+using std::string;
+
+GLOG_DEFINE_int32(v, 0, "Show all VLOG(m) messages for m <= this."
+" Overridable by --vmodule.");
+
+GLOG_DEFINE_string(vmodule, "", "per-module verbose level."
+" Argument is a comma-separated list of <module name>=<log level>."
+" <module name> is a glob pattern, matched against the filename base"
+" (that is, name ignoring .cc/.h./-inl.h)."
+" <log level> overrides any value given by --v.");
+
+_START_GOOGLE_NAMESPACE_
+
+namespace glog_internal_namespace_ {
+
+// Implementation of fnmatch that does not need 0-terminated arguments
+// and does not allocate any memory. It only supports the "*" and "?"
+// wildcards, not the "[...]" patterns.
+// It is deliberately not a static function so the unittest can use it.
+GOOGLE_GLOG_DLL_DECL bool SafeFNMatch_(const char* pattern,
+ size_t patt_len,
+ const char* str,
+ size_t str_len) {
+  // size_t indices to match patt_len/str_len and avoid signed/unsigned
+  // comparison mismatches below.
+  size_t p = 0;
+  size_t s = 0;
+ while (1) {
+ if (p == patt_len && s == str_len) return true;
+ if (p == patt_len) return false;
+ if (s == str_len) return p+1 == patt_len && pattern[p] == '*';
+ if (pattern[p] == str[s] || pattern[p] == '?') {
+ p += 1;
+ s += 1;
+ continue;
+ }
+ if (pattern[p] == '*') {
+ if (p+1 == patt_len) return true;
+ do {
+ if (SafeFNMatch_(pattern+(p+1), patt_len-(p+1), str+s, str_len-s)) {
+ return true;
+ }
+ s += 1;
+ } while (s != str_len);
+ return false;
+ }
+ return false;
+ }
+}
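+// For example (illustrative):
+//   SafeFNMatch_("gfs*", 4, "gfs_master", 10) returns true;
+//   SafeFNMatch_("file?", 5, "files", 5) returns true;
+//   SafeFNMatch_("file", 4, "files", 5) returns false.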
+
+} // namespace glog_internal_namespace_
+
+using glog_internal_namespace_::SafeFNMatch_;
+
+int32 kLogSiteUninitialized = 1000;
+
+// List of per-module log levels from FLAGS_vmodule.
+// Once created each element is never deleted/modified
+// except for the vlog_level: other threads will read VModuleInfo blobs
+// w/o locks and we'll store pointers to vlog_level at VLOG locations
+// that will never go away.
+// We can't use an STL struct here as we wouldn't know
+// when it's safe to delete/update it: other threads need to use it w/o locks.
+struct VModuleInfo {
+ string module_pattern;
+ mutable int32 vlog_level; // Conceptually this is an AtomicWord, but it's
+ // too much work to use AtomicWord type here
+ // w/o much actual benefit.
+ const VModuleInfo* next;
+};
+
+// This protects the following global variables.
+static Mutex vmodule_lock;
+// Pointer to head of the VModuleInfo list.
+// It's a map from module pattern to logging level for those module(s).
+static VModuleInfo* vmodule_list = 0;
+// Boolean initialization flag.
+static bool inited_vmodule = false;
+
+// L >= vmodule_lock.
+static void VLOG2Initializer() {
+ vmodule_lock.AssertHeld();
+ // Can now parse --vmodule flag and initialize mapping of module-specific
+ // logging levels.
+ inited_vmodule = false;
+ const char* vmodule = FLAGS_vmodule.c_str();
+ const char* sep;
+ VModuleInfo* head = NULL;
+ VModuleInfo* tail = NULL;
+ while ((sep = strchr(vmodule, '=')) != NULL) {
+ string pattern(vmodule, sep - vmodule);
+ int module_level;
+ if (sscanf(sep, "=%d", &module_level) == 1) {
+ VModuleInfo* info = new VModuleInfo;
+ info->module_pattern = pattern;
+ info->vlog_level = module_level;
+ if (head) tail->next = info;
+ else head = info;
+ tail = info;
+ }
+ // Skip past this entry
+ vmodule = strchr(sep, ',');
+ if (vmodule == NULL) break;
+ vmodule++; // Skip past ","
+ }
+ if (head) { // Put them into the list at the head:
+ tail->next = vmodule_list;
+ vmodule_list = head;
+ }
+ inited_vmodule = true;
+}
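+// For example (illustrative), --vmodule=mapreduce=2,file=1 yields the
+// entries {"mapreduce" -> 2} and {"file" -> 1}, which are prepended as a
+// block to the head of vmodule_list.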
+
+// This can be called very early, so we use MutexLock and RAW_VLOG here.
+int SetVLOGLevel(const char* module_pattern, int log_level) {
+ int result = FLAGS_v;
+ int const pattern_len = strlen(module_pattern);
+ bool found = false;
+ MutexLock l(&vmodule_lock); // protect whole read-modify-write
+ for (const VModuleInfo* info = vmodule_list;
+ info != NULL; info = info->next) {
+ if (info->module_pattern == module_pattern) {
+ if (!found) {
+ result = info->vlog_level;
+ found = true;
+ }
+ info->vlog_level = log_level;
+ } else if (!found &&
+ SafeFNMatch_(info->module_pattern.c_str(),
+ info->module_pattern.size(),
+ module_pattern, pattern_len)) {
+ result = info->vlog_level;
+ found = true;
+ }
+ }
+ if (!found) {
+ VModuleInfo* info = new VModuleInfo;
+ info->module_pattern = module_pattern;
+ info->vlog_level = log_level;
+ info->next = vmodule_list;
+ vmodule_list = info;
+ }
+ RAW_VLOG(1, "Set VLOG level for \"%s\" to %d", module_pattern, log_level);
+ return result;
+}
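+// Usage sketch (illustrative):
+//   int old_level = SetVLOGLevel("gfs*", 3);
+//   // old_level is the level previously in effect for the pattern,
+//   // or FLAGS_v if no matching entry existed.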
+
+// NOTE: Individual VLOG statements cache the integer log level pointers.
+// NOTE: This function must not allocate memory or require any locks.
+bool InitVLOG3__(int32** site_flag, int32* site_default,
+ const char* fname, int32 verbose_level) {
+ MutexLock l(&vmodule_lock);
+ bool read_vmodule_flag = inited_vmodule;
+ if (!read_vmodule_flag) {
+ VLOG2Initializer();
+ }
+
+ // protect the errno global in case someone writes:
+ // VLOG(..) << "The last error was " << strerror(errno)
+ int old_errno = errno;
+
+ // site_default normally points to FLAGS_v
+ int32* site_flag_value = site_default;
+
+ // Get basename for file
+ const char* base = strrchr(fname, '/');
+ base = base ? (base+1) : fname;
+ const char* base_end = strchr(base, '.');
+ size_t base_length = base_end ? (base_end - base) : strlen(base);
+
+ // Trim out trailing "-inl" if any
+ if (base_length >= 4 && (memcmp(base+base_length-4, "-inl", 4) == 0)) {
+ base_length -= 4;
+ }
+
+ // TODO: Trim out _unittest suffix? Perhaps it is better to have
+ // the extra control and just leave it there.
+
+ // find target in vector of modules, replace site_flag_value with
+ // a module-specific verbose level, if any.
+ for (const VModuleInfo* info = vmodule_list;
+ info != NULL; info = info->next) {
+ if (SafeFNMatch_(info->module_pattern.c_str(), info->module_pattern.size(),
+ base, base_length)) {
+ site_flag_value = &info->vlog_level;
+ // value at info->vlog_level is now what controls
+ // the VLOG at the caller site forever
+ break;
+ }
+ }
+
+ // Cache the vlog value pointer if --vmodule flag has been parsed.
+ ANNOTATE_BENIGN_RACE(site_flag,
+ "*site_flag may be written by several threads,"
+ " but the value will be the same");
+ if (read_vmodule_flag) *site_flag = site_flag_value;
+
+ // restore the errno in case something recoverable went wrong during
+ // the initialization of the VLOG mechanism (see above note "protect the..")
+ errno = old_errno;
+ return *site_flag_value >= verbose_level;
+}
+
+_END_GOOGLE_NAMESPACE_
diff --git a/extern/libmv/third_party/glog/src/windows/config.h b/extern/libmv/third_party/glog/src/windows/config.h
new file mode 100644
index 00000000000..682a1b9309d
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/windows/config.h
@@ -0,0 +1,136 @@
+/* src/config.h.in. Generated from configure.ac by autoheader. */
+
+/* Namespace for Google classes */
+#define GOOGLE_NAMESPACE google
+
+/* Define if you have the `dladdr' function */
+#undef HAVE_DLADDR
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#undef HAVE_DLFCN_H
+
+/* Define to 1 if you have the <execinfo.h> header file. */
+#undef HAVE_EXECINFO_H
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the <libunwind.h> header file. */
+#undef HAVE_LIBUNWIND_H
+
+/* define if you have google gflags library */
+#define HAVE_LIB_GFLAGS 1
+
+/* define if you have libunwind */
+#undef HAVE_LIB_UNWIND
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* define if the compiler implements namespaces */
+#undef HAVE_NAMESPACES
+
+/* Define if you have POSIX threads libraries and header files. */
+#undef HAVE_PTHREAD
+
+/* define if the compiler implements pthread_rwlock_* */
+#undef HAVE_RWLOCK
+
+/* Define if you have the `sigaltstack' function */
+#undef HAVE_SIGALTSTACK
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the <syscall.h> header file. */
+#undef HAVE_SYSCALL_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/syscall.h> header file. */
+#undef HAVE_SYS_SYSCALL_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <ucontext.h> header file. */
+#undef HAVE_UCONTEXT_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* define if the compiler supports using expression for operator */
+#undef HAVE_USING_OPERATOR
+
+/* define if your compiler has __attribute__ */
+#undef HAVE___ATTRIBUTE__
+
+/* define if your compiler has __builtin_expect */
+#undef HAVE___BUILTIN_EXPECT
+
+/* define if your compiler has __sync_val_compare_and_swap */
+#undef HAVE___SYNC_VAL_COMPARE_AND_SWAP
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* How to access the PC from a struct ucontext */
+#undef PC_FROM_UCONTEXT
+
+/* Define to necessary symbol if this constant uses a non-standard name on
+ your system. */
+#undef PTHREAD_CREATE_JOINABLE
+
+/* The size of `void *', as computed by sizeof. */
+#undef SIZEOF_VOID_P
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* the namespace where STL code like vector<> is defined */
+#undef STL_NAMESPACE
+
+/* Version number of package */
+#undef VERSION
+
+/* Stops putting the code inside the Google namespace */
+#define _END_GOOGLE_NAMESPACE_ }
+
+/* Puts following code inside the Google namespace */
+#define _START_GOOGLE_NAMESPACE_ namespace google {
+
+/* Always the empty-string on non-windows systems. On windows, should be
+ "__declspec(dllexport)". This way, when we compile the dll, we export our
+ functions/classes. It's safe to define this here because config.h is only
+ used internally, to compile the DLL, and every DLL source file #includes
+ "config.h" before anything else. */
+#ifndef GOOGLE_GLOG_DLL_DECL
+# define GOOGLE_GLOG_IS_A_DLL 1 /* not set if you're statically linking */
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllexport)
+# define GOOGLE_GLOG_DLL_DECL_FOR_UNITTESTS __declspec(dllimport)
+#endif
diff --git a/extern/libmv/third_party/glog/src/windows/glog/log_severity.h b/extern/libmv/third_party/glog/src/windows/glog/log_severity.h
new file mode 100644
index 00000000000..5e7d09effb2
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/windows/glog/log_severity.h
@@ -0,0 +1,88 @@
+// This file is automatically generated from src/glog/log_severity.h
+// using src/windows/preprocess.sh.
+// DO NOT EDIT!
+
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef BASE_LOG_SEVERITY_H__
+#define BASE_LOG_SEVERITY_H__
+
+// Annoying stuff for windows -- makes sure clients can import these functions
+#ifndef GOOGLE_GLOG_DLL_DECL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllimport)
+# else
+# define GOOGLE_GLOG_DLL_DECL
+# endif
+#endif
+
+// Variables of type LogSeverity are widely taken to lie in the range
+// [0, NUM_SEVERITIES-1]. Be careful to preserve this assumption if
+// you ever need to change their values or add a new severity.
+typedef int LogSeverity;
+
+const int INFO = 0, WARNING = 1, ERROR = 2, FATAL = 3, NUM_SEVERITIES = 4;
+
+// DFATAL is FATAL in debug mode, ERROR in normal mode
+#ifdef NDEBUG
+#define DFATAL_LEVEL ERROR
+#else
+#define DFATAL_LEVEL FATAL
+#endif
+
+extern GOOGLE_GLOG_DLL_DECL const char* const LogSeverityNames[NUM_SEVERITIES];
+
+// NDEBUG usage helpers related to (RAW_)DCHECK:
+//
+// DEBUG_MODE is for small !NDEBUG uses like
+// if (DEBUG_MODE) foo.CheckThatFoo();
+// instead of substantially more verbose
+// #ifndef NDEBUG
+// foo.CheckThatFoo();
+// #endif
+//
+// IF_DEBUG_MODE is for small !NDEBUG uses like
+// IF_DEBUG_MODE( string error; )
+// DCHECK(Foo(&error)) << error;
+// instead of substantially more verbose
+// #ifndef NDEBUG
+// string error;
+// DCHECK(Foo(&error)) << error;
+// #endif
+//
+#ifdef NDEBUG
+enum { DEBUG_MODE = 0 };
+#define IF_DEBUG_MODE(x)
+#else
+enum { DEBUG_MODE = 1 };
+#define IF_DEBUG_MODE(x) x
+#endif
+
+#endif // BASE_LOG_SEVERITY_H__
diff --git a/extern/libmv/third_party/glog/src/windows/glog/logging.h b/extern/libmv/third_party/glog/src/windows/glog/logging.h
new file mode 100644
index 00000000000..de51586f8e3
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/windows/glog/logging.h
@@ -0,0 +1,1510 @@
+// This file is automatically generated from src/glog/logging.h.in
+// using src/windows/preprocess.sh.
+// DO NOT EDIT!
+
+// Copyright (c) 1999, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Ray Sidney
+//
+// This file contains #include information about logging-related stuff.
+// Pretty much everybody needs to #include this file so that they can
+// log various happenings.
+//
+#ifndef _LOGGING_H_
+#define _LOGGING_H_
+
+#include <errno.h>
+#include <string.h>
+#include <time.h>
+#include <string>
+#if 0
+# include <unistd.h>
+#endif
+#ifdef __DEPRECATED
+// Make GCC quiet.
+# undef __DEPRECATED
+# include <strstream>
+# define __DEPRECATED
+#else
+# include <strstream>
+#endif
+#include <vector>
+
+// Annoying stuff for windows -- makes sure clients can import these functions
+#ifndef GOOGLE_GLOG_DLL_DECL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllimport)
+# else
+# define GOOGLE_GLOG_DLL_DECL
+# endif
+#endif
+
+// We care a lot about number of bits things take up. Unfortunately,
+// systems define their bit-specific ints in a lot of different ways.
+// We use our own way, and have a typedef to get there.
+// Note: these commands below may look like "#if 1" or "#if 0", but
+// that's because they were constructed that way at ./configure time.
+// Look at logging.h.in to see how they're calculated (based on your config).
+#if 0
+#include <stdint.h> // the normal place uint16_t is defined
+#endif
+#if 0
+#include <sys/types.h> // the normal place u_int16_t is defined
+#endif
+#if 0
+#include <inttypes.h> // a third place for uint16_t or u_int16_t
+#endif
+
+#if 1
+#include "third_party/gflags/gflags.h"
+#endif
+
+namespace google {
+
+#if 0 // the C99 format
+typedef int32_t int32;
+typedef uint32_t uint32;
+typedef int64_t int64;
+typedef uint64_t uint64;
+#elif 0 // the BSD format
+typedef int32_t int32;
+typedef u_int32_t uint32;
+typedef int64_t int64;
+typedef u_int64_t uint64;
+#elif 1 // the windows (vc7) format
+typedef __int32 int32;
+typedef unsigned __int32 uint32;
+typedef __int64 int64;
+typedef unsigned __int64 uint64;
+#else
+#error Do not know how to define a 32-bit integer quantity on your system
+#endif
+
+}
+
+// The global value of GOOGLE_STRIP_LOG. All the messages logged to
+// LOG(XXX) with severity less than GOOGLE_STRIP_LOG will not be displayed.
+// If it can be determined at compile time that the message will not be
+// printed, the statement will be compiled out.
+//
+// Example: to strip out all INFO and WARNING messages, use the value
+// of 2 below. To make an exception for WARNING messages from a single
+// file, add "#define GOOGLE_STRIP_LOG 1" to that file _before_ including
+// base/logging.h
+#ifndef GOOGLE_STRIP_LOG
+#define GOOGLE_STRIP_LOG 0
+#endif
+
+// GCC can be told that a certain branch is not likely to be taken (for
+// instance, a CHECK failure), and use that information in static analysis.
+// Giving it this information can help it optimize for the common case in
+// the absence of better information (ie. -fprofile-arcs).
+//
+#ifndef GOOGLE_PREDICT_BRANCH_NOT_TAKEN
+#if 0
+#define GOOGLE_PREDICT_BRANCH_NOT_TAKEN(x) (__builtin_expect(x, 0))
+#else
+#define GOOGLE_PREDICT_BRANCH_NOT_TAKEN(x) x
+#endif
+#endif
+
+// Make a bunch of macros for logging. The way to log things is to stream
+// things to LOG(<a particular severity level>). E.g.,
+//
+// LOG(INFO) << "Found " << num_cookies << " cookies";
+//
+// You can capture log messages in a vector of strings, rather than
+// reporting them immediately:
+//
+// vector<string> errors;
+// LOG_STRING(ERROR, &errors) << "Couldn't parse cookie #" << cookie_num;
+//
+// This pushes back the new error onto 'errors'; if given a NULL pointer,
+// it reports the error via LOG(ERROR).
+//
+// You can also do conditional logging:
+//
+// LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// You can also do occasional logging (log every n'th occurrence of an
+// event):
+//
+// LOG_EVERY_N(INFO, 10) << "Got the " << COUNTER << "th cookie";
+//
+// The above will cause log messages to be output on the 1st, 11th, 21st, ...
+// times it is executed. Note that the special COUNTER value is used to
+// identify which repetition is happening.
+//
+// You can also do occasional conditional logging (log every n'th
+// occurrence of an event, when condition is satisfied):
+//
+// LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << COUNTER
+// << "th big cookie";
+//
+// You can log messages the first N times your code executes a line. E.g.
+//
+// LOG_FIRST_N(INFO, 20) << "Got the " << COUNTER << "th cookie";
+//
+// Outputs log messages for the first 20 times it is executed.
+//
+// Analogous SYSLOG, SYSLOG_IF, and SYSLOG_EVERY_N macros are available.
+// These log to syslog as well as to the normal logs. If you use these at
+// all, you need to be aware that syslog can drastically reduce performance,
+// especially if it is configured for remote logging! Don't use these
+// unless you fully understand this and have a concrete need to use them.
+// Even then, try to minimize your use of them.
+//
+// There are also "debug mode" logging macros like the ones above:
+//
+// DLOG(INFO) << "Found cookies";
+//
+// DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
+//
+// DLOG_EVERY_N(INFO, 10) << "Got the " << COUNTER << "th cookie";
+//
+// All "debug mode" logging is compiled away to nothing for non-debug mode
+// compiles.
+//
+// We also have
+//
+// LOG_ASSERT(assertion);
+// DLOG_ASSERT(assertion);
+//
+// which is syntactic sugar for {,D}LOG_IF(FATAL, assert fails) << assertion;
+//
+// There are "verbose level" logging macros. They look like
+//
+// VLOG(1) << "I'm printed when you run the program with --v=1 or more";
+// VLOG(2) << "I'm printed when you run the program with --v=2 or more";
+//
+// These always log at the INFO log level (when they log at all).
+// The verbose logging can also be turned on module-by-module. For instance,
+// --vmodule=mapreduce=2,file=1,gfs*=3 --v=0
+// will cause:
+// a. VLOG(2) and lower messages to be printed from mapreduce.{h,cc}
+// b. VLOG(1) and lower messages to be printed from file.{h,cc}
+// c. VLOG(3) and lower messages to be printed from files prefixed with "gfs"
+// d. VLOG(0) and lower messages to be printed from elsewhere
+//
+// The wildcarding functionality shown by (c) supports both '*' (match
+// 0 or more characters) and '?' (match any single character) wildcards.
+//
+// There's also VLOG_IS_ON(n) "verbose level" condition macro. To be used as
+//
+// if (VLOG_IS_ON(2)) {
+// // do some logging preparation and logging
+// // that can't be accomplished with just VLOG(2) << ...;
+// }
+//
+// There are also VLOG_IF, VLOG_EVERY_N and VLOG_IF_EVERY_N "verbose level"
+// condition macros for the simple cases where no extra computation or
+// preparation for the logging is needed.
+// VLOG_IF(1, (size > 1024))
+// << "I'm printed when size is more than 1024 and when you run the "
+// "program with --v=1 or more";
+// VLOG_EVERY_N(1, 10)
+// << "I'm printed every 10th occurrence, and when you run the program "
+// "with --v=1 or more. Present occurence is " << COUNTER;
+// VLOG_IF_EVERY_N(1, (size > 1024), 10)
+// << "I'm printed on every 10th occurence of case when size is more "
+// " than 1024, when you run the program with --v=1 or more. ";
+// "Present occurence is " << COUNTER;
+//
+// The supported severity levels for macros that allow you to specify one
+// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
+// Note that messages of a given severity are logged not only in the
+// logfile for that severity, but also in all logfiles of lower severity.
+// E.g., a message of severity FATAL will be logged to the logfiles of
+// severity FATAL, ERROR, WARNING, and INFO.
+//
+// There is also the special severity of DFATAL, which logs FATAL in
+// debug mode, ERROR in normal mode.
+//
+// Very important: logging a message at the FATAL severity level causes
+// the program to terminate (after the message is logged).
+//
+// Unless otherwise specified, logs will be written to the filename
+// "<program name>.<hostname>.<user name>.log.<severity level>.", followed
+// by the date, time, and pid (you can't prevent the date, time, and pid
+// from being in the filename).
+//
+// The logging code takes two flags:
+// --v=# set the verbose level
+// --logtostderr log all the messages to stderr instead of to logfiles
+
+// LOG LINE PREFIX FORMAT
+//
+// Log lines have this form:
+//
+// Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+//
+// where the fields are defined as follows:
+//
+// L A single character, representing the log level
+// (eg 'I' for INFO)
+// mm The month (zero padded; ie May is '05')
+// dd The day (zero padded)
+// hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+// threadid The space-padded thread ID as returned by GetTID()
+// (this matches the PID on Linux)
+// file The file name
+// line The line number
+// msg The user-supplied message
+//
+// Example:
+//
+// I1103 11:57:31.739339 24395 google.cc:2341] Command line: ./some_prog
+// I1103 11:57:31.739403 24395 google.cc:2342] Process id 24395
+//
+// NOTE: although the microseconds are useful for comparing events on
+// a single machine, clocks on different machines may not be well
+// synchronized. Hence, use caution when comparing the low bits of
+// timestamps from different machines.
+
+#ifndef DECLARE_VARIABLE
+#define MUST_UNDEF_GFLAGS_DECLARE_MACROS
+#define DECLARE_VARIABLE(type, name, tn) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##tn##_instead { \
+ extern GOOGLE_GLOG_DLL_DECL type FLAGS_##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_##tn##_instead::FLAGS_##name
+
+// bool specialization
+#define DECLARE_bool(name) \
+ DECLARE_VARIABLE(bool, name, bool)
+
+// int32 specialization
+#define DECLARE_int32(name) \
+ DECLARE_VARIABLE(google::int32, name, int32)
+
+// Special case for string, because we have to specify the namespace
+// std::string, which doesn't play nicely with our FLAG__namespace hackery.
+#define DECLARE_string(name) \
+ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
+ extern GOOGLE_GLOG_DLL_DECL std::string FLAGS_##name; \
+ } \
+ using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
+#endif
+
+// Set whether log messages go to stderr instead of logfiles
+DECLARE_bool(logtostderr);
+
+// Set whether log messages go to stderr in addition to logfiles.
+DECLARE_bool(alsologtostderr);
+
+// Log messages at a level >= this flag are automatically sent to
+// stderr in addition to log files.
+DECLARE_int32(stderrthreshold);
+
+// Set whether the log prefix should be prepended to each line of output.
+DECLARE_bool(log_prefix);
+
+// Log messages at a level <= this flag are buffered.
+// Log messages at a higher level are flushed immediately.
+DECLARE_int32(logbuflevel);
+
+// Sets the maximum number of seconds which logs may be buffered for.
+DECLARE_int32(logbufsecs);
+
+// Log suppression level: messages logged at a lower level than this
+// are suppressed.
+DECLARE_int32(minloglevel);
+
+// If specified, logfiles are written into this directory instead of the
+// default logging directory.
+DECLARE_string(log_dir);
+
+// Sets the path of the directory into which to put additional links
+// to the log files.
+DECLARE_string(log_link);
+
+DECLARE_int32(v); // in vlog_is_on.cc
+
+// Sets the maximum log file size (in MB).
+DECLARE_int32(max_log_size);
+
+// Sets whether to avoid logging to the disk if the disk is full.
+DECLARE_bool(stop_logging_if_full_disk);
+
+#ifdef MUST_UNDEF_GFLAGS_DECLARE_MACROS
+#undef MUST_UNDEF_GFLAGS_DECLARE_MACROS
+#undef DECLARE_VARIABLE
+#undef DECLARE_bool
+#undef DECLARE_int32
+#undef DECLARE_string
+#endif
+
+// Log messages below the GOOGLE_STRIP_LOG level will be compiled away for
+// security reasons. See LOG(severity) below.
+
+// A few definitions of macros that don't generate much code. Since
+// LOG(INFO) and its ilk are used all over our code, it's
+// better to have compact code for these operations.
+
+#if GOOGLE_STRIP_LOG == 0
+#define COMPACT_GOOGLE_LOG_INFO google::LogMessage( \
+ __FILE__, __LINE__)
+#define LOG_TO_STRING_INFO(message) google::LogMessage( \
+ __FILE__, __LINE__, google::INFO, message)
+#else
+#define COMPACT_GOOGLE_LOG_INFO google::NullStream()
+#define LOG_TO_STRING_INFO(message) google::NullStream()
+#endif
+
+#if GOOGLE_STRIP_LOG <= 1
+#define COMPACT_GOOGLE_LOG_WARNING google::LogMessage( \
+ __FILE__, __LINE__, google::WARNING)
+#define LOG_TO_STRING_WARNING(message) google::LogMessage( \
+ __FILE__, __LINE__, google::WARNING, message)
+#else
+#define COMPACT_GOOGLE_LOG_WARNING google::NullStream()
+#define LOG_TO_STRING_WARNING(message) google::NullStream()
+#endif
+
+#if GOOGLE_STRIP_LOG <= 2
+#define COMPACT_GOOGLE_LOG_ERROR google::LogMessage( \
+ __FILE__, __LINE__, google::ERROR)
+#define LOG_TO_STRING_ERROR(message) google::LogMessage( \
+ __FILE__, __LINE__, google::ERROR, message)
+#else
+#define COMPACT_GOOGLE_LOG_ERROR google::NullStream()
+#define LOG_TO_STRING_ERROR(message) google::NullStream()
+#endif
+
+#if GOOGLE_STRIP_LOG <= 3
+#define COMPACT_GOOGLE_LOG_FATAL google::LogMessageFatal( \
+ __FILE__, __LINE__)
+#define LOG_TO_STRING_FATAL(message) google::LogMessage( \
+ __FILE__, __LINE__, google::FATAL, message)
+#else
+#define COMPACT_GOOGLE_LOG_FATAL google::NullStreamFatal()
+#define LOG_TO_STRING_FATAL(message) google::NullStreamFatal()
+#endif
+
+// For DFATAL, we want to use LogMessage (as opposed to
+// LogMessageFatal), to be consistent with the original behavior.
+#ifdef NDEBUG
+#define COMPACT_GOOGLE_LOG_DFATAL COMPACT_GOOGLE_LOG_ERROR
+#elif GOOGLE_STRIP_LOG <= 3
+#define COMPACT_GOOGLE_LOG_DFATAL google::LogMessage( \
+ __FILE__, __LINE__, google::FATAL)
+#else
+#define COMPACT_GOOGLE_LOG_DFATAL google::NullStreamFatal()
+#endif
+
+#define GOOGLE_LOG_INFO(counter) google::LogMessage(__FILE__, __LINE__, google::INFO, counter, &google::LogMessage::SendToLog)
+#define SYSLOG_INFO(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::INFO, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+#define GOOGLE_LOG_WARNING(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::WARNING, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_WARNING(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::WARNING, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+#define GOOGLE_LOG_ERROR(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::ERROR, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_ERROR(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::ERROR, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+#define GOOGLE_LOG_FATAL(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::FATAL, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_FATAL(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::FATAL, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+#define GOOGLE_LOG_DFATAL(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::DFATAL_LEVEL, counter, \
+ &google::LogMessage::SendToLog)
+#define SYSLOG_DFATAL(counter) \
+ google::LogMessage(__FILE__, __LINE__, google::DFATAL_LEVEL, counter, \
+ &google::LogMessage::SendToSyslogAndLog)
+
+#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) || defined(__CYGWIN32__)
+// A very useful logging macro to log windows errors:
+#define LOG_SYSRESULT(result) \
+ if (FAILED(result)) { \
+ LPTSTR message = NULL; \
+ LPTSTR msg = reinterpret_cast<LPTSTR>(&message); \
+ DWORD message_length = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | \
+ FORMAT_MESSAGE_FROM_SYSTEM, \
+ 0, result, 0, msg, 100, NULL); \
+ if (message_length > 0) { \
+ google::LogMessage(__FILE__, __LINE__, ERROR, 0, \
+ &google::LogMessage::SendToLog).stream() << message; \
+ LocalFree(message); \
+ } \
+ }
+#endif
+
+// We use the preprocessor's merging operator, "##", so that, e.g.,
+// LOG(INFO) becomes the token GOOGLE_LOG_INFO. There's some funny
+// subtle difference between ostream member streaming functions (e.g.,
+// ostream::operator<<(int) and ostream non-member streaming functions
+// (e.g., ::operator<<(ostream&, string&)): it turns out that it's
+// impossible to stream something like a string directly to an unnamed
+// ostream. We employ a neat hack by calling the stream() member
+// function of LogMessage which seems to avoid the problem.
+#define LOG(severity) COMPACT_GOOGLE_LOG_ ## severity.stream()
+#define SYSLOG(severity) SYSLOG_ ## severity(0).stream()
+
+namespace google {
+
+// They need the definitions of integer types.
+#include "glog/log_severity.h"
+#include "glog/vlog_is_on.h"
+
+// Initialize google's logging library. You will see the program name
+// specified by argv0 in log outputs.
+GOOGLE_GLOG_DLL_DECL void InitGoogleLogging(const char* argv0);
+
+// Shutdown google's logging library.
+GOOGLE_GLOG_DLL_DECL void ShutdownGoogleLogging();
+
+// Install a function which will be called after LOG(FATAL).
+GOOGLE_GLOG_DLL_DECL void InstallFailureFunction(void (*fail_func)());
+
+class LogSink; // defined below
+
+// If a non-NULL sink pointer is given, we push this message to that sink.
+// For LOG_TO_SINK we then do normal LOG(severity) logging as well.
+// This is useful for capturing messages and passing/storing them
+// somewhere more specific than the global log of the process.
+// Argument types:
+// LogSink* sink;
+// LogSeverity severity;
+// The cast is to disambiguate NULL arguments.
+#define LOG_TO_SINK(sink, severity) \
+ google::LogMessage( \
+ __FILE__, __LINE__, \
+ google::severity, \
+ static_cast<google::LogSink*>(sink), true).stream()
+#define LOG_TO_SINK_BUT_NOT_TO_LOGFILE(sink, severity) \
+ google::LogMessage( \
+ __FILE__, __LINE__, \
+ google::severity, \
+ static_cast<google::LogSink*>(sink), false).stream()
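+// Usage sketch (illustrative; MySink is a hypothetical LogSink subclass):
+//   MySink sink;
+//   LOG_TO_SINK(&sink, INFO) << "sent to the sink and the normal logs";
+//   LOG_TO_SINK_BUT_NOT_TO_LOGFILE(&sink, INFO) << "sent to the sink only";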
+
+// If a non-NULL string pointer is given, we write this message to that string.
+// We then do normal LOG(severity) logging as well.
+// This is useful for capturing messages and storing them somewhere more
+// specific than the global log of the process.
+// Argument types:
+// string* message;
+// LogSeverity severity;
+// The cast is to disambiguate NULL arguments.
+// NOTE: LOG(severity) expands to LogMessage().stream() for the specified
+// severity.
+#define LOG_TO_STRING(severity, message) \
+ LOG_TO_STRING_##severity(static_cast<string*>(message)).stream()
+
+// If a non-NULL pointer is given, we push the message onto the end
+// of a vector of strings; otherwise, we report it with LOG(severity).
+// This is handy for capturing messages and perhaps passing them back
+// to the caller, rather than reporting them immediately.
+// Argument types:
+// LogSeverity severity;
+// vector<string> *outvec;
+// The cast is to disambiguate NULL arguments.
+#define LOG_STRING(severity, outvec) \
+ LOG_TO_STRING_##severity(static_cast<vector<string>*>(outvec)).stream()
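+// Illustrative usage:
+//   std::vector<std::string> errors;
+//   LOG_STRING(ERROR, &errors) << "Pushed onto 'errors' rather than logged.";
+//   LOG_STRING(ERROR, NULL) << "Logged normally.";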
+
+#define LOG_IF(severity, condition) \
+ !(condition) ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+#define SYSLOG_IF(severity, condition) \
+ !(condition) ? (void) 0 : google::LogMessageVoidify() & SYSLOG(severity)
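+// For example:
+//   LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";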
+
+#define LOG_ASSERT(condition) \
+ LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition
+#define SYSLOG_ASSERT(condition) \
+ SYSLOG_IF(FATAL, !(condition)) << "Assert failed: " #condition
+
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode. Therefore, it is safe to do things like:
+// CHECK(fp->Write(x) == 4)
+#define CHECK(condition) \
+ LOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN(!(condition))) \
+ << "Check failed: " #condition " "
+
+// A container for a string pointer which can be evaluated to a bool -
+// true iff the pointer is non-NULL.
+struct CheckOpString {
+ CheckOpString(std::string* str) : str_(str) { }
+ // No destructor: if str_ is non-NULL, we're about to LOG(FATAL),
+ // so there's no point in cleaning up str_.
+ operator bool() const {
+ return GOOGLE_PREDICT_BRANCH_NOT_TAKEN(str_ != NULL);
+ }
+ std::string* str_;
+};
+
+// This function is overloaded for integral types so that static const
+// integral members that are declared in classes but not defined can be
+// used as arguments to CHECK* macros. That usage is not encouraged, though.
+template <class T>
+inline const T& GetReferenceableValue(const T& t) { return t; }
+inline char GetReferenceableValue(char t) { return t; }
+inline unsigned char GetReferenceableValue(unsigned char t) { return t; }
+inline signed char GetReferenceableValue(signed char t) { return t; }
+inline short GetReferenceableValue(short t) { return t; }
+inline unsigned short GetReferenceableValue(unsigned short t) { return t; }
+inline int GetReferenceableValue(int t) { return t; }
+inline unsigned int GetReferenceableValue(unsigned int t) { return t; }
+inline long GetReferenceableValue(long t) { return t; }
+inline unsigned long GetReferenceableValue(unsigned long t) { return t; }
+inline long long GetReferenceableValue(long long t) { return t; }
+inline unsigned long long GetReferenceableValue(unsigned long long t) {
+ return t;
+}
+
+// This is a dummy class to define the following operator.
+struct DummyClassToDefineOperator {};
+
+}
+
+// Define global operator<< to declare using ::operator<<.
+// This declaration will allow us to use CHECK macros for
+// user-defined classes which have operator<< (e.g., stl_logging.h).
+inline std::ostream& operator<<(
+ std::ostream& out, const google::DummyClassToDefineOperator&) {
+ return out;
+}
+
+namespace google {
+
+// Build the error message string.
+template<class t1, class t2>
+std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
+ // This means that we cannot use stl_logging if the compiler doesn't
+ // support using-declarations for operators.
+ // TODO(hamaji): Figure out a way to fix.
+#if 1
+ using ::operator<<;
+#endif
+ std::strstream ss;
+ ss << names << " (" << v1 << " vs. " << v2 << ")";
+ return new std::string(ss.str(), ss.pcount());
+}
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+#define DEFINE_CHECK_OP_IMPL(name, op) \
+ template <class t1, class t2> \
+ inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \
+ const char* names) { \
+ if (v1 op v2) return NULL; \
+ else return MakeCheckOpString(v1, v2, names); \
+ } \
+ inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
+ return Check##name##Impl<int, int>(v1, v2, names); \
+ }
+
+// Use _EQ, _NE, _LE, etc. in case the file including base/logging.h
+// provides its own #defines for the simpler names EQ, NE, LE, etc.
+// This happens if, for example, those are used as token names in a
+// yacc grammar.
+DEFINE_CHECK_OP_IMPL(_EQ, ==)
+DEFINE_CHECK_OP_IMPL(_NE, !=)
+DEFINE_CHECK_OP_IMPL(_LE, <=)
+DEFINE_CHECK_OP_IMPL(_LT, < )
+DEFINE_CHECK_OP_IMPL(_GE, >=)
+DEFINE_CHECK_OP_IMPL(_GT, > )
+#undef DEFINE_CHECK_OP_IMPL
+
+// Helper macro for binary operators.
+// Don't use this macro directly in your code, use CHECK_EQ et al below.
+
+#if defined(STATIC_ANALYSIS)
+// Only used so that static analysis tools know this is equivalent to assert
+#define CHECK_OP_LOG(name, op, val1, val2, log) CHECK((val1) op (val2))
+#elif !defined(NDEBUG)
+// In debug mode, avoid constructing CheckOpStrings if possible,
+// to reduce the overhead of CHECK statements by 2x.
+// Real DCHECK-heavy tests have seen 1.5x speedups.
+
+// The meaning of "string" might be different between now and
+// when this macro gets invoked (e.g., if someone is experimenting
+// with other string implementations that get defined after this
+// file is included). Save the current meaning now and use it
+// in the macro.
+typedef std::string _Check_string;
+#define CHECK_OP_LOG(name, op, val1, val2, log) \
+ while (google::_Check_string* _result = \
+ google::Check##name##Impl( \
+ google::GetReferenceableValue(val1), \
+ google::GetReferenceableValue(val2), \
+ #val1 " " #op " " #val2)) \
+ log(__FILE__, __LINE__, \
+ google::CheckOpString(_result)).stream()
+#else
+// In optimized mode, use CheckOpString to hint to compiler that
+// the while condition is unlikely.
+#define CHECK_OP_LOG(name, op, val1, val2, log) \
+ while (google::CheckOpString _result = \
+ google::Check##name##Impl( \
+ google::GetReferenceableValue(val1), \
+ google::GetReferenceableValue(val2), \
+ #val1 " " #op " " #val2)) \
+ log(__FILE__, __LINE__, _result).stream()
+#endif // STATIC_ANALYSIS, !NDEBUG
+
+#if GOOGLE_STRIP_LOG <= 3
+#define CHECK_OP(name, op, val1, val2) \
+ CHECK_OP_LOG(name, op, val1, val2, google::LogMessageFatal)
+#else
+#define CHECK_OP(name, op, val1, val2) \
+ CHECK_OP_LOG(name, op, val1, val2, google::NullStreamFatal)
+#endif // STRIP_LOG <= 3
+
+// Equality/Inequality checks - compare two values, and log a FATAL message
+// including the two values when the result is not as expected. The values
+// must have operator<<(ostream, ...) defined.
+//
+// You may append to the error message like so:
+// CHECK_NE(1, 2) << ": The world must be ending!";
+//
+// We are very careful to ensure that each argument is evaluated exactly
+// once, and that anything which is legal to pass as a function argument is
+// legal here. In particular, the arguments may be temporary expressions
+// which will end up being destroyed at the end of the apparent statement,
+// for example:
+// CHECK_EQ(string("abc")[1], 'b');
+//
+// WARNING: These don't compile correctly if one of the arguments is a pointer
+// and the other is NULL. To work around this, simply static_cast NULL to the
+// type of the desired pointer.
+
+#define CHECK_EQ(val1, val2) CHECK_OP(_EQ, ==, val1, val2)
+#define CHECK_NE(val1, val2) CHECK_OP(_NE, !=, val1, val2)
+#define CHECK_LE(val1, val2) CHECK_OP(_LE, <=, val1, val2)
+#define CHECK_LT(val1, val2) CHECK_OP(_LT, < , val1, val2)
+#define CHECK_GE(val1, val2) CHECK_OP(_GE, >=, val1, val2)
+#define CHECK_GT(val1, val2) CHECK_OP(_GT, > , val1, val2)
+
+// Check that the input is non-NULL. This is very useful in constructor
+// initializer lists.
+
+#define CHECK_NOTNULL(val) \
+ google::CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
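+// Illustrative usage in an initializer list (Widget and ptr_ are hypothetical):
+//   struct Frobber {
+//     explicit Frobber(Widget* w) : ptr_(CHECK_NOTNULL(w)) {}
+//     Widget* ptr_;
+//   };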
+
+// Helper functions for string comparisons.
+// To avoid bloat, the definitions are in logging.cc.
+#define DECLARE_CHECK_STROP_IMPL(func, expected) \
+ GOOGLE_GLOG_DLL_DECL std::string* Check##func##expected##Impl( \
+ const char* s1, const char* s2, const char* names);
+DECLARE_CHECK_STROP_IMPL(strcmp, true)
+DECLARE_CHECK_STROP_IMPL(strcmp, false)
+DECLARE_CHECK_STROP_IMPL(strcasecmp, true)
+DECLARE_CHECK_STROP_IMPL(strcasecmp, false)
+#undef DECLARE_CHECK_STROP_IMPL
+
+// Helper macro for string comparisons.
+// Don't use this macro directly in your code, use CHECK_STREQ et al below.
+#define CHECK_STROP(func, op, expected, s1, s2) \
+ while (google::CheckOpString _result = \
+ google::Check##func##expected##Impl((s1), (s2), \
+ #s1 " " #op " " #s2)) \
+ LOG(FATAL) << *_result.str_
+
+
+// String (char*) equality/inequality checks.
+// CASE versions are case-insensitive.
+//
+// Note that "s1" and "s2" may be temporary strings which are destroyed
+// by the compiler at the end of the current "full expression"
+// (e.g. CHECK_STREQ(Foo().c_str(), Bar().c_str())).
+
+#define CHECK_STREQ(s1, s2) CHECK_STROP(strcmp, ==, true, s1, s2)
+#define CHECK_STRNE(s1, s2) CHECK_STROP(strcmp, !=, false, s1, s2)
+#define CHECK_STRCASEEQ(s1, s2) CHECK_STROP(strcasecmp, ==, true, s1, s2)
+#define CHECK_STRCASENE(s1, s2) CHECK_STROP(strcasecmp, !=, false, s1, s2)
+
+#define CHECK_INDEX(I,A) CHECK(I < (sizeof(A)/sizeof(A[0])))
+#define CHECK_BOUND(B,A) CHECK(B <= (sizeof(A)/sizeof(A[0])))
+
+#define CHECK_DOUBLE_EQ(val1, val2) \
+ do { \
+ CHECK_LE((val1), (val2)+0.000000000000001L); \
+ CHECK_GE((val1), (val2)-0.000000000000001L); \
+ } while (0)
+
+#define CHECK_NEAR(val1, val2, margin) \
+ do { \
+ CHECK_LE((val1), (val2)+(margin)); \
+ CHECK_GE((val1), (val2)-(margin)); \
+ } while (0)
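+// Illustrative usage (names and tolerance are hypothetical):
+//   CHECK_NEAR(interpolated_value, expected_value, 1e-9);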
+
+// perror()..googly style!
+//
+// PLOG() and PLOG_IF() and PCHECK() behave exactly like their LOG* and
+// CHECK equivalents with the addition that they postpend a description
+// of the current state of errno to their output lines.
+
+#define PLOG(severity) GOOGLE_PLOG(severity, 0).stream()
+
+#define GOOGLE_PLOG(severity, counter) \
+ google::ErrnoLogMessage( \
+ __FILE__, __LINE__, google::severity, counter, \
+ &google::LogMessage::SendToLog)
+
+#define PLOG_IF(severity, condition) \
+ !(condition) ? (void) 0 : google::LogMessageVoidify() & PLOG(severity)
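+// Illustrative usage (fp is hypothetical):
+//   PLOG_IF(ERROR, fclose(fp) != 0) << "Couldn't close the temp file";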
+
+// A CHECK() macro that postpends errno if the condition is false. E.g.
+//
+// if (poll(fds, nfds, timeout) == -1) { PCHECK(errno == EINTR); ... }
+#define PCHECK(condition) \
+ PLOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN(!(condition))) \
+ << "Check failed: " #condition " "
+
+// A CHECK() macro that lets you assert the success of a function that
+// returns -1 and sets errno in case of an error. E.g.
+//
+// CHECK_ERR(mkdir(path, 0700));
+//
+// or
+//
+// int fd = open(filename, flags); CHECK_ERR(fd) << ": open " << filename;
+#define CHECK_ERR(invocation) \
+PLOG_IF(FATAL, GOOGLE_PREDICT_BRANCH_NOT_TAKEN((invocation) == -1)) \
+ << #invocation
+
+// Use macro expansion to create, for each use of LOG_EVERY_N(), static
+// variables with the __LINE__ expansion as part of the variable name.
+#define LOG_EVERY_N_VARNAME(base, line) LOG_EVERY_N_VARNAME_CONCAT(base, line)
+#define LOG_EVERY_N_VARNAME_CONCAT(base, line) base ## line
+
+#define LOG_OCCURRENCES LOG_EVERY_N_VARNAME(occurrences_, __LINE__)
+#define LOG_OCCURRENCES_MOD_N LOG_EVERY_N_VARNAME(occurrences_mod_n_, __LINE__)
+
+#define SOME_KIND_OF_LOG_EVERY_N(severity, n, what_to_do) \
+ static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+ ++LOG_OCCURRENCES; \
+ if (++LOG_OCCURRENCES_MOD_N > n) LOG_OCCURRENCES_MOD_N -= n; \
+ if (LOG_OCCURRENCES_MOD_N == 1) \
+ google::LogMessage( \
+ __FILE__, __LINE__, google::severity, LOG_OCCURRENCES, \
+ &what_to_do).stream()
+
+#define SOME_KIND_OF_LOG_IF_EVERY_N(severity, condition, n, what_to_do) \
+ static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+ ++LOG_OCCURRENCES; \
+ if (condition && \
+ ((LOG_OCCURRENCES_MOD_N=(LOG_OCCURRENCES_MOD_N + 1) % n) == (1 % n))) \
+ google::LogMessage( \
+ __FILE__, __LINE__, google::severity, LOG_OCCURRENCES, \
+ &what_to_do).stream()
+
+#define SOME_KIND_OF_PLOG_EVERY_N(severity, n, what_to_do) \
+ static int LOG_OCCURRENCES = 0, LOG_OCCURRENCES_MOD_N = 0; \
+ ++LOG_OCCURRENCES; \
+ if (++LOG_OCCURRENCES_MOD_N > n) LOG_OCCURRENCES_MOD_N -= n; \
+ if (LOG_OCCURRENCES_MOD_N == 1) \
+ google::ErrnoLogMessage( \
+ __FILE__, __LINE__, google::severity, LOG_OCCURRENCES, \
+ &what_to_do).stream()
+
+#define SOME_KIND_OF_LOG_FIRST_N(severity, n, what_to_do) \
+ static int LOG_OCCURRENCES = 0; \
+ if (LOG_OCCURRENCES <= n) \
+ ++LOG_OCCURRENCES; \
+ if (LOG_OCCURRENCES <= n) \
+ google::LogMessage( \
+ __FILE__, __LINE__, google::severity, LOG_OCCURRENCES, \
+ &what_to_do).stream()
+
+namespace glog_internal_namespace_ {
+template <bool>
+struct CompileAssert {
+};
+struct CrashReason;
+} // namespace glog_internal_namespace_
+
+#define GOOGLE_GLOG_COMPILE_ASSERT(expr, msg) \
+ typedef google::glog_internal_namespace_::CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
+
+#define LOG_EVERY_N(severity, n) \
+ GOOGLE_GLOG_COMPILE_ASSERT(google::severity < \
+ google::NUM_SEVERITIES, \
+ INVALID_REQUESTED_LOG_SEVERITY); \
+ SOME_KIND_OF_LOG_EVERY_N(severity, (n), google::LogMessage::SendToLog)
+
+#define SYSLOG_EVERY_N(severity, n) \
+ SOME_KIND_OF_LOG_EVERY_N(severity, (n), google::LogMessage::SendToSyslogAndLog)
+
+#define PLOG_EVERY_N(severity, n) \
+ SOME_KIND_OF_PLOG_EVERY_N(severity, (n), google::LogMessage::SendToLog)
+
+#define LOG_FIRST_N(severity, n) \
+ SOME_KIND_OF_LOG_FIRST_N(severity, (n), google::LogMessage::SendToLog)
+
+#define LOG_IF_EVERY_N(severity, condition, n) \
+ SOME_KIND_OF_LOG_IF_EVERY_N(severity, (condition), (n), google::LogMessage::SendToLog)
+
+// We want the special COUNTER value available for LOG_EVERY_X()'ed messages.
+enum PRIVATE_Counter {COUNTER};
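+// For example:
+//   LOG_EVERY_N(INFO, 10) << "Got the " << google::COUNTER << "th cookie";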
+
+
+// Plus some debug-logging macros that get compiled to nothing for production
+
+#ifndef NDEBUG
+
+#define DLOG(severity) LOG(severity)
+#define DVLOG(verboselevel) VLOG(verboselevel)
+#define DLOG_IF(severity, condition) LOG_IF(severity, condition)
+#define DLOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n)
+#define DLOG_IF_EVERY_N(severity, condition, n) \
+ LOG_IF_EVERY_N(severity, condition, n)
+#define DLOG_ASSERT(condition) LOG_ASSERT(condition)
+
+// Debug-only checking. Not executed in NDEBUG mode.
+#define DCHECK(condition) CHECK(condition)
+#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
+#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
+#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
+#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
+#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
+#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
+#define DCHECK_NOTNULL(val) CHECK_NOTNULL(val)
+#define DCHECK_STREQ(str1, str2) CHECK_STREQ(str1, str2)
+#define DCHECK_STRCASEEQ(str1, str2) CHECK_STRCASEEQ(str1, str2)
+#define DCHECK_STRNE(str1, str2) CHECK_STRNE(str1, str2)
+#define DCHECK_STRCASENE(str1, str2) CHECK_STRCASENE(str1, str2)
+
+#else // NDEBUG
+
+#define DLOG(severity) \
+ true ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+
+#define DVLOG(verboselevel) \
+ (true || !VLOG_IS_ON(verboselevel)) ? \
+ (void) 0 : google::LogMessageVoidify() & LOG(INFO)
+
+#define DLOG_IF(severity, condition) \
+ (true || !(condition)) ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+
+#define DLOG_EVERY_N(severity, n) \
+ true ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+
+#define DLOG_IF_EVERY_N(severity, condition, n) \
+ (true || !(condition)) ? (void) 0 : google::LogMessageVoidify() & LOG(severity)
+
+#define DLOG_ASSERT(condition) \
+ true ? (void) 0 : LOG_ASSERT(condition)
+
+#define DCHECK(condition) \
+ while (false) \
+ CHECK(condition)
+
+#define DCHECK_EQ(val1, val2) \
+ while (false) \
+ CHECK_EQ(val1, val2)
+
+#define DCHECK_NE(val1, val2) \
+ while (false) \
+ CHECK_NE(val1, val2)
+
+#define DCHECK_LE(val1, val2) \
+ while (false) \
+ CHECK_LE(val1, val2)
+
+#define DCHECK_LT(val1, val2) \
+ while (false) \
+ CHECK_LT(val1, val2)
+
+#define DCHECK_GE(val1, val2) \
+ while (false) \
+ CHECK_GE(val1, val2)
+
+#define DCHECK_GT(val1, val2) \
+ while (false) \
+ CHECK_GT(val1, val2)
+
+#define DCHECK_NOTNULL(val) (val)
+
+#define DCHECK_STREQ(str1, str2) \
+ while (false) \
+ CHECK_STREQ(str1, str2)
+
+#define DCHECK_STRCASEEQ(str1, str2) \
+ while (false) \
+ CHECK_STRCASEEQ(str1, str2)
+
+#define DCHECK_STRNE(str1, str2) \
+ while (false) \
+ CHECK_STRNE(str1, str2)
+
+#define DCHECK_STRCASENE(str1, str2) \
+ while (false) \
+ CHECK_STRCASENE(str1, str2)
+
+
+#endif // NDEBUG
+
+// Log only in verbose mode.
+
+#define VLOG(verboselevel) LOG_IF(INFO, VLOG_IS_ON(verboselevel))
+
+#define VLOG_IF(verboselevel, condition) \
+ LOG_IF(INFO, (condition) && VLOG_IS_ON(verboselevel))
+
+#define VLOG_EVERY_N(verboselevel, n) \
+ LOG_IF_EVERY_N(INFO, VLOG_IS_ON(verboselevel), n)
+
+#define VLOG_IF_EVERY_N(verboselevel, condition, n) \
+ LOG_IF_EVERY_N(INFO, (condition) && VLOG_IS_ON(verboselevel), n)
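+// For example:
+//   VLOG(1) << "I'm printed when you run the program with --v=1 or higher";
+//   VLOG(2) << "I'm printed when you run the program with --v=2 or higher";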
+
+//
+// This class more or less represents a particular log message. You
+// create an instance of LogMessage and then stream stuff to it.
+// When you finish streaming to it, ~LogMessage is called and the
+// full message gets streamed to the appropriate destination.
+//
+// You shouldn't actually use LogMessage's constructor to log things,
+// though. You should use the LOG() macro (and variants thereof)
+// above.
+class GOOGLE_GLOG_DLL_DECL LogMessage {
+public:
+ enum {
+ // Passing kNoLogPrefix for the line number disables the
+ // log-message prefix. Useful for using the LogMessage
+ // infrastructure as a printing utility. See also the --log_prefix
+ // flag for controlling the log-message prefix on an
+ // application-wide basis.
+ kNoLogPrefix = -1
+ };
+
+ // LogStream inherits from a non-DLL-exported class (std::ostrstream),
+ // and VC++ produces a warning for this situation.
+ // However, MSDN says "C4275 can be ignored in Microsoft Visual C++
+ // 2005 if you are deriving from a type in the Standard C++ Library"
+ // http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+ // Let's just ignore the warning.
+#ifdef _MSC_VER
+# pragma warning(disable: 4275)
+#endif
+ class GOOGLE_GLOG_DLL_DECL LogStream : public std::ostrstream {
+#ifdef _MSC_VER
+# pragma warning(default: 4275)
+#endif
+ public:
+ LogStream(char *buf, int len, int ctr)
+ : ostrstream(buf, len),
+ ctr_(ctr) {
+ self_ = this;
+ }
+
+ int ctr() const { return ctr_; }
+ void set_ctr(int ctr) { ctr_ = ctr; }
+ LogStream* self() const { return self_; }
+
+ private:
+ int ctr_; // Counter hack (for the LOG_EVERY_X() macro)
+ LogStream *self_; // Consistency check hack
+ };
+
+public:
+ // icc 8 requires this typedef to avoid an internal compiler error.
+ typedef void (LogMessage::*SendMethod)();
+
+ LogMessage(const char* file, int line, LogSeverity severity, int ctr,
+ SendMethod send_method);
+
+ // Two special constructors that generate reduced amounts of code at
+ // LOG call sites for common cases.
+
+ // Used for LOG(INFO): Implied are:
+ // severity = INFO, ctr = 0, send_method = &LogMessage::SendToLog.
+ //
+ // Using this constructor instead of the more complex constructor above
+ // saves 19 bytes per call site.
+ LogMessage(const char* file, int line);
+
+ // Used for LOG(severity) where severity != INFO. Implied
+ // are: ctr = 0, send_method = &LogMessage::SendToLog
+ //
+ // Using this constructor instead of the more complex constructor above
+ // saves 17 bytes per call site.
+ LogMessage(const char* file, int line, LogSeverity severity);
+
+ // Constructor to log this message to a specified sink (if not NULL).
+ // Implied are: ctr = 0, send_method = &LogMessage::SendToSinkAndLog if
+ // also_send_to_log is true, send_method = &LogMessage::SendToSink otherwise.
+ LogMessage(const char* file, int line, LogSeverity severity, LogSink* sink,
+ bool also_send_to_log);
+
+ // Constructor where we also give a vector<string> pointer
+ // for storing the messages (if the pointer is not NULL).
+ // Implied are: ctr = 0, send_method = &LogMessage::SaveOrSendToLog.
+ LogMessage(const char* file, int line, LogSeverity severity,
+ std::vector<std::string>* outvec);
+
+ // Constructor where we also give a string pointer for storing the
+ // message (if the pointer is not NULL). Implied are: ctr = 0,
+ // send_method = &LogMessage::WriteToStringAndLog.
+ LogMessage(const char* file, int line, LogSeverity severity,
+ std::string* message);
+
+ // A special constructor used for check failures
+ LogMessage(const char* file, int line, const CheckOpString& result);
+
+ ~LogMessage();
+
+ // Flush a buffered message to the sink set in the constructor. Always
+ // called by the destructor, it may also be called from elsewhere if
+ // needed. Only the first call has any effect; later calls are ignored.
+ void Flush();
+
+ // An arbitrary limit on the length of a single log message. This
+ // is so that streaming can be done more efficiently.
+ static const size_t kMaxLogMessageLen;
+
+ // These should not be called directly outside of logging.*,
+ // only passed as SendMethod arguments to other LogMessage methods:
+ void SendToLog(); // Actually dispatch to the logs
+ void SendToSyslogAndLog(); // Actually dispatch to syslog and the logs
+
+ // Call abort() or similar to perform LOG(FATAL) crash.
+ static void Fail();
+
+ std::ostream& stream() { return *(data_->stream_); }
+
+ int preserved_errno() const { return data_->preserved_errno_; }
+
+ // Must be called without the log_mutex held. (L < log_mutex)
+ static int64 num_messages(int severity);
+
+private:
+ // Fully internal SendMethod cases:
+ void SendToSinkAndLog(); // Send to sink if provided and dispatch to the logs
+ void SendToSink(); // Send to sink if provided, do nothing otherwise.
+
+ // Write to string if provided and dispatch to the logs.
+ void WriteToStringAndLog();
+
+ void SaveOrSendToLog(); // Save to stringvec if provided, else to logs
+
+ void Init(const char* file, int line, LogSeverity severity,
+ void (LogMessage::*send_method)());
+
+ // Used to fill in crash information during LOG(FATAL) failures.
+ void RecordCrashReason(glog_internal_namespace_::CrashReason* reason);
+
+ // Counts of messages sent at each priority:
+ static int64 num_messages_[NUM_SEVERITIES]; // under log_mutex
+
+ // We keep the data in a separate struct so that each instance of
+ // LogMessage uses less stack space.
+ struct GOOGLE_GLOG_DLL_DECL LogMessageData {
+ LogMessageData() {}
+
+ int preserved_errno_; // preserved errno
+ char* buf_;
+ char* message_text_; // Complete message text (points to selected buffer)
+ LogStream* stream_alloc_;
+ LogStream* stream_;
+ char severity_; // What level is this LogMessage logged at?
+ int line_; // line number where logging call is.
+ void (LogMessage::*send_method_)(); // Call this in destructor to send
+ union { // At most one of these is used: union to keep the size low.
+ LogSink* sink_; // NULL or sink to send message to
+ std::vector<std::string>* outvec_; // NULL or vector to push message onto
+ std::string* message_; // NULL or string to write message into
+ };
+ time_t timestamp_; // Time of creation of LogMessage
+ struct ::tm tm_time_; // Time of creation of LogMessage
+ size_t num_prefix_chars_; // # of chars of prefix in this message
+ size_t num_chars_to_log_; // # of chars of msg to send to log
+ size_t num_chars_to_syslog_; // # of chars of msg to send to syslog
+ const char* basename_; // basename of file that called LOG
+ const char* fullname_; // fullname of file that called LOG
+ bool has_been_flushed_; // false => data has not been flushed
+ bool first_fatal_; // true => this was first fatal msg
+
+ ~LogMessageData();
+ private:
+ LogMessageData(const LogMessageData&);
+ void operator=(const LogMessageData&);
+ };
+
+ static LogMessageData fatal_msg_data_exclusive_;
+ static LogMessageData fatal_msg_data_shared_;
+
+ LogMessageData* allocated_;
+ LogMessageData* data_;
+
+ friend class LogDestination;
+
+ LogMessage(const LogMessage&);
+ void operator=(const LogMessage&);
+};
+
+// This class happens to be thread-hostile because all instances share
+// a single data buffer, but since it can only be created just before
+// the process dies, we don't worry so much.
+class GOOGLE_GLOG_DLL_DECL LogMessageFatal : public LogMessage {
+ public:
+ LogMessageFatal(const char* file, int line);
+ LogMessageFatal(const char* file, int line, const CheckOpString& result);
+ ~LogMessageFatal();
+};
+
+// A non-macro interface to the log facility (useful when the logging
+// level is not a compile-time constant).
+inline void LogAtLevel(int const severity, std::string const &msg) {
+ LogMessage(__FILE__, __LINE__, severity).stream() << msg;
+}
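+// Illustrative usage (had_error is hypothetical):
+//   LogAtLevel(had_error ? google::ERROR : google::INFO, "finished pass");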
+
+// A macro alternative to LogAtLevel. New code may want to use this
+// version, since it has two advantages: 1. it outputs the file name and
+// the line number where this macro is placed, like the other LOG macros;
+// 2. it can be used as a C++ stream.
+#define LOG_AT_LEVEL(severity) google::LogMessage(__FILE__, __LINE__, severity).stream()
+
+// A small helper for CHECK_NOTNULL().
+template <typename T>
+T* CheckNotNull(const char *file, int line, const char *names, T* t) {
+ if (t == NULL) {
+ LogMessageFatal(file, line, new std::string(names));
+ }
+ return t;
+}
+
+// Allow folks to put a counter in the LOG_EVERY_X()'ed messages. This
+// only works if the ostream is a LogStream; if it is not, you'll get an
+// assertion failure at runtime saying as much.
+GOOGLE_GLOG_DLL_DECL std::ostream& operator<<(std::ostream &os,
+ const PRIVATE_Counter&);
+
+
+// Derived class for PLOG*() above.
+class GOOGLE_GLOG_DLL_DECL ErrnoLogMessage : public LogMessage {
+ public:
+
+ ErrnoLogMessage(const char* file, int line, LogSeverity severity, int ctr,
+ void (LogMessage::*send_method)());
+
+ // Postpends ": strerror(errno) [errno]".
+ ~ErrnoLogMessage();
+
+ private:
+ ErrnoLogMessage(const ErrnoLogMessage&);
+ void operator=(const ErrnoLogMessage&);
+};
+
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+
+class GOOGLE_GLOG_DLL_DECL LogMessageVoidify {
+ public:
+ LogMessageVoidify() { }
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(std::ostream&) { }
+};
+
+
+// Flushes all log files that contain messages that are at least of
+// the specified severity level. Thread-safe.
+GOOGLE_GLOG_DLL_DECL void FlushLogFiles(LogSeverity min_severity);
+
+// Flushes all log files that contain messages that are at least of
+// the specified severity level. Thread-hostile because it ignores
+// locking -- used for catastrophic failures.
+GOOGLE_GLOG_DLL_DECL void FlushLogFilesUnsafe(LogSeverity min_severity);
+
+//
+// Set the destination to which a particular severity level of log
+// messages is sent. If base_filename is "", it means "don't log this
+// severity". Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetLogDestination(LogSeverity severity,
+ const char* base_filename);
+
+//
+// Set the basename of the symlink to the latest log file at a given
+// severity. If symlink_basename is empty, do not make a symlink. If
+// you don't call this function, the symlink basename is the
+// invocation name of the program. Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetLogSymlink(LogSeverity severity,
+ const char* symlink_basename);
+
+//
+// Used to send logs to some other kind of destination.
+// Users should subclass LogSink and override send() to do whatever they want.
+// Implementations must be thread-safe because a shared instance will
+// be called from whichever thread ran the LOG(XXX) line.
+class GOOGLE_GLOG_DLL_DECL LogSink {
+ public:
+ virtual ~LogSink();
+
+ // Sink's logging logic (message_len excludes the trailing '\n').
+ // This method can't use LOG() or CHECK(), as the logging system's
+ // mutex(es) are held during this call.
+ virtual void send(LogSeverity severity, const char* full_filename,
+ const char* base_filename, int line,
+ const struct ::tm* tm_time,
+ const char* message, size_t message_len) = 0;
+
+ // Redefine this to implement waiting for
+ // the sink's logging logic to complete.
+ // It will be called after each send() returns,
+ // but before that LogMessage exits or crashes.
+ // By default this function does nothing.
+ // Using this function, one can implement complex logic for send()
+ // that itself involves logging, and do all this without causing
+ // deadlocks or inconsistent rearrangement of log messages.
+ // E.g. if a LogSink has thread-specific actions, the send() method
+ // can simply add the message to a queue and wake up another thread that
+ // handles real logging while itself making some LOG() calls;
+ // WaitTillSent() can be implemented to wait for that logic to complete.
+ // See our unittest for an example.
+ virtual void WaitTillSent();
+
+ // Returns the normal text output of the log message.
+ // Can be useful to implement send().
+ static std::string ToString(LogSeverity severity, const char* file, int line,
+ const struct ::tm* tm_time,
+ const char* message, size_t message_len);
+};
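+
+// A minimal illustrative sketch of a LogSink subclass (all names hypothetical):
+//   class StringSink : public google::LogSink {
+//    public:
+//     virtual void send(google::LogSeverity severity, const char* full_filename,
+//                       const char* base_filename, int line,
+//                       const struct ::tm* tm_time,
+//                       const char* message, size_t message_len) {
+//       // Must not use LOG() or CHECK() in here; just record the text.
+//       text_.append(ToString(severity, base_filename, line, tm_time,
+//                             message, message_len));
+//     }
+//     std::string text_;
+//   };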
+
+// Add or remove a LogSink as a consumer of logging data. Thread-safe.
+GOOGLE_GLOG_DLL_DECL void AddLogSink(LogSink *destination);
+GOOGLE_GLOG_DLL_DECL void RemoveLogSink(LogSink *destination);
+
+//
+// Specify an "extension" added to the filename specified via
+// SetLogDestination. This applies to all severity levels. It's
+// often used to append the port we're listening on to the logfile
+// name. Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetLogFilenameExtension(
+ const char* filename_extension);
+
+//
+// Make it so that all log messages of at least a particular severity
+// are logged to stderr (in addition to logging to the usual log
+// file(s)). Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetStderrLogging(LogSeverity min_severity);
+
+//
+// Make it so that all log messages go only to stderr. Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void LogToStderr();
+
+//
+// Make it so that all log messages of at least a particular severity are
+// logged via email to a list of addresses (in addition to logging to the
+// usual log file(s)). The list of addresses is just a string containing
+// the email addresses to send to (separated by spaces, say). Thread-safe.
+//
+GOOGLE_GLOG_DLL_DECL void SetEmailLogging(LogSeverity min_severity,
+ const char* addresses);
+
+// A simple function that sends email. dest is a comma-separated
+// list of addresses. Thread-safe.
+GOOGLE_GLOG_DLL_DECL bool SendEmail(const char *dest,
+ const char *subject, const char *body);
+
+GOOGLE_GLOG_DLL_DECL const std::vector<std::string>& GetLoggingDirectories();
+
+// For tests only: Clear the internal [cached] list of logging directories to
+// force a refresh the next time GetLoggingDirectories is called.
+// Thread-hostile.
+void TestOnly_ClearLoggingDirectoriesList();
+
+// Returns a set of existing temporary directories, which will be a
+// subset of the directories returned by GetLoggingDirectories().
+// Thread-safe.
+GOOGLE_GLOG_DLL_DECL void GetExistingTempDirectories(
+ std::vector<std::string>* list);
+
+// Print any fatal message again -- useful to call from signal handler
+// so that the last thing in the output is the fatal message.
+// Thread-hostile, but a race is unlikely.
+GOOGLE_GLOG_DLL_DECL void ReprintFatalMessage();
+
+// Truncate a log file that may be the append-only output of multiple
+// processes and hence can't simply be renamed/reopened (typically a
+// stdout/stderr). If the file "path" is > "limit" bytes, copy the
+// last "keep" bytes to offset 0 and truncate the rest. Since we could
+// be racing with other writers, this approach has the potential to
+// lose very small amounts of data. For security, only follow symlinks
+// if the path is /proc/self/fd/*.
+GOOGLE_GLOG_DLL_DECL void TruncateLogFile(const char *path,
+ int64 limit, int64 keep);
+
+// Truncate stdout and stderr if they are over the value specified by
+// --max_log_size; keep the final 1MB. This function has the same
+// race condition as TruncateLogFile.
+GOOGLE_GLOG_DLL_DECL void TruncateStdoutStderr();
+
+// Return the string representation of the provided LogSeverity level.
+// Thread-safe.
+GOOGLE_GLOG_DLL_DECL const char* GetLogSeverityName(LogSeverity severity);
+
+// ---------------------------------------------------------------------
+// Implementation details that are not useful to most clients
+// ---------------------------------------------------------------------
+
+// A Logger is the interface used by logging modules to emit entries
+// to a log. A typical implementation will dump formatted data to a
+// sequence of files. We also provide interfaces that will forward
+// the data to another thread so that the invoker never blocks.
+// Implementations should be thread-safe since the logging system
+// will write to them from multiple threads.
+
+namespace base {
+
+class GOOGLE_GLOG_DLL_DECL Logger {
+ public:
+ virtual ~Logger();
+
+ // Writes "message[0,message_len-1]" corresponding to an event that
+ // occurred at "timestamp". If "force_flush" is true, the log file
+ // is flushed immediately.
+ //
+ // The input message has already been formatted as deemed
+ // appropriate by the higher level logging facility. For example,
+ // textual log messages already contain timestamps, and the
+ // file:linenumber header.
+ virtual void Write(bool force_flush,
+ time_t timestamp,
+ const char* message,
+ int message_len) = 0;
+
+ // Flush any buffered messages
+ virtual void Flush() = 0;
+
+ // Get the current LOG file size.
+ // The returned value is approximate since some
+ // logged data may not have been flushed to disk yet.
+ virtual uint32 LogSize() = 0;
+};
+
+// Get the logger for the specified severity level. The logger
+// remains the property of the logging module and should not be
+// deleted by the caller. Thread-safe.
+extern GOOGLE_GLOG_DLL_DECL Logger* GetLogger(LogSeverity level);
+
+// Set the logger for the specified severity level. The logger
+// becomes the property of the logging module and should not
+// be deleted by the caller. Thread-safe.
+extern GOOGLE_GLOG_DLL_DECL void SetLogger(LogSeverity level, Logger* logger);
+
+}
+
+// glibc has traditionally implemented two incompatible versions of
+// strerror_r(). There is a poorly defined convention for picking the
+// version that we want, but it is not clear whether it even works with
+// all versions of glibc.
+// So, instead, we provide this wrapper that automatically detects the
+// version that is in use, and then implements POSIX semantics.
+// N.B. In addition to what POSIX says, we also guarantee that "buf" will
+// be set to an empty string if this function fails. This means, in most
+// cases, you do not need to check the error code and you can directly
+// use the value of "buf". It will never have an undefined value.
+GOOGLE_GLOG_DLL_DECL int posix_strerror_r(int err, char *buf, size_t len);
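+// Illustrative usage:
+//   char buf[100];
+//   posix_strerror_r(errno, buf, sizeof(buf));
+//   fprintf(stderr, "open() failed: %s\n", buf);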
+
+
+// A class for which we define operator<<, which does nothing.
+class GOOGLE_GLOG_DLL_DECL NullStream : public LogMessage::LogStream {
+ public:
+ // Initialize the LogStream so the messages can be written somewhere
+ // (they'll never be actually displayed). This will be needed if a
+ // NullStream& is implicitly converted to LogStream&, in which case
+ // the overloaded NullStream::operator<< will not be invoked.
+ NullStream() : LogMessage::LogStream(message_buffer_, 1, 0) { }
+ NullStream(const char* /*file*/, int /*line*/,
+ const CheckOpString& /*result*/) :
+ LogMessage::LogStream(message_buffer_, 1, 0) { }
+ NullStream &stream() { return *this; }
+ private:
+ // A very short buffer for messages (which we discard anyway). This
+ // will be needed if a NullStream& is converted to a LogStream& (e.g. as a
+ // result of a conditional expression).
+ char message_buffer_[2];
+};
+
+// Do nothing. This operator is inline, allowing the message to be
+// compiled away. The message will not be compiled away if we do
+// something like (flag ? LOG(INFO) : LOG(ERROR)) << message; when
+// STRIP_LOG=WARNING. In those cases, NullStream will be implicitly
+// converted to LogStream and the message will be computed and then
+// quietly discarded.
+template<class T>
+inline NullStream& operator<<(NullStream &str, const T &value) { return str; }
+
+// Similar to NullStream, but aborts the program (without stack
+// trace), like LogMessageFatal.
+class GOOGLE_GLOG_DLL_DECL NullStreamFatal : public NullStream {
+ public:
+ NullStreamFatal() { }
+ NullStreamFatal(const char* file, int line, const CheckOpString& result) :
+ NullStream(file, line, result) { }
+ ~NullStreamFatal() { _exit(1); }
+};
+
+// Install a signal handler that will dump signal information and a stack
+// trace when the program crashes on certain signals. We'll install the
+// signal handler for the following signals.
+//
+// SIGSEGV, SIGILL, SIGFPE, SIGABRT, SIGBUS, and SIGTERM.
+//
+// By default, the signal handler will write the failure dump to the
+// standard error. You can customize the destination by installing your
+// own writer function via InstallFailureWriter() below.
+//
+// Note on threading:
+//
+// The function should be called before threads are created, if you want
+// to use the failure signal handler for all threads. The stack trace
+// will be shown only for the thread that receives the signal. In other
+// words, stack traces of other threads won't be shown.
+GOOGLE_GLOG_DLL_DECL void InstallFailureSignalHandler();
+
+// Installs a function that is used for writing the failure dump. "data"
+// is the pointer to the beginning of a message to be written, and "size"
+// is the size of the message. You should not expect the data to be
+// '\0'-terminated.
+GOOGLE_GLOG_DLL_DECL void InstallFailureWriter(
+ void (*writer)(const char* data, int size));
+
+}
+
+#endif // _LOGGING_H_
diff --git a/extern/libmv/third_party/glog/src/windows/glog/raw_logging.h b/extern/libmv/third_party/glog/src/windows/glog/raw_logging.h
new file mode 100644
index 00000000000..c81e67bf99c
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/windows/glog/raw_logging.h
@@ -0,0 +1,189 @@
+// This file is automatically generated from src/glog/raw_logging.h.in
+// using src/windows/preprocess.sh.
+// DO NOT EDIT!
+
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Maxim Lifantsev
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation and synchronization code.
+
+#ifndef BASE_RAW_LOGGING_H_
+#define BASE_RAW_LOGGING_H_
+
+#include <time.h>
+
+namespace google {
+
+#include "glog/log_severity.h"
+#include "glog/vlog_is_on.h"
+
+// Annoying stuff for windows -- makes sure clients can import these functions
+#ifndef GOOGLE_GLOG_DLL_DECL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllimport)
+# else
+# define GOOGLE_GLOG_DLL_DECL
+# endif
+#endif
+
+// This is similar to LOG(severity) << format... and VLOG(level) << format...,
+// but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+// memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+// RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// RAW_VLOG(3, "status is %i", status);
+// These will print almost-standard log lines like these to stderr only:
+// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+// I0821 211317 file.cc:142] RAW: status is 20
+#define RAW_LOG(severity, ...) \
+ do { \
+ switch (google::severity) { \
+ case 0: \
+ RAW_LOG_INFO(__VA_ARGS__); \
+ break; \
+ case 1: \
+ RAW_LOG_WARNING(__VA_ARGS__); \
+ break; \
+ case 2: \
+ RAW_LOG_ERROR(__VA_ARGS__); \
+ break; \
+ case 3: \
+ RAW_LOG_FATAL(__VA_ARGS__); \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+// The following STRIP_LOG testing is performed in the header file so that it's
+// possible to completely compile out the logging code and the log messages.
+#if STRIP_LOG == 0
+#define RAW_VLOG(verboselevel, ...) \
+ do { \
+ if (VLOG_IS_ON(verboselevel)) { \
+ RAW_LOG_INFO(__VA_ARGS__); \
+ } \
+ } while (0)
+#else
+#define RAW_VLOG(verboselevel, ...) RawLogStub__(0, __VA_ARGS__)
+#endif // STRIP_LOG == 0
+
+#if STRIP_LOG == 0
+#define RAW_LOG_INFO(...) google::RawLog__(google::INFO, \
+ __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define RAW_LOG_INFO(...) google::RawLogStub__(0, __VA_ARGS__)
+#endif // STRIP_LOG == 0
+
+#if STRIP_LOG <= 1
+#define RAW_LOG_WARNING(...) google::RawLog__(google::WARNING, \
+ __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define RAW_LOG_WARNING(...) google::RawLogStub__(0, __VA_ARGS__)
+#endif // STRIP_LOG <= 1
+
+#if STRIP_LOG <= 2
+#define RAW_LOG_ERROR(...) google::RawLog__(google::ERROR, \
+ __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define RAW_LOG_ERROR(...) google::RawLogStub__(0, __VA_ARGS__)
+#endif // STRIP_LOG <= 2
+
+#if STRIP_LOG <= 3
+#define RAW_LOG_FATAL(...) google::RawLog__(google::FATAL, \
+ __FILE__, __LINE__, __VA_ARGS__)
+#else
+#define RAW_LOG_FATAL(...) \
+ do { \
+ google::RawLogStub__(0, __VA_ARGS__); \
+ exit(1); \
+ } while (0)
+#endif // STRIP_LOG <= 3
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only RAW_LOG, which does not allocate memory.
+// We do not provide an args list here, in order to encourage this usage:
+//   if (!cond) RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
+#define RAW_CHECK(condition, message) \
+ do { \
+ if (!(condition)) { \
+ RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+ } \
+ } while (0)
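+// Illustrative usage (mutex_inited is hypothetical):
+//   RAW_CHECK(mutex_inited, "mutex not initialized before first use");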
+
+// Debug versions of RAW_LOG and RAW_CHECK
+#ifndef NDEBUG
+
+#define RAW_DLOG(severity, ...) RAW_LOG(severity, __VA_ARGS__)
+#define RAW_DCHECK(condition, message) RAW_CHECK(condition, message)
+
+#else // NDEBUG
+
+#define RAW_DLOG(severity, ...) \
+ while (false) \
+ RAW_LOG(severity, __VA_ARGS__)
+#define RAW_DCHECK(condition, message) \
+ while (false) \
+ RAW_CHECK(condition, message)
+
+#endif // NDEBUG
+
+// Stub log function used to work around unused-variable warnings when
+// building with STRIP_LOG > 0.
+static inline void RawLogStub__(int ignored, ...) {
+}
+
+// Helper function to implement RAW_LOG and RAW_VLOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+GOOGLE_GLOG_DLL_DECL void RawLog__(LogSeverity severity,
+ const char* file,
+ int line,
+ const char* format, ...);
+
+// Hack to propagate time information into this module so that
+// this module does not have to directly call localtime_r(),
+// which could allocate memory.
+GOOGLE_GLOG_DLL_DECL void RawLog__SetLastTime(const struct tm& t, int usecs);
+
+}
+
+#endif // BASE_RAW_LOGGING_H_
diff --git a/extern/libmv/third_party/glog/src/windows/glog/vlog_is_on.h b/extern/libmv/third_party/glog/src/windows/glog/vlog_is_on.h
new file mode 100644
index 00000000000..409a4011b38
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/windows/glog/vlog_is_on.h
@@ -0,0 +1,133 @@
+// This file is automatically generated from src/glog/vlog_is_on.h.in
+// using src/windows/preprocess.sh.
+// DO NOT EDIT!
+
+// Copyright (c) 1999, 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Ray Sidney and many others
+//
+// Defines the VLOG_IS_ON macro that controls the variable-verbosity
+// conditional logging.
+//
+// It's used by VLOG and VLOG_IF in logging.h
+// and by RAW_VLOG in raw_logging.h to trigger the logging.
+//
+// It can also be used directly e.g. like this:
+// if (VLOG_IS_ON(2)) {
+// // do some logging preparation and logging
+// // that can't be accomplished e.g. via just VLOG(2) << ...;
+// }
+//
+// The truth value that VLOG_IS_ON(level) returns is determined by
+// the three verbosity level flags:
+// --v=<n> Gives the default maximal active V-logging level;
+// 0 is the default.
+// Normally positive values are used for V-logging levels.
+// --vmodule=<str> Gives the per-module maximal V-logging levels to override
+// the value given by --v.
+// E.g. "my_module=2,foo*=3" would change the logging level
+// for all code in source files "my_module.*" and "foo*.*"
+// ("-inl" suffixes are also disregarded for this matching).
+//
+// The SetVLOGLevel helper function is provided to do limited dynamic control
+// over V-logging by overriding the per-module settings given via the
+// --vmodule flag.
+//
+// CAVEAT: --vmodule functionality is not available in non-gcc compilers.
+//
+
+#ifndef BASE_VLOG_IS_ON_H_
+#define BASE_VLOG_IS_ON_H_
+
+#include "glog/log_severity.h"
+
+// Annoying stuff for windows -- makes sure clients can import these functions
+#ifndef GOOGLE_GLOG_DLL_DECL
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define GOOGLE_GLOG_DLL_DECL __declspec(dllimport)
+# else
+# define GOOGLE_GLOG_DLL_DECL
+# endif
+#endif
+
+#if defined(__GNUC__)
+// We emit an anonymous static int* variable at every VLOG_IS_ON(n) site.
+// (Normally) the first time every VLOG_IS_ON(n) site is hit,
+// we determine what variable will dynamically control logging at this site:
+// it's either FLAGS_v or an appropriate internal variable
+// matching the current source file that represents results of
+// parsing of --vmodule flag and/or SetVLOGLevel calls.
+#define VLOG_IS_ON(verboselevel) \
+ __extension__ \
+ ({ static google::int32* vlocal__ = &google::kLogSiteUninitialized; \
+ google::int32 verbose_level__ = (verboselevel); \
+ (*vlocal__ >= verbose_level__) && \
+ ((vlocal__ != &google::kLogSiteUninitialized) || \
+ (google::InitVLOG3__(&vlocal__, &FLAGS_v, \
+ __FILE__, verbose_level__))); })
+#else
+// GNU extensions not available, so we do not support --vmodule.
+// Dynamic value of FLAGS_v always controls the logging level.
+#define VLOG_IS_ON(verboselevel) (FLAGS_v >= (verboselevel))
+#endif
+
+// Set VLOG(_IS_ON) level for module_pattern to log_level.
+// This lets us dynamically control what is normally set by the --vmodule flag.
+// Returns the level that previously applied to module_pattern.
+// NOTE: To change the log level for VLOG(_IS_ON) sites
+// that have already executed after/during InitGoogleLogging,
+// one needs to supply the exact --vmodule pattern that applied to them.
+// (If no --vmodule pattern applied to them
+// the value of FLAGS_v will continue to control them.)
+extern GOOGLE_GLOG_DLL_DECL int SetVLOGLevel(const char* module_pattern,
+ int log_level);
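+// Illustrative usage (the module pattern is hypothetical):
+//   int old_level = SetVLOGLevel("my_module*", 2);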
+
+// Various declarations needed for VLOG_IS_ON above: =========================
+
+// Special value used to indicate that a VLOG_IS_ON site has not been
+// initialized. We make this a large value, so the common-case check
+// of "*vlocal__ >= verbose_level__" in VLOG_IS_ON definition
+// passes in such cases and InitVLOG3__ is then triggered.
+extern google::int32 kLogSiteUninitialized;
+
+// Helper routine which determines the logging info for a particular VLOG site.
+// site_flag is the address of the site-local pointer to the controlling
+// verbosity level
+// site_default is the default to use for *site_flag
+// fname is the current source file name
+// verbose_level is the argument to VLOG_IS_ON
+// We will return the return value for VLOG_IS_ON
+// and if possible set *site_flag appropriately.
+extern GOOGLE_GLOG_DLL_DECL bool InitVLOG3__(
+ google::int32** site_flag,
+ google::int32* site_default,
+ const char* fname,
+ google::int32 verbose_level);
+
+#endif // BASE_VLOG_IS_ON_H_
diff --git a/extern/libmv/third_party/glog/src/windows/port.cc b/extern/libmv/third_party/glog/src/windows/port.cc
new file mode 100644
index 00000000000..bfa6e70afbb
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/windows/port.cc
@@ -0,0 +1,64 @@
+/* Copyright (c) 2008, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ---
+ * Author: Craig Silverstein
+ * Copied from google-perftools and modified by Shinichiro Hamaji
+ */
+
+#ifndef _WIN32
+# error You should only be including windows/port.cc in a windows environment!
+#endif
+
+#include "config.h"
+#include <stdarg.h> // for va_list, va_start, va_end
+#include <string.h> // for strstr()
+#include <assert.h>
+#include <string>
+#include <vector>
+#include "port.h"
+
+using std::string;
+using std::vector;
+
+// These call the Windows _vsnprintf, but always NUL-terminate.
+int safe_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
+ if (size == 0) // not even room for a \0?
+ return -1; // not what C99 says to do, but what windows does
+ str[size-1] = '\0';
+ return _vsnprintf(str, size-1, format, ap);
+}
+
+int snprintf(char *str, size_t size, const char *format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ const int r = vsnprintf(str, size, format, ap);
+ va_end(ap);
+ return r;
+}
diff --git a/extern/libmv/third_party/glog/src/windows/port.h b/extern/libmv/third_party/glog/src/windows/port.h
new file mode 100644
index 00000000000..d093bf5d34c
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/windows/port.h
@@ -0,0 +1,149 @@
+/* Copyright (c) 2008, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ---
+ * Author: Craig Silverstein
+ * Copied from google-perftools and modified by Shinichiro Hamaji
+ *
+ * These are some portability typedefs and defines to make it a bit
+ * easier to compile this code under VC++.
+ *
+ * Several of these are taken from glib:
+ * http://developer.gnome.org/doc/API/glib/glib-windows-compatability-functions.html
+ */
+
+#ifndef CTEMPLATE_WINDOWS_PORT_H_
+#define CTEMPLATE_WINDOWS_PORT_H_
+
+#include "config.h"
+
+#ifdef _WIN32
+
+#define WIN32_LEAN_AND_MEAN /* We always want minimal includes */
+#include <windows.h>
+#include <winsock.h> /* for gethostname */
+#include <io.h> /* because we so often use open/close/etc */
+#include <direct.h> /* for _getcwd() */
+#include <process.h> /* for _getpid() */
+#include <stdio.h> /* read in vsnprintf decl. before redefining it */
+#include <stdarg.h> /* template_dictionary.cc uses va_copy */
+#include <string.h> /* for _strnicmp(), strerror_s() */
+#include <time.h> /* for localtime_s() */
+/* Note: the C++ #includes are all together at the bottom. This file is
+ * used by both C and C++ code, so we keep all the C++ includes together.
+ */
+
+/* 4244: otherwise we get problems when subtracting two size_t's to an int
+ * 4251: it's complaining about a private struct I've chosen not to dllexport
+ * 4355: we use this in a constructor, but we do it safely
+ * 4715: for some reason VC++ stopped realizing you can't return after abort()
+ * 4800: we know we're casting ints/char*'s to bools, and we're ok with that
+ * 4996: Yes, we're ok using "unsafe" functions like fopen() and strerror()
+ */
+#pragma warning(disable:4244 4251 4355 4715 4800 4996)
+
+/* file I/O */
+#define PATH_MAX 1024
+#define access _access
+#define getcwd _getcwd
+#define open _open
+#define read _read
+#define write _write
+#define lseek _lseek
+#define close _close
+#define popen _popen
+#define pclose _pclose
+#define R_OK 04 /* read-only (for access()) */
+#define S_ISDIR(m) (((m) & _S_IFMT) == _S_IFDIR)
+#ifndef __MINGW32__
+enum { STDIN_FILENO = 0, STDOUT_FILENO = 1, STDERR_FILENO = 2 };
+#endif
+#define S_IRUSR S_IREAD
+#define S_IWUSR S_IWRITE
+
+/* Not quite as lightweight as a hard-link, but more than good enough for us. */
+#define link(oldpath, newpath) CopyFileA(oldpath, newpath, false)
+
+#define strcasecmp _stricmp
+#define strncasecmp _strnicmp
+
+/* In windows-land, hash<> is called hash_compare<> (from xhash.h) */
+#define hash hash_compare
+
+/* Sleep is in ms, on windows */
+#define sleep(secs) Sleep((secs) * 1000)
+
+/* We can't just use _vsnprintf and _snprintf as drop-in replacements,
+ * because they don't always NUL-terminate. :-( We also can't use the
+ * name vsnprintf, since windows defines that (but not snprintf (!)).
+ */
+extern int snprintf(char *str, size_t size,
+ const char *format, ...);
+extern int safe_vsnprintf(char *str, size_t size,
+ const char *format, va_list ap);
+#define vsnprintf(str, size, format, ap) safe_vsnprintf(str, size, format, ap)
+#define va_copy(dst, src) (dst) = (src)
+
+/* Windows doesn't support specifying the number of buckets as a
+ * hash_map constructor arg, so we leave this blank.
+ */
+#define CTEMPLATE_SMALL_HASHTABLE
+
+#define DEFAULT_TEMPLATE_ROOTDIR ".."
+
+// ----------------------------------- SYSTEM/PROCESS
+typedef int pid_t;
+#define getpid _getpid
+
+// ----------------------------------- THREADS
+typedef DWORD pthread_t;
+typedef DWORD pthread_key_t;
+typedef LONG pthread_once_t;
+enum { PTHREAD_ONCE_INIT = 0 }; // important that this be 0! for SpinLock
+#define pthread_self GetCurrentThreadId
+#define pthread_equal(pthread_t_1, pthread_t_2) ((pthread_t_1)==(pthread_t_2))
+
+inline struct tm* localtime_r(const time_t* timep, struct tm* result) {
+ localtime_s(result, timep);
+ return result;
+}
+
+inline char* strerror_r(int errnum, char* buf, size_t buflen) {
+ strerror_s(buf, buflen, errnum);
+ return buf;
+}
+
+#ifndef __cplusplus
+/* I don't see how to get inlining for C code in MSVC. Ah well. */
+#define inline
+#endif
+
+#endif /* _WIN32 */
+
+#endif /* CTEMPLATE_WINDOWS_PORT_H_ */
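
The localtime_r/strerror_r shims above let call sites use the POSIX names unchanged. A minimal sketch, assuming port.h is in effect (the function name and format string are invented; note that the char* return of strerror_r matches these shims specifically, since glibc's XSI variant returns int instead):

    /* Sketch only: uses the localtime_r/strerror_r shims defined above. */
    #include <errno.h>
    #include <stdio.h>
    #include <time.h>
    #include "port.h"

    static void print_time_and_error(int errnum) {
      time_t now = time(NULL);
      struct tm tm_buf;
      char err_buf[128];
      struct tm *t = localtime_r(&now, &tm_buf);                 /* wraps localtime_s */
      char *msg = strerror_r(errnum, err_buf, sizeof(err_buf));  /* wraps strerror_s */
      printf("%04d-%02d-%02d %02d:%02d %s\n",
             t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
             t->tm_hour, t->tm_min, msg);
    }

    int main(void) { print_time_and_error(EINVAL); return 0; }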
diff --git a/extern/libmv/third_party/glog/src/windows/preprocess.sh b/extern/libmv/third_party/glog/src/windows/preprocess.sh
new file mode 100755
index 00000000000..ea4352e8e3a
--- /dev/null
+++ b/extern/libmv/third_party/glog/src/windows/preprocess.sh
@@ -0,0 +1,118 @@
+#!/bin/sh
+
+# Copyright (c) 2008, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ---
+# Author: Craig Silverstein
+# Copied from google-perftools and modified by Shinichiro Hamaji
+#
+# This script is meant to be run at distribution-generation time, for
+# instance by autogen.sh. It does some of the work configure would
+# normally do, for windows systems. In particular, it expands all the
+# @...@ variables found in .in files, and puts them here, in the windows
+# directory.
+#
+# This script should be run before any new release.
+
+if [ -z "$1" ]; then
+ echo "USAGE: $0 <src/ directory>"
+ exit 1
+fi
+
+DLLDEF_MACRO_NAME="GLOG_DLL_DECL"
+
+# The text we put in every .h file we create. As a courtesy, we'll
+# include a helpful comment for windows users as to how to use
+# GLOG_DLL_DECL. Apparently sed expands \n into a newline. Good!
+DLLDEF_DEFINES="\
+// NOTE: if you are statically linking the template library into your binary\n\
+// (rather than using the template .dll), set '/D $DLLDEF_MACRO_NAME='\n\
+// as a compiler flag in your project file to turn off the dllimports.\n\
+#ifndef $DLLDEF_MACRO_NAME\n\
+# define $DLLDEF_MACRO_NAME __declspec(dllimport)\n\
+#endif"
+
+# Read all the windows config info into variables.
+# For those assignments to stay visible to the loop below, everything runs in one subshell.
+(
+ while read define varname value; do
+ [ "$define" != "#define" ] && continue
+ eval "$varname='$value'"
+ done
+
+ # Process all the .in files in the "glog" subdirectory
+ mkdir -p "$1/windows/glog"
+ for file in `echo "$1"/glog/*.in`; do
+ echo "Processing $file"
+ outfile="$1/windows/glog/`basename $file .in`"
+
+ echo "\
+// This file is automatically generated from $file
+// using src/windows/preprocess.sh.
+// DO NOT EDIT!
+" > "$outfile"
+ # Besides replacing @...@, we also need to turn on dllimport
+ # We also need to replace hash by hash_compare (annoying we hard-code :-( )
+ sed -e "s!@ac_windows_dllexport@!$DLLDEF_MACRO_NAME!g" \
+ -e "s!@ac_windows_dllexport_defines@!$DLLDEF_DEFINES!g" \
+ -e "s!@ac_cv_cxx_hash_map@!$HASH_MAP_H!g" \
+ -e "s!@ac_cv_cxx_hash_namespace@!$HASH_NAMESPACE!g" \
+ -e "s!@ac_cv_cxx_hash_set@!$HASH_SET_H!g" \
+ -e "s!@ac_cv_have_stdint_h@!0!g" \
+ -e "s!@ac_cv_have_systypes_h@!0!g" \
+ -e "s!@ac_cv_have_inttypes_h@!0!g" \
+ -e "s!@ac_cv_have_unistd_h@!0!g" \
+ -e "s!@ac_cv_have_uint16_t@!0!g" \
+ -e "s!@ac_cv_have_u_int16_t@!0!g" \
+ -e "s!@ac_cv_have___uint16@!1!g" \
+ -e "s!@ac_cv_have_libgflags@!0!g" \
+ -e "s!@ac_cv_have___builtin_expect@!0!g" \
+ -e "s!@ac_cv_cxx_using_operator@!1!g" \
+ -e "s!@ac_cv___attribute___noreturn@!!g" \
+ -e "s!@ac_cv___attribute___printf_4_5@!!g" \
+ -e "s!@ac_google_attribute@!${HAVE___ATTRIBUTE__:-0}!g" \
+ -e "s!@ac_google_end_namespace@!$_END_GOOGLE_NAMESPACE_!g" \
+ -e "s!@ac_google_namespace@!$GOOGLE_NAMESPACE!g" \
+ -e "s!@ac_google_start_namespace@!$_START_GOOGLE_NAMESPACE_!g" \
+ -e "s!@ac_htmlparser_namespace@!$HTMLPARSER_NAMESPACE!g" \
+ -e "s!\\bhash\\b!hash_compare!g" \
+ "$file" >> "$outfile"
+ done
+) < "$1/windows/config.h"
+
+# log_severity.h isn't a .in file.
+echo "\
+// This file is automatically generated from $1/glog/log_severity.h
+// using src/windows/preprocess.sh.
+// DO NOT EDIT!
+" > "$1/windows/glog/log_severity.h"
+cat "$1/glog/log_severity.h" >> "$1/windows/glog/log_severity.h"
+
+echo "DONE"
diff --git a/extern/libmv/third_party/ldl/CMakeLists.txt b/extern/libmv/third_party/ldl/CMakeLists.txt
new file mode 100644
index 00000000000..db2d40e2612
--- /dev/null
+++ b/extern/libmv/third_party/ldl/CMakeLists.txt
@@ -0,0 +1,5 @@
+include_directories(../ufconfig)
+include_directories(Include)
+add_library(ldl Source/ldl.c)
+
+LIBMV_INSTALL_THIRD_PARTY_LIB(ldl)
diff --git a/extern/libmv/third_party/ldl/Doc/ChangeLog b/extern/libmv/third_party/ldl/Doc/ChangeLog
new file mode 100644
index 00000000000..48c322d3e77
--- /dev/null
+++ b/extern/libmv/third_party/ldl/Doc/ChangeLog
@@ -0,0 +1,39 @@
+May 31, 2007: version 2.0.0
+
+ * C-callable 64-bit version added
+
+ * ported to 64-bit MATLAB
+
+ * subdirectories added (Source/, Include/, Lib/, Demo/, Doc/, MATLAB/)
+
+Dec 12, 2006: version 1.3.4
+
+ * minor MATLAB cleanup
+
+Sept 11, 2006: version 1.3.1
+
+ * The ldl m-file renamed to ldlsparse, to avoid name conflict with the
+ new MATLAB ldl function (in MATLAB 7.3).
+
+Apr 30, 2006: version 1.3
+
+ * requires AMD v2.0. ldlmain.c demo program modified, since AMD can now
+ handle jumbled matrices. Minor change to Makefile.
+
+Aug 30, 2005:
+
+ * Makefile changed to use ../UFconfig/UFconfig.mk. License changed to
+ GNU LGPL.
+
+July 4, 2005:
+
+ * user guide added. Since no changes to the code were made,
+ the version number (1.1) and code release date (Apr 22, 2005)
+ were left unchanged.
+
+Apr. 22, 2005: LDL v1.1 released.
+
+ * No real changes were made. The code was revised so
+ that each routine fits on a single page in the documentation.
+
+Dec 31, 2003: LDL v1.0 released.
diff --git a/extern/libmv/third_party/ldl/Doc/lesser.txt b/extern/libmv/third_party/ldl/Doc/lesser.txt
new file mode 100644
index 00000000000..8add30ad590
--- /dev/null
+++ b/extern/libmv/third_party/ldl/Doc/lesser.txt
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/extern/libmv/third_party/ldl/Include/ldl.h b/extern/libmv/third_party/ldl/Include/ldl.h
new file mode 100644
index 00000000000..5840be322f7
--- /dev/null
+++ b/extern/libmv/third_party/ldl/Include/ldl.h
@@ -0,0 +1,104 @@
+/* ========================================================================== */
+/* === ldl.h: include file for the LDL package ============================= */
+/* ========================================================================== */
+
+/* LDL Copyright (c) Timothy A Davis,
+ * University of Florida. All Rights Reserved. See README for the License.
+ */
+
+#include "UFconfig.h"
+
+#ifdef LDL_LONG
+#define LDL_int UF_long
+#define LDL_ID UF_long_id
+
+#define LDL_symbolic ldl_l_symbolic
+#define LDL_numeric ldl_l_numeric
+#define LDL_lsolve ldl_l_lsolve
+#define LDL_dsolve ldl_l_dsolve
+#define LDL_ltsolve ldl_l_ltsolve
+#define LDL_perm ldl_l_perm
+#define LDL_permt ldl_l_permt
+#define LDL_valid_perm ldl_l_valid_perm
+#define LDL_valid_matrix ldl_l_valid_matrix
+
+#else
+#define LDL_int int
+#define LDL_ID "%d"
+
+#define LDL_symbolic ldl_symbolic
+#define LDL_numeric ldl_numeric
+#define LDL_lsolve ldl_lsolve
+#define LDL_dsolve ldl_dsolve
+#define LDL_ltsolve ldl_ltsolve
+#define LDL_perm ldl_perm
+#define LDL_permt ldl_permt
+#define LDL_valid_perm ldl_valid_perm
+#define LDL_valid_matrix ldl_valid_matrix
+
+#endif
+
+/* ========================================================================== */
+/* === int version ========================================================== */
+/* ========================================================================== */
+
+void ldl_symbolic (int n, int Ap [ ], int Ai [ ], int Lp [ ],
+ int Parent [ ], int Lnz [ ], int Flag [ ], int P [ ], int Pinv [ ]) ;
+
+int ldl_numeric (int n, int Ap [ ], int Ai [ ], double Ax [ ],
+ int Lp [ ], int Parent [ ], int Lnz [ ], int Li [ ], double Lx [ ],
+ double D [ ], double Y [ ], int Pattern [ ], int Flag [ ],
+ int P [ ], int Pinv [ ]) ;
+
+void ldl_lsolve (int n, double X [ ], int Lp [ ], int Li [ ],
+ double Lx [ ]) ;
+
+void ldl_dsolve (int n, double X [ ], double D [ ]) ;
+
+void ldl_ltsolve (int n, double X [ ], int Lp [ ], int Li [ ],
+ double Lx [ ]) ;
+
+void ldl_perm (int n, double X [ ], double B [ ], int P [ ]) ;
+void ldl_permt (int n, double X [ ], double B [ ], int P [ ]) ;
+
+int ldl_valid_perm (int n, int P [ ], int Flag [ ]) ;
+int ldl_valid_matrix ( int n, int Ap [ ], int Ai [ ]) ;
+
+/* ========================================================================== */
+/* === long version ========================================================= */
+/* ========================================================================== */
+
+void ldl_l_symbolic (UF_long n, UF_long Ap [ ], UF_long Ai [ ], UF_long Lp [ ],
+ UF_long Parent [ ], UF_long Lnz [ ], UF_long Flag [ ], UF_long P [ ],
+ UF_long Pinv [ ]) ;
+
+UF_long ldl_l_numeric (UF_long n, UF_long Ap [ ], UF_long Ai [ ], double Ax [ ],
+ UF_long Lp [ ], UF_long Parent [ ], UF_long Lnz [ ], UF_long Li [ ],
+ double Lx [ ], double D [ ], double Y [ ], UF_long Pattern [ ],
+ UF_long Flag [ ], UF_long P [ ], UF_long Pinv [ ]) ;
+
+void ldl_l_lsolve (UF_long n, double X [ ], UF_long Lp [ ], UF_long Li [ ],
+ double Lx [ ]) ;
+
+void ldl_l_dsolve (UF_long n, double X [ ], double D [ ]) ;
+
+void ldl_l_ltsolve (UF_long n, double X [ ], UF_long Lp [ ], UF_long Li [ ],
+ double Lx [ ]) ;
+
+void ldl_l_perm (UF_long n, double X [ ], double B [ ], UF_long P [ ]) ;
+void ldl_l_permt (UF_long n, double X [ ], double B [ ], UF_long P [ ]) ;
+
+UF_long ldl_l_valid_perm (UF_long n, UF_long P [ ], UF_long Flag [ ]) ;
+UF_long ldl_l_valid_matrix ( UF_long n, UF_long Ap [ ], UF_long Ai [ ]) ;
+
+/* ========================================================================== */
+/* === LDL version ========================================================== */
+/* ========================================================================== */
+
+#define LDL_DATE "Nov 1, 2007"
+#define LDL_VERSION_CODE(main,sub) ((main) * 1000 + (sub))
+#define LDL_MAIN_VERSION 2
+#define LDL_SUB_VERSION 0
+#define LDL_SUBSUB_VERSION 1
+#define LDL_VERSION LDL_VERSION_CODE(LDL_MAIN_VERSION,LDL_SUB_VERSION)
+
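ldl.c (further below) documents the expected call order: ldl_symbolic for the analysis, ldl_numeric for the factorization, then ldl_lsolve/ldl_dsolve/ldl_ltsolve to solve. A minimal sketch against the int-version declarations above, with an invented 3-by-3 positive definite matrix, no permutation (P = NULL), and the solves done in place on the right-hand side:

    /* Sketch only: factor A = [4 1 0; 1 4 1; 0 1 4] and solve Ax = b.
     * Only the upper triangle of A is stored, in compressed-column form. */
    #include <stdio.h>
    #include "ldl.h"

    int main(void) {
      int n = 3;
      int Ap[] = {0, 1, 3, 5};             /* column pointers, Ap[0] = 0   */
      int Ai[] = {0, 0, 1, 1, 2};          /* row indices, per column      */
      double Ax[] = {4., 1., 4., 1., 4.};  /* values of the upper triangle */
      double b[] = {5., 6., 5.};           /* rhs; exact solution is all 1 */
      int Lp[4], Parent[3], Lnz[3], Flag[3], Pattern[3], Li[10];
      double Lx[10], D[3], Y[3];

      ldl_symbolic(n, Ap, Ai, Lp, Parent, Lnz, Flag, NULL, NULL);
      if (ldl_numeric(n, Ap, Ai, Ax, Lp, Parent, Lnz, Li, Lx, D, Y,
                      Pattern, Flag, NULL, NULL) == n) {  /* n means success */
        ldl_lsolve(n, b, Lp, Li, Lx);   /* b <- L  \ b */
        ldl_dsolve(n, b, D);            /* b <- D  \ b */
        ldl_ltsolve(n, b, Lp, Li, Lx);  /* b <- L' \ b */
        printf("x = %g %g %g\n", b[0], b[1], b[2]);  /* expect 1 1 1 */
      }
      return 0;
    }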
diff --git a/extern/libmv/third_party/ldl/README.libmv b/extern/libmv/third_party/ldl/README.libmv
new file mode 100644
index 00000000000..64ece48a390
--- /dev/null
+++ b/extern/libmv/third_party/ldl/README.libmv
@@ -0,0 +1,10 @@
+Project: LDL
+URL: http://www.cise.ufl.edu/research/sparse/ldl/
+License: LGPL2.1
+Upstream version: 2.0.1 (despite the ChangeLog saying 2.0.0)
+
+Local modifications:
+
+ * Deleted everything except ldl.c, ldl.h, the license, the ChangeLog, and the
+ README.
+
diff --git a/extern/libmv/third_party/ldl/README.txt b/extern/libmv/third_party/ldl/README.txt
new file mode 100644
index 00000000000..7be8dd1f001
--- /dev/null
+++ b/extern/libmv/third_party/ldl/README.txt
@@ -0,0 +1,136 @@
+LDL Version 2.0: a sparse LDL' factorization and solve package.
+ Written in C, with both a C and MATLAB mexFunction interface.
+
+These routines are not terrifically fast (they do not use dense matrix kernels),
+but the code is very short and concise. The purpose is to illustrate the
+algorithms in a very concise and readable manner, primarily for educational
+purposes. Although the code is very concise, this package is slightly faster
+than the built-in sparse Cholesky factorization in MATLAB 6.5 (chol), when
+using the same input permutation.
+
+Requires UFconfig, in the ../UFconfig directory relative to this directory.
+
+Quick start (Unix, or Windows with Cygwin):
+
+ To compile, test, and install LDL, you may wish to first obtain a copy of
+ AMD v2.0 from http://www.cise.ufl.edu/research/sparse, and place it in the
+ ../AMD directory, relative to this directory. Next, type "make", which
+ will compile the LDL library and three demo main programs (one of which
+ requires AMD). It will also compile the LDL MATLAB mexFunction (if you
+ have MATLAB). Typing "make clean" will remove non-essential files.
+ AMD v2.0 or later is required by the demo that uses it; otherwise AMD is optional.
+
+Quick start (for MATLAB users):
+
+ To compile, test, and install the LDL mexFunctions (ldlsparse and
+ ldlsymbol), start MATLAB in this directory and type ldl_install.
+ This works on any system supported by MATLAB.
+
+--------------------------------------------------------------------------------
+
+LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
+
+LDL License:
+
+ Your use or distribution of LDL or any modified version of
+ LDL implies that you agree to this License.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
+ USA
+
+ Permission is hereby granted to use or copy this program under the
+ terms of the GNU LGPL, provided that the Copyright, this License,
+ and the Availability of the original version is retained on all copies.
+ User documentation of any code that uses this code or any modified
+ version of this code must cite the Copyright, this License, the
+ Availability note, and "Used by permission." Permission to modify
+ the code and to distribute modified code is granted, provided the
+ Copyright, this License, and the Availability note are retained,
+ and a notice that the code was modified is included.
+
+Availability:
+
+ http://www.cise.ufl.edu/research/sparse/ldl
+
+Acknowledgements:
+
+ This work was supported by the National Science Foundation, under
+ grant CCR-0203270.
+
+ Portions of this work were done while on sabbatical at Stanford University
+ and Lawrence Berkeley National Laboratory (with funding from the SciDAC
+ program). I would like to thank Gene Golub, Esmond Ng, and Horst Simon
+ for making this sabbatical possible. I would like to thank Pete Stewart
+ for his comments on a draft of this software and paper.
+
+--------------------------------------------------------------------------------
+Files and directories in this distribution:
+--------------------------------------------------------------------------------
+
+ Documentation, and compiling:
+
+ README.txt this file
+ Makefile for compiling LDL
+ ChangeLog changes since V1.0 (Dec 31, 2003)
+ License license
+ lesser.txt the GNU LGPL license
+
+ ldl_userguide.pdf user guide in PDF
+ ldl_userguide.ps user guide in postscript
+ ldl_userguide.tex user guide in Latex
+ ldl.bib bibliography for user guide
+
+ The LDL library itself:
+
+ ldl.c the C-callable routines
+ ldl.h include file for any code that calls LDL
+
+ A simple C main program that demonstrates how to use LDL:
+
+ ldlsimple.c a stand-alone C program, uses the basic features of LDL
+ ldlsimple.out output of ldlsimple
+
+ ldllsimple.c long integer version of ldlsimple.c
+
+ Demo C program, for testing LDL and providing an example of its use
+
+ ldlmain.c a stand-alone C main program that uses and tests LDL
+ Matrix a directory containing matrices used by ldlmain.c
+ ldlmain.out output of ldlmain
+ ldlamd.out output of ldlamd (ldlmain.c compiled with AMD)
+ ldllamd.out output of ldllamd (ldlmain.c compiled with AMD, long)
+
+ MATLAB-related, not required for use in a regular C program
+
+ Contents.m a list of the MATLAB-callable routines
+ ldl.m MATLAB help file for the LDL mexFunction
+ ldldemo.m MATLAB demo of how to use the LDL mexFunction
+ ldldemo.out diary output of ldldemo
+ ldltest.m to test the LDL mexFunction
+ ldltest.out diary output of ldltest
+ ldlmex.c the LDL mexFunction for MATLAB
+ ldlrow.m the numerical algorithm that LDL is based on
+ ldlmain2.m compiles and runs ldlmain.c as a MATLAB mexFunction
+ ldlmain2.out output of ldlmain2.m
+ ldlsymbolmex.c symbolic factorization using LDL (see SYMBFACT, ETREE)
+ ldlsymbol.m help file for the LDLSYMBOL mexFunction
+
+ ldl_install.m compile, install, and test LDL functions
+ ldl_make.m compile LDL (ldlsparse and ldlsymbol)
+
+ ldlsparse.m help for ldlsparse
+
+See ldl.c for a description of how to use the code from a C program. Type
+"help ldl" in MATLAB for information on how to use LDL in a MATLAB program.
diff --git a/extern/libmv/third_party/ldl/Source/ldl.c b/extern/libmv/third_party/ldl/Source/ldl.c
new file mode 100644
index 00000000000..a9b35c846ef
--- /dev/null
+++ b/extern/libmv/third_party/ldl/Source/ldl.c
@@ -0,0 +1,507 @@
+/* ========================================================================== */
+/* === ldl.c: sparse LDL' factorization and solve package =================== */
+/* ========================================================================== */
+
+/* LDL: a simple set of routines for sparse LDL' factorization. These routines
+ * are not terrifically fast (they do not use dense matrix kernels), but the
+ * code is very short. The purpose is to illustrate the algorithms in a very
+ * concise manner, primarily for educational purposes. Although the code is
+ * very concise, this package is slightly faster than the built-in sparse
+ * Cholesky factorization in MATLAB 7.0 (chol), when using the same input
+ * permutation.
+ *
+ * The routines compute the LDL' factorization of a real sparse symmetric
+ * matrix A (or PAP' if a permutation P is supplied), and solve upper
+ * and lower triangular systems with the resulting L and D factors. If A is
+ * positive definite then the factorization will be accurate. A can be
+ * indefinite (with negative values on the diagonal D), but in this case no
+ * guarantee of accuracy is provided, since no numeric pivoting is performed.
+ *
+ * The n-by-n sparse matrix A is in compressed-column form. The nonzero values
+ * in column j are stored in Ax [Ap [j] ... Ap [j+1]-1], with corresponding row
+ * indices in Ai [Ap [j] ... Ap [j+1]-1]. Ap [0] = 0 is required, and thus
+ * nz = Ap [n] is the number of nonzeros in A. Ap is an int array of size n+1.
+ * The int array Ai and the double array Ax are of size nz. This data structure
+ * is identical to the one used by MATLAB, except for the following
+ * generalizations. The row indices in each column of A need not be in any
+ * particular order, although they must be in the range 0 to n-1. Duplicate
+ * entries can be present; any duplicates are summed. That is, if row index i
+ * appears twice in a column j, then the value of A (i,j) is the sum of the two
+ * entries. The data structure used here for the input matrix A is more
+ * flexible than MATLAB's, which requires sorted columns with no duplicate
+ * entries.
+ *
+ * Only the diagonal and upper triangular part of A (or PAP' if a permutation
+ * P is provided) is accessed. The lower triangular parts of the matrix A or
+ * PAP' can be present, but they are ignored.
+ *
+ * The optional input permutation is provided as an array P of length n. If
+ * P [k] = j, then row and column j of A is the kth row and column of PAP'.
+ * If P is present then the factorization is LDL' = PAP' or L*D*L' = A(P,P) in
+ * 0-based MATLAB notation. If P is not present (a null pointer) then no
+ * permutation is performed, and the factorization is LDL' = A.
+ *
+ * The lower triangular matrix L is stored in the same compressed-column
+ * form (an int Lp array of size n+1, an int Li array of size Lp [n], and a
+ * double array Lx of the same size as Li). It has a unit diagonal, which is
+ * not stored. The row indices in each column of L are always returned in
+ * ascending order, with no duplicate entries. This format is compatible with
+ * MATLAB, except that it would be more convenient for MATLAB to include the
+ * unit diagonal of L. Doing so here would add additional complexity to the
+ * code, and is thus omitted in the interest of keeping this code short and
+ * readable.
+ *
+ * The elimination tree is held in the Parent [0..n-1] array. It is normally
+ * not required by the user, but it is required by ldl_numeric. The diagonal
+ * matrix D is held as an array D [0..n-1] of size n.
+ *
+ * --------------------
+ * C-callable routines:
+ * --------------------
+ *
+ * ldl_symbolic: Given the pattern of A, computes the Lp and Parent arrays
+ * required by ldl_numeric. Takes time proportional to the number of
+ * nonzeros in L. Computes the inverse Pinv of P if P is provided.
+ * Also returns Lnz, the count of nonzeros in each column of L below
+ * the diagonal (this is not required by ldl_numeric).
+ * ldl_numeric: Given the pattern and numerical values of A, the Lp array,
+ * the Parent array, and P and Pinv if applicable, computes the
+ * pattern and numerical values of L and D.
+ * ldl_lsolve: Solves Lx=b for a dense vector b.
+ * ldl_dsolve: Solves Dx=b for a dense vector b.
+ * ldl_ltsolve: Solves L'x=b for a dense vector b.
+ * ldl_perm: Computes x=Pb for a dense vector b.
+ * ldl_permt: Computes x=P'b for a dense vector b.
+ * ldl_valid_perm: checks the validity of a permutation vector
+ * ldl_valid_matrix: checks the validity of the sparse matrix A
+ *
+ * ----------------------------
+ * Limitations of this package:
+ * ----------------------------
+ *
+ * In the interest of keeping this code simple and readable, ldl_symbolic and
+ * ldl_numeric assume their inputs are valid. You can check your own inputs
+ * prior to calling these routines with the ldl_valid_perm and ldl_valid_matrix
+ * routines. Except for the two ldl_valid_* routines, no routine checks to see
+ * if the array arguments are present (non-NULL). And, as with any C code,
+ * no routine can determine whether the arrays are long enough or whether
+ * they overlap.
+ *
+ * ldl_numeric does, however, check the numerical factorization. It returns
+ * n if the factorization is successful. If D (k,k) is zero, then k is
+ * returned, and L is only partially computed.
+ *
+ * No pivoting to control fill-in is performed, which is often critical for
+ * obtaining good performance. I recommend that you compute the permutation P
+ * using AMD or SYMAMD (approximate minimum degree ordering routines), or an
+ * appropriate graph-partitioning based ordering. See the ldldemo.m routine for
+ * an example in MATLAB, and the ldlmain.c stand-alone C program for examples of
+ * how to find P. Routines for manipulating compressed-column matrices are
+ * available in UMFPACK. AMD, SYMAMD, UMFPACK, and this LDL package are all
+ * available at http://www.cise.ufl.edu/research/sparse.
+ *
+ * -------------------------
+ * Possible simplifications:
+ * -------------------------
+ *
+ * These routines could be made even simpler with a few additional assumptions.
+ * If no input permutation were performed, the caller would have to permute the
+ * matrix first, but the computation of Pinv, and the use of P and Pinv could be
+ * removed. If only the diagonal and upper triangular part of A or PAP' are
+ * present, then the tests in the "if (i < k)" statement in ldl_symbolic and
+ * "if (i <= k)" in ldl_numeric, are always true, and could be removed (i can
+ * equal k in ldl_symbolic, but then the body of the if statement would
+ * correctly do no work since Flag [k] == k). If we could assume that no
+ * duplicate entries are present, then the statement Y [i] += Ax [p] could be
+ * replaced with Y [i] = Ax [p] in ldl_numeric.
+ *
+ * --------------------------
+ * Description of the method:
+ * --------------------------
+ *
+ * LDL computes the symbolic factorization by finding the pattern of L one row
+ * at a time. It does this based on the following theory. Consider a sparse
+ * system Lx=b, where L, x, and b, are all sparse, and where L comes from a
+ * Cholesky (or LDL') factorization. The elimination tree (etree) of L is
+ * defined as follows. The parent of node j is the smallest k > j such that
+ * L (k,j) is nonzero. Node j has no parent if column j of L is completely zero
+ * below the diagonal (j is a root of the etree in this case). The nonzero
+ * pattern of x is the union of the paths from each node i to the root, for
+ * each nonzero b (i). To compute the numerical solution to Lx=b, we can
+ * traverse the columns of L corresponding to nonzero values of x. This
+ * traversal does not need to be done in the order 0 to n-1. It can be done in
+ * any "topological" order, such that x (i) is computed before x (j) if i is a
+ * descendant of j in the elimination tree.
+ *
+ * The row-form of the LDL' factorization is shown in the MATLAB function
+ * ldlrow.m in this LDL package. Note that row k of L is found via a sparse
+ * triangular solve of L (1:k-1, 1:k-1) \ A (1:k-1, k), to use 1-based MATLAB
+ * notation. Thus, we can start with the nonzero pattern of the kth column of
+ * A (above the diagonal), follow the paths up to the root of the etree of the
+ * (k-1)-by-(k-1) leading submatrix of L, and obtain the pattern of the kth row
+ * of L. Note that we only need the leading (k-1)-by-(k-1) submatrix of L to
+ * do this. The elimination tree can be constructed as we go.
+ *
+ * The symbolic factorization does the same thing, except that it discards the
+ * pattern of L as it is computed. It simply counts the number of nonzeros in
+ * each column of L and then constructs the Lp index array when it's done. The
+ * symbolic factorization does not need to do this in topological order.
+ * Compare ldl_symbolic with the first part of ldl_numeric, and note that the
+ * while (len > 0) loop is not present in ldl_symbolic.
+ *
+ * LDL Version 1.3, Copyright (c) 2006 by Timothy A Davis,
+ * University of Florida. All Rights Reserved. Developed while on sabbatical
+ * at Stanford University and Lawrence Berkeley National Laboratory. Refer to
+ * the README file for the License. Available at
+ * http://www.cise.ufl.edu/research/sparse.
+ */
+
+#include "ldl.h"
+
+/* ========================================================================== */
+/* === ldl_symbolic ========================================================= */
+/* ========================================================================== */
+
+/* The input to this routine is a sparse matrix A, stored in column form, and
+ * an optional permutation P. The output is the elimination tree
+ * and the number of nonzeros in each column of L. Parent [i] = k if k is the
+ * parent of i in the tree. The Parent array is required by ldl_numeric.
+ * Lnz [k] gives the number of nonzeros in the kth column of L, excluding the
+ * diagonal.
+ *
+ * One workspace vector (Flag) of size n is required.
+ *
+ * If P is NULL, then it is ignored. The factorization will be LDL' = A.
+ * Pinv is not computed. In this case, neither P nor Pinv are required by
+ * ldl_numeric.
+ *
+ * If P is not NULL, then it is assumed to be a valid permutation. If
+ * row and column j of A is the kth pivot, then P [k] = j. The factorization
+ * will be LDL' = PAP', or A (p,p) in MATLAB notation. The inverse permutation
+ * Pinv is computed, where Pinv [j] = k if P [k] = j. In this case, both P
+ * and Pinv are required as inputs to ldl_numeric.
+ *
+ * The floating-point operation count of the subsequent call to ldl_numeric
+ * is not returned, but could be computed after ldl_symbolic is done. It is
+ * the sum of (Lnz [k]) * (Lnz [k] + 2) for k = 0 to n-1.
+ */
+
+void LDL_symbolic
+(
+ LDL_int n, /* A and L are n-by-n, where n >= 0 */
+ LDL_int Ap [ ], /* input of size n+1, not modified */
+ LDL_int Ai [ ], /* input of size nz=Ap[n], not modified */
+ LDL_int Lp [ ], /* output of size n+1, not defined on input */
+ LDL_int Parent [ ], /* output of size n, not defined on input */
+ LDL_int Lnz [ ], /* output of size n, not defined on input */
+ LDL_int Flag [ ], /* workspace of size n, not defn. on input or output */
+ LDL_int P [ ], /* optional input of size n */
+ LDL_int Pinv [ ] /* optional output of size n (used if P is not NULL) */
+)
+{
+ LDL_int i, k, p, kk, p2 ;
+ if (P)
+ {
+ /* If P is present then compute Pinv, the inverse of P */
+ for (k = 0 ; k < n ; k++)
+ {
+ Pinv [P [k]] = k ;
+ }
+ }
+ for (k = 0 ; k < n ; k++)
+ {
+ /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
+ Parent [k] = -1 ; /* parent of k is not yet known */
+ Flag [k] = k ; /* mark node k as visited */
+ Lnz [k] = 0 ; /* count of nonzeros in column k of L */
+ kk = (P) ? (P [k]) : (k) ; /* kth original, or permuted, column */
+ p2 = Ap [kk+1] ;
+ for (p = Ap [kk] ; p < p2 ; p++)
+ {
+ /* A (i,k) is nonzero (original or permuted A) */
+ i = (Pinv) ? (Pinv [Ai [p]]) : (Ai [p]) ;
+ if (i < k)
+ {
+ /* follow path from i to root of etree, stop at flagged node */
+ for ( ; Flag [i] != k ; i = Parent [i])
+ {
+ /* find parent of i if not yet determined */
+ if (Parent [i] == -1) Parent [i] = k ;
+ Lnz [i]++ ; /* L (k,i) is nonzero */
+ Flag [i] = k ; /* mark i as visited */
+ }
+ }
+ }
+ }
+ /* construct Lp index array from Lnz column counts */
+ Lp [0] = 0 ;
+ for (k = 0 ; k < n ; k++)
+ {
+ Lp [k+1] = Lp [k] + Lnz [k] ;
+ }
+}
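+
+/* The floating-point operation count of the subsequent LDL_numeric call,
+ * mentioned in the comment above, can be computed once LDL_symbolic
+ * returns; a sketch: */
+#if 0
+double flops = 0 ;
+LDL_int k ;
+for (k = 0 ; k < n ; k++)
+{
+    flops += ((double) Lnz [k]) * (Lnz [k] + 2) ;
+}
+#endif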
+
+
+/* ========================================================================== */
+/* === ldl_numeric ========================================================== */
+/* ========================================================================== */
+
+/* Given a sparse matrix A (the arguments n, Ap, Ai, and Ax) and its symbolic
+ * analysis (Lp and Parent, and optionally P and Pinv), compute the numeric LDL'
+ * factorization of A or PAP'. The outputs of this routine are arguments Li,
+ * Lx, and D. It also requires three size-n workspaces (Y, Pattern, and Flag).
+ */
+
+LDL_int LDL_numeric /* returns n if successful, k if D (k,k) is zero */
+(
+ LDL_int n, /* A and L are n-by-n, where n >= 0 */
+ LDL_int Ap [ ], /* input of size n+1, not modified */
+ LDL_int Ai [ ], /* input of size nz=Ap[n], not modified */
+ double Ax [ ], /* input of size nz=Ap[n], not modified */
+ LDL_int Lp [ ], /* input of size n+1, not modified */
+ LDL_int Parent [ ], /* input of size n, not modified */
+ LDL_int Lnz [ ], /* output of size n, not defn. on input */
+ LDL_int Li [ ], /* output of size lnz=Lp[n], not defined on input */
+ double Lx [ ], /* output of size lnz=Lp[n], not defined on input */
+ double D [ ], /* output of size n, not defined on input */
+ double Y [ ], /* workspace of size n, not defn. on input or output */
+ LDL_int Pattern [ ],/* workspace of size n, not defn. on input or output */
+ LDL_int Flag [ ], /* workspace of size n, not defn. on input or output */
+ LDL_int P [ ], /* optional input of size n */
+ LDL_int Pinv [ ] /* optional input of size n */
+)
+{
+ double yi, l_ki ;
+ LDL_int i, k, p, kk, p2, len, top ;
+ for (k = 0 ; k < n ; k++)
+ {
+ /* compute nonzero Pattern of kth row of L, in topological order */
+ Y [k] = 0.0 ; /* Y(0:k) is now all zero */
+ top = n ; /* stack for pattern is empty */
+ Flag [k] = k ; /* mark node k as visited */
+ Lnz [k] = 0 ; /* count of nonzeros in column k of L */
+ kk = (P) ? (P [k]) : (k) ; /* kth original, or permuted, column */
+ p2 = Ap [kk+1] ;
+ for (p = Ap [kk] ; p < p2 ; p++)
+ {
+ i = (Pinv) ? (Pinv [Ai [p]]) : (Ai [p]) ; /* get A(i,k) */
+ if (i <= k)
+ {
+ Y [i] += Ax [p] ; /* scatter A(i,k) into Y (sum duplicates) */
+ for (len = 0 ; Flag [i] != k ; i = Parent [i])
+ {
+ Pattern [len++] = i ; /* L(k,i) is nonzero */
+ Flag [i] = k ; /* mark i as visited */
+ }
+ while (len > 0) Pattern [--top] = Pattern [--len] ;
+ }
+ }
+ /* compute numerical values kth row of L (a sparse triangular solve) */
+ D [k] = Y [k] ; /* get D(k,k) and clear Y(k) */
+ Y [k] = 0.0 ;
+ for ( ; top < n ; top++)
+ {
+ i = Pattern [top] ; /* Pattern [top:n-1] is pattern of L(:,k) */
+ yi = Y [i] ; /* get and clear Y(i) */
+ Y [i] = 0.0 ;
+ p2 = Lp [i] + Lnz [i] ;
+ for (p = Lp [i] ; p < p2 ; p++)
+ {
+ Y [Li [p]] -= Lx [p] * yi ;
+ }
+ l_ki = yi / D [i] ; /* the nonzero entry L(k,i) */
+ D [k] -= l_ki * yi ;
+ Li [p] = k ; /* store L(k,i) in column form of L */
+ Lx [p] = l_ki ;
+ Lnz [i]++ ; /* increment count of nonzeros in col i */
+ }
+ if (D [k] == 0.0) return (k) ; /* failure, D(k,k) is zero */
+ }
+ return (n) ; /* success, diagonal of D is all nonzero */
+}
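+
+/* The return value must be checked before using the factors; a sketch, with
+ * d as the value returned by LDL_numeric: */
+#if 0
+if (d != n)
+{
+    /* D (d,d) is zero: the factorization is only partial, and a
+     * subsequent LDL_dsolve would divide by zero */
+}
+#endif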
+
+
+/* ========================================================================== */
+/* === ldl_lsolve: solve Lx=b ============================================== */
+/* ========================================================================== */
+
+void LDL_lsolve
+(
+ LDL_int n, /* L is n-by-n, where n >= 0 */
+ double X [ ], /* size n. right-hand-side on input, soln. on output */
+ LDL_int Lp [ ], /* input of size n+1, not modified */
+ LDL_int Li [ ], /* input of size lnz=Lp[n], not modified */
+ double Lx [ ] /* input of size lnz=Lp[n], not modified */
+)
+{
+ LDL_int j, p, p2 ;
+ for (j = 0 ; j < n ; j++)
+ {
+ p2 = Lp [j+1] ;
+ for (p = Lp [j] ; p < p2 ; p++)
+ {
+ X [Li [p]] -= Lx [p] * X [j] ;
+ }
+ }
+}
+
+
+/* ========================================================================== */
+/* === ldl_dsolve: solve Dx=b ============================================== */
+/* ========================================================================== */
+
+void LDL_dsolve
+(
+ LDL_int n, /* D is n-by-n, where n >= 0 */
+ double X [ ], /* size n. right-hand-side on input, soln. on output */
+ double D [ ] /* input of size n, not modified */
+)
+{
+ LDL_int j ;
+ for (j = 0 ; j < n ; j++)
+ {
+ X [j] /= D [j] ;
+ }
+}
+
+
+/* ========================================================================== */
+/* === ldl_ltsolve: solve L'x=b ============================================ */
+/* ========================================================================== */
+
+void LDL_ltsolve
+(
+ LDL_int n, /* L is n-by-n, where n >= 0 */
+ double X [ ], /* size n. right-hand-side on input, soln. on output */
+ LDL_int Lp [ ], /* input of size n+1, not modified */
+ LDL_int Li [ ], /* input of size lnz=Lp[n], not modified */
+ double Lx [ ] /* input of size lnz=Lp[n], not modified */
+)
+{
+ LDL_int j, p, p2 ;
+ for (j = n-1 ; j >= 0 ; j--)
+ {
+ p2 = Lp [j+1] ;
+ for (p = Lp [j] ; p < p2 ; p++)
+ {
+ X [j] -= Lx [p] * X [Li [p]] ;
+ }
+ }
+}
+
+
+/* ========================================================================== */
+/* === ldl_perm: permute a vector, x=Pb ===================================== */
+/* ========================================================================== */
+
+void LDL_perm
+(
+ LDL_int n, /* size of X, B, and P */
+ double X [ ], /* output of size n. */
+ double B [ ], /* input of size n. */
+ LDL_int P [ ] /* input permutation array of size n. */
+)
+{
+ LDL_int j ;
+ for (j = 0 ; j < n ; j++)
+ {
+ X [j] = B [P [j]] ;
+ }
+}
+
+
+/* ========================================================================== */
+/* === ldl_permt: permute a vector, x=P'b =================================== */
+/* ========================================================================== */
+
+void LDL_permt
+(
+ LDL_int n, /* size of X, B, and P */
+ double X [ ], /* output of size n. */
+ double B [ ], /* input of size n. */
+ LDL_int P [ ] /* input permutation array of size n. */
+)
+{
+ LDL_int j ;
+ for (j = 0 ; j < n ; j++)
+ {
+ X [P [j]] = B [j] ;
+ }
+}
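+
+/* With a fill-reducing permutation P the factorization is LDL' = PAP', and
+ * Ax=b is solved in five steps; a sketch, assuming the factors from
+ * LDL_symbolic/LDL_numeric and two size-n vectors B (right-hand side) and
+ * X (workspace): */
+#if 0
+LDL_perm (n, X, B, P) ;                 /* X = P*b               */
+LDL_lsolve (n, X, Lp, Li, Lx) ;         /* X = L\X               */
+LDL_dsolve (n, X, D) ;                  /* X = D\X               */
+LDL_ltsolve (n, X, Lp, Li, Lx) ;        /* X = L'\X              */
+LDL_permt (n, B, X, P) ;                /* B = P'*X, that is A\b */
+#endif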
+
+
+/* ========================================================================== */
+/* === ldl_valid_perm: check if a permutation vector is valid =============== */
+/* ========================================================================== */
+
+LDL_int LDL_valid_perm /* returns 1 if valid, 0 otherwise */
+(
+ LDL_int n,
+ LDL_int P [ ], /* input of size n, a permutation of 0:n-1 */
+ LDL_int Flag [ ] /* workspace of size n */
+)
+{
+ LDL_int j, k ;
+ if (n < 0 || !Flag)
+ {
+ return (0) ; /* n must be >= 0, and Flag must be present */
+ }
+ if (!P)
+ {
+ return (1) ; /* If NULL, P is assumed to be the identity perm. */
+ }
+ for (j = 0 ; j < n ; j++)
+ {
+ Flag [j] = 0 ; /* clear the Flag array */
+ }
+ for (k = 0 ; k < n ; k++)
+ {
+ j = P [k] ;
+ if (j < 0 || j >= n || Flag [j] != 0)
+ {
+ return (0) ; /* P is not valid */
+ }
+ Flag [j] = 1 ;
+ }
+ return (1) ; /* P is valid */
+}
+
+
+/* ========================================================================== */
+/* === ldl_valid_matrix: check if a sparse matrix is valid ================== */
+/* ========================================================================== */
+
+/* This routine checks to see if a sparse matrix A is valid for input to
+ * ldl_symbolic and ldl_numeric. It returns 1 if the matrix is valid, 0
+ * otherwise. A is in sparse column form. The numerical values in column j
+ * are stored in Ax [Ap [j] ... Ap [j+1]-1], with row indices in
+ * Ai [Ap [j] ... Ap [j+1]-1]. The Ax array is not checked.
+ */
+
+LDL_int LDL_valid_matrix
+(
+ LDL_int n,
+ LDL_int Ap [ ],
+ LDL_int Ai [ ]
+)
+{
+ LDL_int j, p ;
+ if (n < 0 || !Ap || !Ai || Ap [0] != 0)
+ {
+ return (0) ; /* n must be >= 0, and Ap and Ai must be present */
+ }
+ for (j = 0 ; j < n ; j++)
+ {
+ if (Ap [j] > Ap [j+1])
+ {
+ return (0) ; /* Ap must be monotonically nondecreasing */
+ }
+ }
+ for (p = 0 ; p < Ap [n] ; p++)
+ {
+ if (Ai [p] < 0 || Ai [p] >= n)
+ {
+ return (0) ; /* row indices must be in the range 0 to n-1 */
+ }
+ }
+ return (1) ; /* matrix is valid */
+}
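+
+/* Both validity checks are meant to run before the factorization; a sketch
+ * using the same arrays as ldl_symbolic (Flag is the size-n workspace): */
+#if 0
+if (!LDL_valid_matrix (n, Ap, Ai) || !LDL_valid_perm (n, P, Flag))
+{
+    /* invalid input: do not call LDL_symbolic or LDL_numeric */
+}
+#endif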
diff --git a/extern/libmv/third_party/msinttypes/README.libmv b/extern/libmv/third_party/msinttypes/README.libmv
new file mode 100644
index 00000000000..423f599b4ad
--- /dev/null
+++ b/extern/libmv/third_party/msinttypes/README.libmv
@@ -0,0 +1,5 @@
+Project: msinttypes
+URL: http://code.google.com/p/msinttypes/
+License: New BSD License
+Upstream version: r24
+Local modifications: None.
diff --git a/extern/libmv/third_party/msinttypes/inttypes.h b/extern/libmv/third_party/msinttypes/inttypes.h
new file mode 100644
index 00000000000..0e8af69cb07
--- /dev/null
+++ b/extern/libmv/third_party/msinttypes/inttypes.h
@@ -0,0 +1,305 @@
+// ISO C9x compliant inttypes.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_INTTYPES_H_ // [
+#define _MSC_INTTYPES_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <stdint.h>
+
+// 7.8 Format conversion of integer types
+
+typedef struct {
+ intmax_t quot;
+ intmax_t rem;
+} imaxdiv_t;
+
+// 7.8.1 Macros for format specifiers
+
+#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
+
+// The fprintf macros for signed integers are:
+#define PRId8 "d"
+#define PRIi8 "i"
+#define PRIdLEAST8 "d"
+#define PRIiLEAST8 "i"
+#define PRIdFAST8 "d"
+#define PRIiFAST8 "i"
+
+#define PRId16 "hd"
+#define PRIi16 "hi"
+#define PRIdLEAST16 "hd"
+#define PRIiLEAST16 "hi"
+#define PRIdFAST16 "hd"
+#define PRIiFAST16 "hi"
+
+#define PRId32 "I32d"
+#define PRIi32 "I32i"
+#define PRIdLEAST32 "I32d"
+#define PRIiLEAST32 "I32i"
+#define PRIdFAST32 "I32d"
+#define PRIiFAST32 "I32i"
+
+#define PRId64 "I64d"
+#define PRIi64 "I64i"
+#define PRIdLEAST64 "I64d"
+#define PRIiLEAST64 "I64i"
+#define PRIdFAST64 "I64d"
+#define PRIiFAST64 "I64i"
+
+#define PRIdMAX "I64d"
+#define PRIiMAX "I64i"
+
+#define PRIdPTR "Id"
+#define PRIiPTR "Ii"
+
+// The fprintf macros for unsigned integers are:
+#define PRIo8 "o"
+#define PRIu8 "u"
+#define PRIx8 "x"
+#define PRIX8 "X"
+#define PRIoLEAST8 "o"
+#define PRIuLEAST8 "u"
+#define PRIxLEAST8 "x"
+#define PRIXLEAST8 "X"
+#define PRIoFAST8 "o"
+#define PRIuFAST8 "u"
+#define PRIxFAST8 "x"
+#define PRIXFAST8 "X"
+
+#define PRIo16 "ho"
+#define PRIu16 "hu"
+#define PRIx16 "hx"
+#define PRIX16 "hX"
+#define PRIoLEAST16 "ho"
+#define PRIuLEAST16 "hu"
+#define PRIxLEAST16 "hx"
+#define PRIXLEAST16 "hX"
+#define PRIoFAST16 "ho"
+#define PRIuFAST16 "hu"
+#define PRIxFAST16 "hx"
+#define PRIXFAST16 "hX"
+
+#define PRIo32 "I32o"
+#define PRIu32 "I32u"
+#define PRIx32 "I32x"
+#define PRIX32 "I32X"
+#define PRIoLEAST32 "I32o"
+#define PRIuLEAST32 "I32u"
+#define PRIxLEAST32 "I32x"
+#define PRIXLEAST32 "I32X"
+#define PRIoFAST32 "I32o"
+#define PRIuFAST32 "I32u"
+#define PRIxFAST32 "I32x"
+#define PRIXFAST32 "I32X"
+
+#define PRIo64 "I64o"
+#define PRIu64 "I64u"
+#define PRIx64 "I64x"
+#define PRIX64 "I64X"
+#define PRIoLEAST64 "I64o"
+#define PRIuLEAST64 "I64u"
+#define PRIxLEAST64 "I64x"
+#define PRIXLEAST64 "I64X"
+#define PRIoFAST64 "I64o"
+#define PRIuFAST64 "I64u"
+#define PRIxFAST64 "I64x"
+#define PRIXFAST64 "I64X"
+
+#define PRIoMAX "I64o"
+#define PRIuMAX "I64u"
+#define PRIxMAX "I64x"
+#define PRIXMAX "I64X"
+
+#define PRIoPTR "Io"
+#define PRIuPTR "Iu"
+#define PRIxPTR "Ix"
+#define PRIXPTR "IX"
+
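+// The macros above are used via string-literal concatenation inside a
+// format string; a small sketch (illustrative, not part of this header):
+#if 0
+#include <stdio.h>
+void print_example(void)
+{
+  int64_t v = 42;
+  printf("v = %" PRId64 "\n", v);  // the format expands to "v = %I64d\n"
+}
+#endif
+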
+// The fscanf macros for signed integers are:
+#define SCNd8 "d"
+#define SCNi8 "i"
+#define SCNdLEAST8 "d"
+#define SCNiLEAST8 "i"
+#define SCNdFAST8 "d"
+#define SCNiFAST8 "i"
+
+#define SCNd16 "hd"
+#define SCNi16 "hi"
+#define SCNdLEAST16 "hd"
+#define SCNiLEAST16 "hi"
+#define SCNdFAST16 "hd"
+#define SCNiFAST16 "hi"
+
+#define SCNd32 "ld"
+#define SCNi32 "li"
+#define SCNdLEAST32 "ld"
+#define SCNiLEAST32 "li"
+#define SCNdFAST32 "ld"
+#define SCNiFAST32 "li"
+
+#define SCNd64 "I64d"
+#define SCNi64 "I64i"
+#define SCNdLEAST64 "I64d"
+#define SCNiLEAST64 "I64i"
+#define SCNdFAST64 "I64d"
+#define SCNiFAST64 "I64i"
+
+#define SCNdMAX "I64d"
+#define SCNiMAX "I64i"
+
+#ifdef _WIN64 // [
+# define SCNdPTR "I64d"
+# define SCNiPTR "I64i"
+#else // _WIN64 ][
+# define SCNdPTR "ld"
+# define SCNiPTR "li"
+#endif // _WIN64 ]
+
+// The fscanf macros for unsigned integers are:
+#define SCNo8 "o"
+#define SCNu8 "u"
+#define SCNx8 "x"
+#define SCNX8 "X"
+#define SCNoLEAST8 "o"
+#define SCNuLEAST8 "u"
+#define SCNxLEAST8 "x"
+#define SCNXLEAST8 "X"
+#define SCNoFAST8 "o"
+#define SCNuFAST8 "u"
+#define SCNxFAST8 "x"
+#define SCNXFAST8 "X"
+
+#define SCNo16 "ho"
+#define SCNu16 "hu"
+#define SCNx16 "hx"
+#define SCNX16 "hX"
+#define SCNoLEAST16 "ho"
+#define SCNuLEAST16 "hu"
+#define SCNxLEAST16 "hx"
+#define SCNXLEAST16 "hX"
+#define SCNoFAST16 "ho"
+#define SCNuFAST16 "hu"
+#define SCNxFAST16 "hx"
+#define SCNXFAST16 "hX"
+
+#define SCNo32 "lo"
+#define SCNu32 "lu"
+#define SCNx32 "lx"
+#define SCNX32 "lX"
+#define SCNoLEAST32 "lo"
+#define SCNuLEAST32 "lu"
+#define SCNxLEAST32 "lx"
+#define SCNXLEAST32 "lX"
+#define SCNoFAST32 "lo"
+#define SCNuFAST32 "lu"
+#define SCNxFAST32 "lx"
+#define SCNXFAST32 "lX"
+
+#define SCNo64 "I64o"
+#define SCNu64 "I64u"
+#define SCNx64 "I64x"
+#define SCNX64 "I64X"
+#define SCNoLEAST64 "I64o"
+#define SCNuLEAST64 "I64u"
+#define SCNxLEAST64 "I64x"
+#define SCNXLEAST64 "I64X"
+#define SCNoFAST64 "I64o"
+#define SCNuFAST64 "I64u"
+#define SCNxFAST64 "I64x"
+#define SCNXFAST64 "I64X"
+
+#define SCNoMAX "I64o"
+#define SCNuMAX "I64u"
+#define SCNxMAX "I64x"
+#define SCNXMAX "I64X"
+
+#ifdef _WIN64 // [
+# define SCNoPTR "I64o"
+# define SCNuPTR "I64u"
+# define SCNxPTR "I64x"
+# define SCNXPTR "I64X"
+#else // _WIN64 ][
+# define SCNoPTR "lo"
+# define SCNuPTR "lu"
+# define SCNxPTR "lx"
+# define SCNXPTR "lX"
+#endif // _WIN64 ]
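+
+// The SCN macros are used the same way with the scanf family; a small
+// sketch (illustrative, not part of this header):
+#if 0
+#include <stdio.h>
+void scan_example(const char *s)
+{
+  int64_t v;
+  sscanf(s, "%" SCNd64, &v);  // the format expands to "%I64d"
+}
+#endif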
+
+#endif // __STDC_FORMAT_MACROS ]
+
+// 7.8.2 Functions for greatest-width integer types
+
+// 7.8.2.1 The imaxabs function
+#define imaxabs _abs64
+
+// 7.8.2.2 The imaxdiv function
+
+// This is a modified version of the div() function from Microsoft's div.c found
+// in %MSVC.NET%\crt\src\div.c
+#ifdef STATIC_IMAXDIV // [
+static
+#else // STATIC_IMAXDIV ][
+_inline
+#endif // STATIC_IMAXDIV ]
+imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
+{
+ imaxdiv_t result;
+
+ result.quot = numer / denom;
+ result.rem = numer % denom;
+
+ if (numer < 0 && result.rem > 0) {
+ // did division wrong; must fix up
+ ++result.quot;
+ result.rem -= denom;
+ }
+
+ return result;
+}
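+
+// The fix-up above makes the quotient truncate toward zero even on
+// compilers whose integer division rounds toward negative infinity; an
+// illustrative example:
+#if 0
+imaxdiv_t r = imaxdiv(-7, 2);  // r.quot == -3, r.rem == -1
+#endif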
+
+// 7.8.2.3 The strtoimax and strtoumax functions
+#define strtoimax _strtoi64
+#define strtoumax _strtoui64
+
+// 7.8.2.4 The wcstoimax and wcstoumax functions
+#define wcstoimax _wcstoi64
+#define wcstoumax _wcstoui64
+
+
+#endif // _MSC_INTTYPES_H_ ]
diff --git a/extern/libmv/third_party/msinttypes/stdint.h b/extern/libmv/third_party/msinttypes/stdint.h
new file mode 100644
index 00000000000..e236bb00015
--- /dev/null
+++ b/extern/libmv/third_party/msinttypes/stdint.h
@@ -0,0 +1,247 @@
+// ISO C9x compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2008 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
+// compiling for ARM, we should wrap the <wchar.h> include with 'extern "C++" {}'
+// or the compiler gives many errors like this:
+// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#ifdef __cplusplus
+extern "C" {
+#endif
+# include <wchar.h>
+#ifdef __cplusplus
+}
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+# define _W64 __w64
+# else
+# define _W64
+# endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+
+// Visual Studio 6 and Embedded Visual C++ 4 don't
+// realize that, e.g., char has the same size as __int8,
+// so we give up on __intX for them.
+#if (_MSC_VER < 1300)
+ typedef char int8_t;
+ typedef short int16_t;
+ typedef int int32_t;
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+#else
+ typedef __int8 int8_t;
+ typedef __int16 int16_t;
+ typedef __int32 int32_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+#endif
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t int_least8_t;
+typedef int16_t int_least16_t;
+typedef int32_t int_least32_t;
+typedef int64_t int_least64_t;
+typedef uint8_t uint_least8_t;
+typedef uint16_t uint_least16_t;
+typedef uint32_t uint_least32_t;
+typedef uint64_t uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t int_fast8_t;
+typedef int16_t int_fast16_t;
+typedef int32_t int_fast32_t;
+typedef int64_t int_fast64_t;
+typedef uint8_t uint_fast8_t;
+typedef uint16_t uint_fast16_t;
+typedef uint32_t uint_fast32_t;
+typedef uint64_t uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+ typedef __int64 intptr_t;
+ typedef unsigned __int64 uintptr_t;
+#else // _WIN64 ][
+ typedef _W64 int intptr_t;
+ typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN ((int8_t)_I8_MIN)
+#define INT8_MAX _I8_MAX
+#define INT16_MIN ((int16_t)_I16_MIN)
+#define INT16_MAX _I16_MAX
+#define INT32_MIN ((int32_t)_I32_MIN)
+#define INT32_MAX _I32_MAX
+#define INT64_MIN ((int64_t)_I64_MIN)
+#define INT64_MAX _I64_MAX
+#define UINT8_MAX _UI8_MAX
+#define UINT16_MAX _UI16_MAX
+#define UINT32_MAX _UI32_MAX
+#define UINT64_MAX _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+# define INTPTR_MIN INT64_MIN
+# define INTPTR_MAX INT64_MAX
+# define UINTPTR_MAX UINT64_MAX
+#else // _WIN64 ][
+# define INTPTR_MIN INT32_MIN
+# define INTPTR_MAX INT32_MAX
+# define UINTPTR_MAX UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+# define PTRDIFF_MIN _I64_MIN
+# define PTRDIFF_MAX _I64_MAX
+#else // _WIN64 ][
+# define PTRDIFF_MIN _I32_MIN
+# define PTRDIFF_MAX _I32_MAX
+#endif // _WIN64 ]
+
+#define SIG_ATOMIC_MIN INT_MIN
+#define SIG_ATOMIC_MAX INT_MAX
+
+#ifndef SIZE_MAX // [
+# ifdef _WIN64 // [
+# define SIZE_MAX _UI64_MAX
+# else // _WIN64 ][
+# define SIZE_MAX _UI32_MAX
+# endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+# define WCHAR_MIN 0
+#endif // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+# define WCHAR_MAX _UI16_MAX
+#endif // WCHAR_MAX ]
+
+#define WINT_MIN 0
+#define WINT_MAX _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Limits of other integer types
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+#define INTMAX_C INT64_C
+#define UINTMAX_C UINT64_C
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+
+#endif // _MSC_STDINT_H_ ]
diff --git a/extern/libmv/third_party/ssba/COPYING.TXT b/extern/libmv/third_party/ssba/COPYING.TXT
new file mode 100644
index 00000000000..fc8a5de7edf
--- /dev/null
+++ b/extern/libmv/third_party/ssba/COPYING.TXT
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/extern/libmv/third_party/ssba/Geometry/v3d_cameramatrix.h b/extern/libmv/third_party/ssba/Geometry/v3d_cameramatrix.h
new file mode 100644
index 00000000000..448ae9714e5
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Geometry/v3d_cameramatrix.h
@@ -0,0 +1,204 @@
+// -*- C++ -*-
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef V3D_CAMERA_MATRIX_H
+#define V3D_CAMERA_MATRIX_H
+
+#include "Math/v3d_linear.h"
+#include "Geometry/v3d_distortion.h"
+
+namespace V3D
+{
+
+ struct CameraMatrix
+ {
+ CameraMatrix()
+ {
+ makeIdentityMatrix(_K);
+ makeIdentityMatrix(_R);
+ makeZeroVector(_T);
+ this->updateCachedValues(true, true);
+ }
+
+ CameraMatrix(double f, double cx, double cy)
+ {
+ makeIdentityMatrix(_K);
+ _K[0][0] = f;
+ _K[1][1] = f;
+ _K[0][2] = cx;
+ _K[1][2] = cy;
+ makeIdentityMatrix(_R);
+ makeZeroVector(_T);
+ this->updateCachedValues(true, true);
+ }
+
+ CameraMatrix(Matrix3x3d const& K,
+ Matrix3x3d const& R,
+ Vector3d const& T)
+ : _K(K), _R(R), _T(T)
+ {
+ this->updateCachedValues(true, true);
+ }
+
+ void setIntrinsic(Matrix3x3d const& K) { _K = K; this->updateCachedValues(true, false); }
+ void setRotation(Matrix3x3d const& R) { _R = R; this->updateCachedValues(false, true); }
+ void setTranslation(Vector3d const& T) { _T = T; this->updateCachedValues(false, true); }
+
+ template <typename Mat>
+ void setOrientation(Mat const& RT)
+ {
+ _R[0][0] = RT[0][0]; _R[0][1] = RT[0][1]; _R[0][2] = RT[0][2];
+ _R[1][0] = RT[1][0]; _R[1][1] = RT[1][1]; _R[1][2] = RT[1][2];
+ _R[2][0] = RT[2][0]; _R[2][1] = RT[2][1]; _R[2][2] = RT[2][2];
+ _T[0] = RT[0][3]; _T[1] = RT[1][3]; _T[2] = RT[2][3];
+ this->updateCachedValues(false, true);
+ }
+
+ Matrix3x3d const& getIntrinsic() const { return _K; }
+ Matrix3x3d const& getRotation() const { return _R; }
+ Vector3d const& getTranslation() const { return _T; }
+
+ Matrix3x4d getOrientation() const
+ {
+ Matrix3x4d RT;
+ RT[0][0] = _R[0][0]; RT[0][1] = _R[0][1]; RT[0][2] = _R[0][2];
+ RT[1][0] = _R[1][0]; RT[1][1] = _R[1][1]; RT[1][2] = _R[1][2];
+ RT[2][0] = _R[2][0]; RT[2][1] = _R[2][1]; RT[2][2] = _R[2][2];
+ RT[0][3] = _T[0]; RT[1][3] = _T[1]; RT[2][3] = _T[2];
+ return RT;
+ }
+
+ Matrix3x4d getProjection() const
+ {
+ Matrix3x4d const RT = this->getOrientation();
+ return _K * RT;
+ }
+
+ double getFocalLength() const { return _K[0][0]; }
+ double getAspectRatio() const { return _K[1][1] / _K[0][0]; }
+
+ Vector2d getPrincipalPoint() const
+ {
+ Vector2d pp;
+ pp[0] = _K[0][2];
+ pp[1] = _K[1][2];
+ return pp;
+ }
+
+ Vector2d projectPoint(Vector3d const& X) const
+ {
+ Vector3d q = _K*(_R*X + _T);
+ Vector2d res;
+ res[0] = q[0]/q[2]; res[1] = q[1]/q[2];
+ return res;
+ }
+
+ template <typename Distortion>
+ Vector2d projectPoint(Distortion const& distortion, Vector3d const& X) const
+ {
+ Vector3d XX = _R*X + _T;
+ Vector2d p;
+ p[0] = XX[0] / XX[2];
+ p[1] = XX[1] / XX[2];
+ p = distortion(p);
+
+ Vector2d res;
+ res[0] = _K[0][0] * p[0] + _K[0][1] * p[1] + _K[0][2];
+ res[1] = _K[1][1] * p[1] + _K[1][2];
+ return res;
+ }
+
+ Vector3d unprojectPixel(Vector2d const &p, double depth = 1) const
+ {
+ Vector3d pp;
+ pp[0] = p[0]; pp[1] = p[1]; pp[2] = 1.0;
+ Vector3d ray = _invK * pp;
+ ray[0] *= depth/ray[2];
+ ray[1] *= depth/ray[2];
+ ray[2] = depth;
+ ray = _Rt * ray;
+ return _center + ray;
+ }
+
+ Vector3d transformPointIntoCameraSpace(Vector3d const& p) const
+ {
+ return _R*p + _T;
+ }
+
+ Vector3d transformPointFromCameraSpace(Vector3d const& p) const
+ {
+ return _Rt*(p-_T);
+ }
+
+ Vector3d transformDirectionFromCameraSpace(Vector3d const& dir) const
+ {
+ return _Rt*dir;
+ }
+
+ Vector3d const& cameraCenter() const
+ {
+ return _center;
+ }
+
+ Vector3d opticalAxis() const
+ {
+ return this->transformDirectionFromCameraSpace(makeVector3(0.0, 0.0, 1.0));
+ }
+
+ Vector3d upVector() const
+ {
+ return this->transformDirectionFromCameraSpace(makeVector3(0.0, 1.0, 0.0));
+ }
+
+ Vector3d rightVector() const
+ {
+ return this->transformDirectionFromCameraSpace(makeVector3(1.0, 0.0, 0.0));
+ }
+
+ Vector3d getRay(Vector2d const& p) const
+ {
+ Vector3d pp = makeVector3(p[0], p[1], 1.0);
+ Vector3d ray = _invK * pp;
+ ray = _Rt * ray;
+ normalizeVector(ray);
+ return ray;
+ }
+
+ protected:
+ void updateCachedValues(bool intrinsic, bool orientation)
+ {
+ if (intrinsic) _invK = invertedMatrix(_K);
+
+ if (orientation)
+ {
+ makeTransposedMatrix(_R, _Rt);
+ _center = _Rt * (-1.0 * _T);
+ }
+ }
+
+ Matrix3x3d _K, _R;
+ Vector3d _T;
+ Matrix3x3d _invK, _Rt;
+ Vector3d _center;
+ }; // end struct CameraMatrix
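+
+ // A small usage sketch (illustrative, not part of SSBA): build a camera
+ // from a focal length and principal point, then project a hypothetical
+ // world point into the image.
+#if 0
+ inline Vector2d exampleProjection()
+ {
+    CameraMatrix cam(1000.0, 320.0, 240.0); // f, cx, cy
+    return cam.projectPoint(makeVector3(0.1, -0.2, 2.0)); // pixel coords
+ }
+#endif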
+
+} // end namespace V3D
+
+#endif
diff --git a/extern/libmv/third_party/ssba/Geometry/v3d_distortion.h b/extern/libmv/third_party/ssba/Geometry/v3d_distortion.h
new file mode 100644
index 00000000000..d0816558314
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Geometry/v3d_distortion.h
@@ -0,0 +1,97 @@
+// -*- C++ -*-
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef V3D_DISTORTION_H
+#define V3D_DISTORTION_H
+
+#include "Math/v3d_linear.h"
+#include "Math/v3d_linear_utils.h"
+
+namespace V3D
+{
+
+ struct StdDistortionFunction
+ {
+ double k1, k2, p1, p2;
+
+ StdDistortionFunction()
+ : k1(0), k2(0), p1(0), p2(0)
+ { }
+
+ Vector2d operator()(Vector2d const& xu) const
+ {
+ double const r2 = xu[0]*xu[0] + xu[1]*xu[1];
+ double const r4 = r2*r2;
+ double const kr = 1 + k1*r2 + k2*r4;
+
+ Vector2d xd;
+ xd[0] = kr * xu[0] + 2*p1*xu[0]*xu[1] + p2*(r2 + 2*xu[0]*xu[0]);
+ xd[1] = kr * xu[1] + 2*p2*xu[0]*xu[1] + p1*(r2 + 2*xu[1]*xu[1]);
+ return xd;
+ }
+
+ Matrix2x2d derivativeWrtRadialParameters(Vector2d const& xu) const
+ {
+ double const r2 = xu[0]*xu[0] + xu[1]*xu[1];
+ double const r4 = r2*r2;
+ //double const kr = 1 + k1*r2 + k2*r4;
+
+ Matrix2x2d deriv;
+
+ deriv[0][0] = xu[0] * r2; // d xd/d k1
+ deriv[0][1] = xu[0] * r4; // d xd/d k2
+ deriv[1][0] = xu[1] * r2; // d yd/d k1
+ deriv[1][1] = xu[1] * r4; // d yd/d k2
+ return deriv;
+ }
+
+ Matrix2x2d derivativeWrtTangentialParameters(Vector2d const& xu) const
+ {
+ double const r2 = xu[0]*xu[0] + xu[1]*xu[1];
+ //double const r4 = r2*r2;
+ //double const kr = 1 + k1*r2 + k2*r4;
+
+ Matrix2x2d deriv;
+ deriv[0][0] = 2*xu[0]*xu[1]; // d xd/d p1
+ deriv[0][1] = r2 + 2*xu[0]*xu[0]; // d xd/d p2
+ deriv[1][0] = r2 + 2*xu[1]*xu[1]; // d yd/d p1
+ deriv[1][1] = deriv[0][0]; // d yd/d p2
+ return deriv;
+ }
+
+ Matrix2x2d derivativeWrtUndistortedPoint(Vector2d const& xu) const
+ {
+ double const r2 = xu[0]*xu[0] + xu[1]*xu[1];
+ double const r4 = r2*r2;
+ double const kr = 1 + k1*r2 + k2*r4;
+ double const dkr = 2*k1 + 4*k2*r2;
+
+ Matrix2x2d deriv;
+ deriv[0][0] = kr + xu[0] * xu[0] * dkr + 2*p1*xu[1] + 6*p2*xu[0]; // d xd/d xu
+ deriv[0][1] = xu[0] * xu[1] * dkr + 2*p1*xu[0] + 2*p2*xu[1]; // d xd/d yu
+ deriv[1][0] = deriv[0][1]; // d yd/d xu
+ deriv[1][1] = kr + xu[1] * xu[1] * dkr + 6*p1*xu[1] + 2*p2*xu[0]; // d yd/d yu
+ return deriv;
+ }
+ }; // end struct StdDistortionFunction
+
+} // end namespace V3D
+
+#endif
diff --git a/extern/libmv/third_party/ssba/Geometry/v3d_metricbundle.cpp b/extern/libmv/third_party/ssba/Geometry/v3d_metricbundle.cpp
new file mode 100644
index 00000000000..1c1f0cb2627
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Geometry/v3d_metricbundle.cpp
@@ -0,0 +1,365 @@
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "Geometry/v3d_metricbundle.h"
+
+#if defined(V3DLIB_ENABLE_SUITESPARSE)
+
+namespace
+{
+
+ typedef V3D::InlineMatrix<double, 2, 4> Matrix2x4d;
+ typedef V3D::InlineMatrix<double, 4, 2> Matrix4x2d;
+ typedef V3D::InlineMatrix<double, 2, 6> Matrix2x6d;
+
+} // end namespace <>
+
+namespace V3D
+{
+
+ void
+ MetricBundleOptimizerBase::updateParametersA(VectorArray<double> const& deltaAi)
+ {
+ Vector3d T, omega;
+ Matrix3x3d R0, dR;
+
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ {
+ T = _cams[i].getTranslation();
+ T[0] += deltaAi[i][0];
+ T[1] += deltaAi[i][1];
+ T[2] += deltaAi[i][2];
+ _cams[i].setTranslation(T);
+
+ // Create incremental rotation using the Rodrigues formula.
+ R0 = _cams[i].getRotation();
+ omega[0] = deltaAi[i][3];
+ omega[1] = deltaAi[i][4];
+ omega[2] = deltaAi[i][5];
+ createRotationMatrixRodriguez(omega, dR);
+ _cams[i].setRotation(dR * R0);
+ } // end for (i)
+ } // end MetricBundleOptimizerBase::updateParametersA()
+
+ void
+ MetricBundleOptimizerBase::updateParametersB(VectorArray<double> const& deltaBj)
+ {
+ for (int j = _nNonvaryingB; j < _nParametersB; ++j)
+ {
+ _Xs[j][0] += deltaBj[j][0];
+ _Xs[j][1] += deltaBj[j][1];
+ _Xs[j][2] += deltaBj[j][2];
+ }
+ } // end MetricBundleOptimizerBase::updateParametersB()
+
+ void
+ MetricBundleOptimizerBase::poseDerivatives(int i, int j, Vector3d& XX,
+ Matrix3x6d& d_dRT, Matrix3x3d& d_dX) const
+ {
+ XX = _cams[i].transformPointIntoCameraSpace(_Xs[j]);
+
+ // See Frank Dellaert's bundle adjustment tutorial.
+ // d(dR * R0 * X + t)/d omega = -[R0 * X]_x
+ Matrix3x3d J;
+ makeCrossProductMatrix(XX - _cams[i].getTranslation(), J);
+ scaleMatrixIP(-1.0, J);
+
+ // Now the transformation from world coords into camera space is xx = Rx + T
+ // Hence the derivative of xx wrt. T is just the identity matrix.
+ makeIdentityMatrix(d_dRT);
+ copyMatrixSlice(J, 0, 0, 3, 3, d_dRT, 0, 3);
+
+ // The derivative of Rx+T wrt x is just R.
+ copyMatrix(_cams[i].getRotation(), d_dX);
+ } // end MetricBundleOptimizerBase::poseDerivatives()
+
+
+//----------------------------------------------------------------------
+
+ void
+ StdMetricBundleOptimizer::fillJacobians(Matrix<double>& Ak,
+ Matrix<double>& Bk,
+ Matrix<double>& Ck,
+ int i, int j, int k)
+ {
+ Vector3d XX;
+ Matrix3x6d d_dRT;
+ Matrix3x3d d_dX;
+ this->poseDerivatives(i, j, XX, d_dRT, d_dX);
+
+ double const f = _cams[i].getFocalLength();
+ double const ar = _cams[i].getAspectRatio();
+
+ Matrix2x3d dp_dX;
+ double const bx = f / (XX[2] * XX[2]);
+ double const by = ar * bx;
+ dp_dX[0][0] = bx * XX[2]; dp_dX[0][1] = 0; dp_dX[0][2] = -bx * XX[0];
+ dp_dX[1][0] = 0; dp_dX[1][1] = by * XX[2]; dp_dX[1][2] = -by * XX[1];
+
+ multiply_A_B(dp_dX, d_dRT, Ak);
+ multiply_A_B(dp_dX, d_dX, Bk);
+ } // end StdMetricBundleOptimizer::fillJacobians()
+
+ //----------------------------------------------------------------------
+
+ void
+ CommonInternalsMetricBundleOptimizer::fillJacobians(Matrix<double>& Ak,
+ Matrix<double>& Bk,
+ Matrix<double>& Ck,
+ int i, int j, int k)
+ {
+ double const focalLength = _K[0][0];
+
+ Vector3d XX;
+ Matrix3x6d dXX_dRT;
+ Matrix3x3d dXX_dX;
+ this->poseDerivatives(i, j, XX, dXX_dRT, dXX_dX);
+
+ Vector2d xu; // undistorted image point
+ xu[0] = XX[0] / XX[2];
+ xu[1] = XX[1] / XX[2];
+
+ Vector2d const xd = _distortion(xu); // distorted image point
+
+ Matrix2x2d dp_dxd;
+ dp_dxd[0][0] = focalLength; dp_dxd[0][1] = 0;
+ dp_dxd[1][0] = 0; dp_dxd[1][1] = _cachedAspectRatio * focalLength;
+
+ {
+ // First, let's do the derivative wrt the structure and motion parameters.
+ Matrix2x3d dxu_dXX;
+ dxu_dXX[0][0] = 1.0f / XX[2]; dxu_dXX[0][1] = 0; dxu_dXX[0][2] = -XX[0]/(XX[2]*XX[2]);
+ dxu_dXX[1][0] = 0; dxu_dXX[1][1] = 1.0f / XX[2]; dxu_dXX[1][2] = -XX[1]/(XX[2]*XX[2]);
+
+ Matrix2x2d dxd_dxu = _distortion.derivativeWrtUndistortedPoint(xu);
+
+ Matrix2x2d dp_dxu = dp_dxd * dxd_dxu;
+ Matrix2x3d dp_dXX = dp_dxu * dxu_dXX;
+
+ multiply_A_B(dp_dXX, dXX_dRT, Ak);
+ multiply_A_B(dp_dXX, dXX_dX, Bk);
+ } // end scope
+
+ switch (_mode)
+ {
+ case FULL_BUNDLE_RADIAL_TANGENTIAL:
+ {
+ Matrix2x2d dxd_dp1p2 = _distortion.derivativeWrtTangentialParameters(xu);
+ Matrix2x2d d_dp1p2 = dp_dxd * dxd_dp1p2;
+ copyMatrixSlice(d_dp1p2, 0, 0, 2, 2, Ck, 0, 5);
+ // No break here!
+ }
+ case FULL_BUNDLE_RADIAL:
+ {
+ Matrix2x2d dxd_dk1k2 = _distortion.derivativeWrtRadialParameters(xu);
+ Matrix2x2d d_dk1k2 = dp_dxd * dxd_dk1k2;
+ copyMatrixSlice(d_dk1k2, 0, 0, 2, 2, Ck, 0, 3);
+ // No break here!
+ }
+ case FULL_BUNDLE_FOCAL_LENGTH_PP:
+ {
+ Ck[0][1] = 1; Ck[0][2] = 0;
+ Ck[1][1] = 0; Ck[1][2] = 1;
+ // No break here!
+ }
+ case FULL_BUNDLE_FOCAL_LENGTH:
+ {
+ Ck[0][0] = xd[0];
+ Ck[1][0] = xd[1];
+ }
+ case FULL_BUNDLE_METRIC:
+ {
+ }
+ } // end switch
+ } // end CommonInternalsMetricBundleOptimizer::fillJacobians()
+
+ void
+ CommonInternalsMetricBundleOptimizer::updateParametersC(Vector<double> const& deltaC)
+ {
+ switch (_mode)
+ {
+ case FULL_BUNDLE_RADIAL_TANGENTIAL:
+ {
+ _distortion.p1 += deltaC[5];
+ _distortion.p2 += deltaC[6];
+ // No break here!
+ }
+ case FULL_BUNDLE_RADIAL:
+ {
+ _distortion.k1 += deltaC[3];
+ _distortion.k2 += deltaC[4];
+ // No break here!
+ }
+ case FULL_BUNDLE_FOCAL_LENGTH_PP:
+ {
+ _K[0][2] += deltaC[1];
+ _K[1][2] += deltaC[2];
+ // No break here!
+ }
+ case FULL_BUNDLE_FOCAL_LENGTH:
+ {
+ _K[0][0] += deltaC[0];
+ _K[1][1] = _cachedAspectRatio * _K[0][0];
+ }
+ case FULL_BUNDLE_METRIC:
+ {
+ }
+ } // end switch
+ } // end CommonInternalsMetricBundleOptimizer::updateParametersC()
+
+ //----------------------------------------------------------------------
+
+ void
+ VaryingInternalsMetricBundleOptimizer::fillJacobians(Matrix<double>& Ak,
+ Matrix<double>& Bk,
+ Matrix<double>& Ck,
+ int i, int j, int k)
+ {
+ Vector3d XX;
+ Matrix3x6d dXX_dRT;
+ Matrix3x3d dXX_dX;
+ this->poseDerivatives(i, j, XX, dXX_dRT, dXX_dX);
+
+ Vector2d xu; // undistorted image point
+ xu[0] = XX[0] / XX[2];
+ xu[1] = XX[1] / XX[2];
+
+ Vector2d const xd = _distortions[i](xu); // distorted image point
+
+ double const focalLength = _cams[i].getFocalLength();
+ double const aspectRatio = _cams[i].getAspectRatio();
+
+ Matrix2x2d dp_dxd;
+ dp_dxd[0][0] = focalLength; dp_dxd[0][1] = 0;
+ dp_dxd[1][0] = 0; dp_dxd[1][1] = aspectRatio * focalLength;
+
+ {
+ // First, let's do the derivative wrt the structure and motion parameters.
+ Matrix2x3d dxu_dXX;
+ dxu_dXX[0][0] = 1.0f / XX[2]; dxu_dXX[0][1] = 0; dxu_dXX[0][2] = -XX[0]/(XX[2]*XX[2]);
+ dxu_dXX[1][0] = 0; dxu_dXX[1][1] = 1.0f / XX[2]; dxu_dXX[1][2] = -XX[1]/(XX[2]*XX[2]);
+
+ Matrix2x2d dxd_dxu = _distortions[i].derivativeWrtUndistortedPoint(xu);
+
+ Matrix2x2d dp_dxu = dp_dxd * dxd_dxu;
+ Matrix2x3d dp_dXX = dp_dxu * dxu_dXX;
+
+ Matrix2x6d dp_dRT;
+
+ multiply_A_B(dp_dXX, dXX_dRT, dp_dRT);
+ copyMatrixSlice(dp_dRT, 0, 0, 2, 6, Ak, 0, 0);
+ multiply_A_B(dp_dXX, dXX_dX, Bk);
+ } // end scope
+
+ switch (_mode)
+ {
+ case FULL_BUNDLE_RADIAL_TANGENTIAL:
+ {
+ Matrix2x2d dxd_dp1p2 = _distortions[i].derivativeWrtTangentialParameters(xu);
+ Matrix2x2d d_dp1p2 = dp_dxd * dxd_dp1p2;
+ copyMatrixSlice(d_dp1p2, 0, 0, 2, 2, Ak, 0, 11);
+ // No break here!
+ }
+ case FULL_BUNDLE_RADIAL:
+ {
+ Matrix2x2d dxd_dk1k2 = _distortions[i].derivativeWrtRadialParameters(xu);
+ Matrix2x2d d_dk1k2 = dp_dxd * dxd_dk1k2;
+ copyMatrixSlice(d_dk1k2, 0, 0, 2, 2, Ak, 0, 9);
+ // No break here!
+ }
+ case FULL_BUNDLE_FOCAL_LENGTH_PP:
+ {
+ Ak[0][7] = 1; Ak[0][8] = 0;
+ Ak[1][7] = 0; Ak[1][8] = 1;
+ // No break here!
+ }
+ case FULL_BUNDLE_FOCAL_LENGTH:
+ {
+ Ak[0][6] = xd[0];
+ Ak[1][6] = xd[1];
+ }
+ case FULL_BUNDLE_METRIC:
+ {
+ }
+ } // end switch
+ } // end VaryingInternalsMetricBundleOptimizer::fillJacobians()
+
+ void
+ VaryingInternalsMetricBundleOptimizer::updateParametersA(VectorArray<double> const& deltaAi)
+ {
+ Vector3d T, omega;
+ Matrix3x3d R0, dR, K;
+
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ {
+ Vector<double> const& deltaA = deltaAi[i];
+
+ T = _cams[i].getTranslation();
+ T[0] += deltaA[0];
+ T[1] += deltaA[1];
+ T[2] += deltaA[2];
+ _cams[i].setTranslation(T);
+
+ // Create incremental rotation using the Rodrigues formula.
+ R0 = _cams[i].getRotation();
+ omega[0] = deltaA[3];
+ omega[1] = deltaA[4];
+ omega[2] = deltaA[5];
+ createRotationMatrixRodriguez(omega, dR);
+ _cams[i].setRotation(dR * R0);
+
+ K = _cams[i].getIntrinsic();
+
+ switch (_mode)
+ {
+ case FULL_BUNDLE_RADIAL_TANGENTIAL:
+ {
+ _distortions[i].p1 += deltaA[11];
+ _distortions[i].p2 += deltaA[12];
+ // No break here!
+ }
+ case FULL_BUNDLE_RADIAL:
+ {
+ _distortions[i].k1 += deltaA[9];
+ _distortions[i].k2 += deltaA[10];
+ // No break here!
+ }
+ case FULL_BUNDLE_FOCAL_LENGTH_PP:
+ {
+ K[0][2] += deltaA[7];
+ K[1][2] += deltaA[8];
+ // No break here!
+ }
+ case FULL_BUNDLE_FOCAL_LENGTH:
+ {
+ double const ar = K[1][1] / K[0][0];
+ K[0][0] += deltaA[6];
+ K[1][1] = ar * K[0][0];
+ }
+ case FULL_BUNDLE_METRIC:
+ {
+ }
+ } // end switch
+ _cams[i].setIntrinsic(K);
+ } // end for (i)
+ } // end VaryingInternalsMetricBundleOptimizer::updateParametersA()
+
+} // end namespace V3D
+
+#endif // defined(V3DLIB_ENABLE_SUITESPARSE)
diff --git a/extern/libmv/third_party/ssba/Geometry/v3d_metricbundle.h b/extern/libmv/third_party/ssba/Geometry/v3d_metricbundle.h
new file mode 100644
index 00000000000..076a9e64346
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Geometry/v3d_metricbundle.h
@@ -0,0 +1,346 @@
+// -*- C++ -*-
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef V3D_METRICBUNDLE_H
+#define V3D_METRICBUNDLE_H
+
+# if defined(V3DLIB_ENABLE_SUITESPARSE)
+
+#include "Math/v3d_optimization.h"
+#include "Math/v3d_linear.h"
+#include "Math/v3d_linear_utils.h"
+#include "Geometry/v3d_cameramatrix.h"
+#include "Geometry/v3d_distortion.h"
+
+namespace V3D
+{
+
+ // This structure provides some helper functions common to all metric BAs
+ struct MetricBundleOptimizerBase : public SparseLevenbergOptimizer
+ {
+ typedef SparseLevenbergOptimizer Base;
+
+ MetricBundleOptimizerBase(double inlierThreshold,
+ vector<CameraMatrix>& cams,
+ vector<Vector3d >& Xs,
+ vector<Vector2d > const& measurements,
+ vector<int> const& corrspondingView,
+ vector<int> const& corrspondingPoint,
+ int nAddParamsA, int nParamsC)
+ : SparseLevenbergOptimizer(2, cams.size(), 6+nAddParamsA, Xs.size(), 3, nParamsC,
+ corrspondingView, corrspondingPoint),
+ _cams(cams), _Xs(Xs), _measurements(measurements),
+ _savedTranslations(cams.size()), _savedRotations(cams.size()),
+ _savedXs(Xs.size()),
+ _inlierThreshold(inlierThreshold), _cachedParamLength(0.0)
+ {
+ // Since we assume that BA does not alter the inputs too much,
+ // we compute the overall length of the parameter vector in advance
+ // and return that value as the result of getParameterLength().
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ {
+ _cachedParamLength += sqrNorm_L2(_cams[i].getTranslation());
+ _cachedParamLength += 3.0; // Assume eye(3) for R.
+ }
+ for (int j = _nNonvaryingB; j < _nParametersB; ++j)
+ _cachedParamLength += sqrNorm_L2(_Xs[j]);
+
+ _cachedParamLength = sqrt(_cachedParamLength);
+ }
+
+ // Huber robust cost function.
+ virtual void fillWeights(VectorArray<double> const& residual, Vector<double>& w)
+ {
+ for (unsigned int k = 0; k < w.size(); ++k)
+ {
+ Vector<double> const& r = residual[k];
+ double const e = norm_L2(r);
+ w[k] = (e < _inlierThreshold) ? 1.0 : sqrt(_inlierThreshold / e);
+ } // end for (k)
+ }
+
+ virtual double getParameterLength() const
+ {
+ return _cachedParamLength;
+ }
+
+ virtual void updateParametersA(VectorArray<double> const& deltaAi);
+ virtual void updateParametersB(VectorArray<double> const& deltaBj);
+ virtual void updateParametersC(Vector<double> const& deltaC)
+ {
+ (void)deltaC;
+ }
+
+ virtual void saveAllParameters()
+ {
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ {
+ _savedTranslations[i] = _cams[i].getTranslation();
+ _savedRotations[i] = _cams[i].getRotation();
+ }
+ _savedXs = _Xs;
+ }
+
+ virtual void restoreAllParameters()
+ {
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ {
+ _cams[i].setTranslation(_savedTranslations[i]);
+ _cams[i].setRotation(_savedRotations[i]);
+ }
+ _Xs = _savedXs;
+ }
+
+ protected:
+ typedef InlineMatrix<double, 3, 6> Matrix3x6d;
+
+ void poseDerivatives(int i, int j, Vector3d& XX,
+ Matrix3x6d& d_dRT, Matrix3x3d& d_dX) const;
+
+ vector<CameraMatrix>& _cams;
+ vector<Vector3d>& _Xs;
+
+ vector<Vector2d> const& _measurements;
+
+ vector<Vector3d> _savedTranslations;
+ vector<Matrix3x3d> _savedRotations;
+ vector<Vector3d> _savedXs;
+
+ double const _inlierThreshold;
+ double _cachedParamLength;
+ }; // end struct MetricBundleOptimizerBase
+
+ struct StdMetricBundleOptimizer : public MetricBundleOptimizerBase
+ {
+ typedef MetricBundleOptimizerBase Base;
+
+ StdMetricBundleOptimizer(double inlierThreshold,
+ vector<CameraMatrix>& cams,
+ vector<Vector3d >& Xs,
+ vector<Vector2d > const& measurements,
+ vector<int> const& correspondingView,
+ vector<int> const& correspondingPoint)
+ : MetricBundleOptimizerBase(inlierThreshold, cams, Xs, measurements,
+ correspondingView, correspondingPoint, 0, 0)
+ { }
+
+ virtual void evalResidual(VectorArray<double>& e)
+ {
+ for (unsigned int k = 0; k < e.count(); ++k)
+ {
+ int const i = _correspondingParamA[k];
+ int const j = _correspondingParamB[k];
+
+ Vector2d const q = _cams[i].projectPoint(_Xs[j]);
+ e[k][0] = q[0] - _measurements[k][0];
+ e[k][1] = q[1] - _measurements[k][1];
+ }
+ }
+
+ virtual void fillJacobians(Matrix<double>& Ak, Matrix<double>& Bk, Matrix<double>& Ck,
+ int i, int j, int k);
+ }; // end struct StdMetricBundleOptimizer
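+
+ // A minimal usage sketch (the data setup is hypothetical; the optimizer calls
+ // are the ones defined by this library):
+ //
+ // vector<CameraMatrix> cams = /* initial camera guesses */;
+ // vector<Vector3d> Xs = /* initial 3D point guesses */;
+ // vector<Vector2d> measurements = /* observed image points */;
+ // vector<int> viewOf, pointOf; // measurement k sees point pointOf[k] in view viewOf[k]
+ //
+ // StdMetricBundleOptimizer opt(1.0 /* inlier threshold */, cams, Xs,
+ // measurements, viewOf, pointOf);
+ // opt.maxIterations = 50; // maxIterations is inherited from the optimizer base
+ // opt.minimize(); // refines cams and Xs in place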
+
+//----------------------------------------------------------------------
+
+ enum
+ {
+ FULL_BUNDLE_METRIC = 0,
+ FULL_BUNDLE_FOCAL_LENGTH = 1, // f
+ FULL_BUNDLE_FOCAL_LENGTH_PP = 2, // f, cx, cy
+ FULL_BUNDLE_RADIAL = 3, // f, cx, cy, k1, k2
+ FULL_BUNDLE_RADIAL_TANGENTIAL = 4 // f, cx, cy, k1, k2, p1, p2
+ };
+
+ struct CommonInternalsMetricBundleOptimizer : public MetricBundleOptimizerBase
+ {
+ static int globalParamDimensionFromMode(int mode)
+ {
+ switch (mode)
+ {
+ case FULL_BUNDLE_METRIC: return 0;
+ case FULL_BUNDLE_FOCAL_LENGTH: return 1;
+ case FULL_BUNDLE_FOCAL_LENGTH_PP: return 3;
+ case FULL_BUNDLE_RADIAL: return 5;
+ case FULL_BUNDLE_RADIAL_TANGENTIAL: return 7;
+ }
+ return 0;
+ }
+
+ typedef MetricBundleOptimizerBase Base;
+
+ CommonInternalsMetricBundleOptimizer(int mode,
+ double inlierThreshold,
+ Matrix3x3d& K,
+ StdDistortionFunction& distortion,
+ vector<CameraMatrix>& cams,
+ vector<Vector3d >& Xs,
+ vector<Vector2d > const& measurements,
+ vector<int> const& correspondingView,
+ vector<int> const& correspondingPoint)
+ : MetricBundleOptimizerBase(inlierThreshold, cams, Xs, measurements,
+ correspondingView, correspondingPoint,
+ 0, globalParamDimensionFromMode(mode)),
+ _mode(mode), _K(K), _distortion(distortion)
+ {
+ _cachedAspectRatio = K[1][1] / K[0][0];
+ }
+
+ Vector2d projectPoint(Vector3d const& X, int i) const
+ {
+ Vector3d const XX = _cams[i].transformPointIntoCameraSpace(X);
+ Vector2d p;
+ p[0] = XX[0] / XX[2];
+ p[1] = XX[1] / XX[2];
+ p = _distortion(p);
+ Vector2d res;
+ res[0] = _K[0][0] * p[0] + _K[0][1] * p[1] + _K[0][2];
+ res[1] = _K[1][1] * p[1] + _K[1][2];
+ return res;
+ }
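+
+ // Note: projectPoint() assumes an upper-triangular intrinsic matrix K
+ // (K[1][0] == 0), which is why no K[1][0] * p[0] term appears above.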
+
+ virtual void evalResidual(VectorArray<double>& e)
+ {
+ for (unsigned int k = 0; k < e.count(); ++k)
+ {
+ int const i = _correspondingParamA[k];
+ int const j = _correspondingParamB[k];
+
+ Vector2d const q = this->projectPoint(_Xs[j], i);
+ e[k][0] = q[0] - _measurements[k][0];
+ e[k][1] = q[1] - _measurements[k][1];
+ }
+ }
+
+ virtual void fillJacobians(Matrix<double>& Ak, Matrix<double>& Bk, Matrix<double>& Ck,
+ int i, int j, int k);
+
+ virtual void updateParametersC(Vector<double> const& deltaC);
+
+ virtual void saveAllParameters()
+ {
+ Base::saveAllParameters();
+ _savedK = _K;
+ _savedDistortion = _distortion;
+ }
+
+ virtual void restoreAllParameters()
+ {
+ Base::restoreAllParameters();
+ _K = _savedK;
+ _distortion = _savedDistortion;
+ }
+
+ protected:
+ int _mode;
+ Matrix3x3d& _K;
+ StdDistortionFunction& _distortion;
+
+ Matrix3x3d _savedK;
+ StdDistortionFunction _savedDistortion;
+ double _cachedAspectRatio;
+ }; // end struct CommonInternalsMetricBundleOptimizer
+
+//----------------------------------------------------------------------
+
+ struct VaryingInternalsMetricBundleOptimizer : public MetricBundleOptimizerBase
+ {
+ static int extParamDimensionFromMode(int mode)
+ {
+ switch (mode)
+ {
+ case FULL_BUNDLE_METRIC: return 0;
+ case FULL_BUNDLE_FOCAL_LENGTH: return 1;
+ case FULL_BUNDLE_FOCAL_LENGTH_PP: return 3;
+ case FULL_BUNDLE_RADIAL: return 5;
+ case FULL_BUNDLE_RADIAL_TANGENTIAL: return 7;
+ }
+ return 0;
+ }
+
+ typedef MetricBundleOptimizerBase Base;
+
+ VaryingInternalsMetricBundleOptimizer(int mode,
+ double inlierThreshold,
+ std::vector<StdDistortionFunction>& distortions,
+ vector<CameraMatrix>& cams,
+ vector<Vector3d >& Xs,
+ vector<Vector2d > const& measurements,
+ vector<int> const& correspondingView,
+ vector<int> const& correspondingPoint)
+ : MetricBundleOptimizerBase(inlierThreshold, cams, Xs, measurements,
+ correspondingView, correspondingPoint,
+ extParamDimensionFromMode(mode), 0),
+ _mode(mode), _distortions(distortions),
+ _savedKs(cams.size()), _savedDistortions(cams.size())
+ { }
+
+ Vector2d projectPoint(Vector3d const& X, int i) const
+ {
+ return _cams[i].projectPoint(_distortions[i], X);
+ }
+
+ virtual void evalResidual(VectorArray<double>& e)
+ {
+ for (unsigned int k = 0; k < e.count(); ++k)
+ {
+ int const i = _correspondingParamA[k];
+ int const j = _correspondingParamB[k];
+
+ Vector2d const q = this->projectPoint(_Xs[j], i);
+ e[k][0] = q[0] - _measurements[k][0];
+ e[k][1] = q[1] - _measurements[k][1];
+ }
+ }
+
+ virtual void fillJacobians(Matrix<double>& Ak, Matrix<double>& Bk, Matrix<double>& Ck,
+ int i, int j, int k);
+
+ virtual void updateParametersA(VectorArray<double> const& deltaAi);
+
+ virtual void saveAllParameters()
+ {
+ Base::saveAllParameters();
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ _savedKs[i] = _cams[i].getIntrinsic();
+ std::copy(_distortions.begin(), _distortions.end(), _savedDistortions.begin());
+ }
+
+ virtual void restoreAllParameters()
+ {
+ Base::restoreAllParameters();
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ _cams[i].setIntrinsic(_savedKs[i]);
+ std::copy(_savedDistortions.begin(), _savedDistortions.end(), _distortions.begin());
+ }
+
+ protected:
+ int _mode;
+ std::vector<StdDistortionFunction>& _distortions;
+
+ std::vector<Matrix3x3d> _savedKs;
+ std::vector<StdDistortionFunction> _savedDistortions;
+ }; // end struct VaryingInternalsMetricBundleOptimizer
+
+} // end namespace V3D
+
+# endif
+
+#endif
diff --git a/extern/libmv/third_party/ssba/Math/v3d_linear.h b/extern/libmv/third_party/ssba/Math/v3d_linear.h
new file mode 100644
index 00000000000..7d6e898169c
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Math/v3d_linear.h
@@ -0,0 +1,923 @@
+// -*- C++ -*-
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef V3D_LINEAR_H
+#define V3D_LINEAR_H
+
+#include <cassert>
+#include <algorithm>
+#include <vector>
+#include <utility>
+#include <cmath>
+
+namespace V3D
+{
+ using namespace std;
+
+ //! Unboxed vector type
+ template <typename Elem, int Size>
+ struct InlineVectorBase
+ {
+ typedef Elem value_type;
+ typedef Elem element_type;
+
+ typedef Elem const * const_iterator;
+ typedef Elem * iterator;
+
+ static unsigned int size() { return Size; }
+
+ Elem& operator[](unsigned int i) { return _vec[i]; }
+ Elem operator[](unsigned int i) const { return _vec[i]; }
+
+ Elem& operator()(unsigned int i) { return _vec[i-1]; }
+ Elem operator()(unsigned int i) const { return _vec[i-1]; }
+
+ const_iterator begin() const { return _vec; }
+ iterator begin() { return _vec; }
+ const_iterator end() const { return _vec + Size; }
+ iterator end() { return _vec + Size; }
+
+ void newsize(unsigned int sz) const
+ {
+ assert(sz == Size);
+ }
+
+ protected:
+ Elem _vec[Size];
+ };
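+
+ // Indexing convention used by all vector and matrix types in this file:
+ // operator[] is 0-based (C style) while operator() is 1-based (TNT/Fortran
+ // style), e.g. for a Vector3d v, v[0] and v(1) name the same element.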
+
+ //! Boxed (heap allocated) vector.
+ template <typename Elem>
+ struct VectorBase
+ {
+ typedef Elem value_type;
+ typedef Elem element_type;
+
+ typedef Elem const * const_iterator;
+ typedef Elem * iterator;
+
+ VectorBase()
+ : _size(0), _ownsVec(true), _vec(0)
+ { }
+
+ VectorBase(unsigned int size)
+ : _size(size), _ownsVec(true), _vec(0)
+ {
+ if (size > 0) _vec = new Elem[size];
+ }
+
+ VectorBase(unsigned int size, Elem * values)
+ : _size(size), _ownsVec(false), _vec(values)
+ { }
+
+ VectorBase(VectorBase<Elem> const& a)
+ : _size(0), _ownsVec(true), _vec(0)
+ {
+ _size = a._size;
+ if (_size == 0) return;
+ _vec = new Elem[_size];
+ std::copy(a._vec, a._vec + _size, _vec);
+ }
+
+ ~VectorBase() { if (_ownsVec && _vec != 0) delete [] _vec; }
+
+ VectorBase& operator=(VectorBase<Elem> const& a)
+ {
+ if (this == &a) return *this;
+
+ this->newsize(a._size);
+ std::copy(a._vec, a._vec + _size, _vec);
+ return *this;
+ }
+
+ unsigned int size() const { return _size; }
+
+ VectorBase<Elem>& newsize(unsigned int sz)
+ {
+ if (sz == _size) return *this;
+ assert(_ownsVec);
+
+ __destroy();
+ _size = sz;
+ if (_size > 0) _vec = new Elem[_size];
+
+ return *this;
+ }
+
+
+ Elem& operator[](unsigned int i) { return _vec[i]; }
+ Elem operator[](unsigned int i) const { return _vec[i]; }
+
+ Elem& operator()(unsigned int i) { return _vec[i-1]; }
+ Elem operator()(unsigned int i) const { return _vec[i-1]; }
+
+ const_iterator begin() const { return _vec; }
+ iterator begin() { return _vec; }
+ const_iterator end() const { return _vec + _size; }
+ iterator end() { return _vec + _size; }
+
+ protected:
+ void __destroy()
+ {
+ assert(_ownsVec);
+
+ if (_vec != 0) delete [] _vec;
+ _size = 0;
+ _vec = 0;
+ }
+
+ unsigned int _size;
+ bool _ownsVec;
+ Elem * _vec;
+ };
+
+ template <typename Elem, int Rows, int Cols>
+ struct InlineMatrixBase
+ {
+ typedef Elem value_type;
+ typedef Elem element_type;
+
+ typedef Elem * iterator;
+ typedef Elem const * const_iterator;
+
+ static unsigned int num_rows() { return Rows; }
+ static unsigned int num_cols() { return Cols; }
+
+ Elem * operator[](unsigned int row) { return _m[row]; }
+ Elem const * operator[](unsigned int row) const { return _m[row]; }
+
+ Elem& operator()(unsigned int row, unsigned int col) { return _m[row-1][col-1]; }
+ Elem operator()(unsigned int row, unsigned int col) const { return _m[row-1][col-1]; }
+
+ template <typename Vec>
+ void getRowSlice(unsigned int row, unsigned int first, unsigned int last, Vec& dst) const
+ {
+ for (unsigned int c = first; c < last; ++c) dst[c-first] = _m[row][c];
+ }
+
+ template <typename Vec>
+ void getColumnSlice(unsigned int first, unsigned int len, unsigned int col, Vec& dst) const
+ {
+ for (unsigned int r = 0; r < len; ++r) dst[r] = _m[r+first][col];
+ }
+
+ void newsize(unsigned int rows, unsigned int cols) const
+ {
+ assert(rows == Rows && cols == Cols);
+ }
+
+ const_iterator begin() const { return &_m[0][0]; }
+ iterator begin() { return &_m[0][0]; }
+ const_iterator end() const { return &_m[0][0] + Rows*Cols; }
+ iterator end() { return &_m[0][0] + Rows*Cols; }
+
+ protected:
+ Elem _m[Rows][Cols];
+ };
+
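+ //! Boxed (heap allocated) row-major matrix.
+ // operator[](row) returns a pointer to the start of that row inside the
+ // contiguous buffer, i.e. m[r][c] addresses _m[r*_cols + c].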
+ template <typename Elem>
+ struct MatrixBase
+ {
+ typedef Elem value_type;
+ typedef Elem element_type;
+
+ typedef Elem const * const_iterator;
+ typedef Elem * iterator;
+
+ MatrixBase()
+ : _rows(0), _cols(0), _ownsData(true), _m(0)
+ { }
+
+ MatrixBase(unsigned int rows, unsigned int cols)
+ : _rows(rows), _cols(cols), _ownsData(true), _m(0)
+ {
+ if (_rows * _cols == 0) return;
+ _m = new Elem[rows*cols];
+ }
+
+ MatrixBase(unsigned int rows, unsigned int cols, Elem * values)
+ : _rows(rows), _cols(cols), _ownsData(false), _m(values)
+ { }
+
+ MatrixBase(MatrixBase<Elem> const& a)
+ : _ownsData(true), _m(0)
+ {
+ _rows = a._rows; _cols = a._cols;
+ if (_rows * _cols == 0) return;
+ _m = new Elem[_rows*_cols];
+ std::copy(a._m, a._m+_rows*_cols, _m);
+ }
+
+ ~MatrixBase()
+ {
+ if (_ownsData && _m != 0) delete [] _m;
+ }
+
+ MatrixBase& operator=(MatrixBase<Elem> const& a)
+ {
+ if (this == &a) return *this;
+
+ this->newsize(a.num_rows(), a.num_cols());
+
+ std::copy(a._m, a._m+_rows*_cols, _m);
+ return *this;
+ }
+
+ void newsize(unsigned int rows, unsigned int cols)
+ {
+ if (rows == _rows && cols == _cols) return;
+
+ assert(_ownsData);
+
+ __destroy();
+
+ _rows = rows;
+ _cols = cols;
+ if (_rows * _cols == 0) return;
+ _m = new Elem[rows*cols];
+ }
+
+ unsigned int num_rows() const { return _rows; }
+ unsigned int num_cols() const { return _cols; }
+
+ Elem * operator[](unsigned int row) { return _m + row*_cols; }
+ Elem const * operator[](unsigned int row) const { return _m + row*_cols; }
+
+ Elem& operator()(unsigned int row, unsigned int col) { return _m[(row-1)*_cols + col-1]; }
+ Elem operator()(unsigned int row, unsigned int col) const { return _m[(row-1)*_cols + col-1]; }
+
+ const_iterator begin() const { return _m; }
+ iterator begin() { return _m; }
+ const_iterator end() const { return _m + _rows*_cols; }
+ iterator end() { return _m + _rows*_cols; }
+
+ template <typename Vec>
+ void getRowSlice(unsigned int row, unsigned int first, unsigned int last, Vec& dst) const
+ {
+ Elem const * v = (*this)[row];
+ for (unsigned int c = first; c < last; ++c) dst[c-first] = v[c];
+ }
+
+ template <typename Vec>
+ void getColumnSlice(unsigned int first, unsigned int len, unsigned int col, Vec& dst) const
+ {
+ for (unsigned int r = 0; r < len; ++r) dst[r] = _m[r+first][col];
+ }
+
+ protected:
+ void __destroy()
+ {
+ assert(_ownsData);
+ if (_m != 0) delete [] _m;
+ _m = 0;
+ _rows = _cols = 0;
+ }
+
+ unsigned int _rows, _cols;
+ bool _ownsData;
+ Elem * _m;
+ };
+
+ template <typename T>
+ struct CCS_Matrix
+ {
+ CCS_Matrix()
+ : _rows(0), _cols(0)
+ { }
+
+ CCS_Matrix(int const rows, int const cols, vector<pair<int, int> > const& nonZeros)
+ : _rows(rows), _cols(cols)
+ {
+ this->initialize(nonZeros);
+ }
+
+ CCS_Matrix(CCS_Matrix const& b)
+ : _rows(b._rows), _cols(b._cols),
+ _colStarts(b._colStarts), _rowIdxs(b._rowIdxs), _destIdxs(b._destIdxs), _values(b._values)
+ { }
+
+ CCS_Matrix& operator=(CCS_Matrix const& b)
+ {
+ if (this == &b) return *this;
+ _rows = b._rows;
+ _cols = b._cols;
+ _colStarts = b._colStarts;
+ _rowIdxs = b._rowIdxs;
+ _destIdxs = b._destIdxs;
+ _values = b._values;
+ return *this;
+ }
+
+ void create(int const rows, int const cols, vector<pair<int, int> > const& nonZeros)
+ {
+ _rows = rows;
+ _cols = cols;
+ this->initialize(nonZeros);
+ }
+
+ unsigned int num_rows() const { return _rows; }
+ unsigned int num_cols() const { return _cols; }
+
+ int getNonzeroCount() const { return _values.size(); }
+
+ T const * getValues() const { return &_values[0]; }
+ T * getValues() { return &_values[0]; }
+
+ int const * getDestIndices() const { return &_destIdxs[0]; }
+ int const * getColumnStarts() const { return &_colStarts[0]; }
+ int const * getRowIndices() const { return &_rowIdxs[0]; }
+
+ void getRowRange(unsigned int col, unsigned int& firstRow, unsigned int& lastRow) const
+ {
+ firstRow = _rowIdxs[_colStarts[col]];
+ lastRow = _rowIdxs[_colStarts[col+1]-1]+1;
+ }
+
+ template <typename Vec>
+ void getColumnSlice(unsigned int first, unsigned int len, unsigned int col, Vec& dst) const
+ {
+ unsigned int const last = first + len;
+
+ for (unsigned int r = 0; r < len; ++r) dst[r] = 0; // Fill vector with zeros
+
+ int const colStart = _colStarts[col];
+ int const colEnd = _colStarts[col+1];
+
+ int i = colStart;
+ int r;
+ // Skip rows less than the given start row
+ while (i < colEnd && (r = _rowIdxs[i]) < first) ++i;
+
+ // Copy elements until the final row
+ while (i < colEnd && (r = _rowIdxs[i]) < last)
+ {
+ dst[r-first] = _values[i];
+ ++i;
+ }
+ } // end getColumnSlice()
+
+ int getColumnNonzeroCount(unsigned int col) const
+ {
+ int const colStart = _colStarts[col];
+ int const colEnd = _colStarts[col+1];
+ return colEnd - colStart;
+ }
+
+ template <typename VecA, typename VecB>
+ void getSparseColumn(unsigned int col, VecA& rows, VecB& values) const
+ {
+ int const colStart = _colStarts[col];
+ int const colEnd = _colStarts[col+1];
+ int const nnz = colEnd - colStart;
+
+ for (int i = 0; i < nnz; ++i)
+ {
+ rows[i] = _rowIdxs[colStart + i];
+ values[i] = _values[colStart + i];
+ }
+ }
+
+ protected:
+ struct NonzeroInfo
+ {
+ int row, col, serial;
+
+ // Sort wrt the column first
+ bool operator<(NonzeroInfo const& rhs) const
+ {
+ if (col < rhs.col) return true;
+ if (col > rhs.col) return false;
+ return row < rhs.row;
+ }
+ };
+
+ void initialize(std::vector<std::pair<int, int> > const& nonZeros)
+ {
+ using namespace std;
+
+ int const nnz = nonZeros.size();
+
+ _colStarts.resize(_cols + 1);
+ _rowIdxs.resize(nnz);
+
+ vector<NonzeroInfo> nz(nnz);
+ for (int k = 0; k < nnz; ++k)
+ {
+ nz[k].row = nonZeros[k].first;
+ nz[k].col = nonZeros[k].second;
+ nz[k].serial = k;
+ }
+
+ // Sort in column major order
+ std::sort(nz.begin(), nz.end());
+
+ for (int k = 0; k < nnz; ++k) _rowIdxs[k] = nz[k].row;
+
+ int curCol = -1;
+ for (int k = 0; k < nnz; ++k)
+ {
+ NonzeroInfo const& el = nz[k];
+ if (el.col != curCol)
+ {
+ // Update empty cols between
+ for (int c = curCol+1; c < el.col; ++c) _colStarts[c] = k;
+
+ curCol = el.col;
+ _colStarts[curCol] = k;
+ } // end if
+ } // end for (k)
+
+ // Update remaining columns
+ for (int c = curCol+1; c <= _cols; ++c) _colStarts[c] = nnz;
+
+ _destIdxs.resize(nnz);
+ for (int k = 0; k < nnz; ++k) _destIdxs[nz[k].serial] = k;
+
+ _values.resize(nnz);
+ } // end initialize()
+
+ int _rows, _cols;
+ std::vector<int> _colStarts;
+ std::vector<int> _rowIdxs;
+ std::vector<int> _destIdxs;
+ std::vector<T> _values;
+ }; // end struct CCS_Matrix
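+
+ // Storage example: a 3x3 matrix with nonzeros at (row,col) = (0,0), (2,0),
+ // (1,2) is held in compressed column storage (CCS) as
+ // _colStarts = { 0, 2, 2, 3 } (column c occupies [_colStarts[c], _colStarts[c+1]))
+ // _rowIdxs = { 0, 2, 1 } (row index of each stored entry, column-major)
+ // and _destIdxs maps the caller's original nonzero order to these sorted positions.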
+
+//----------------------------------------------------------------------
+
+ template <typename Vec, typename Elem>
+ inline void
+ fillVector(Vec& v, Elem val)
+ {
+ // We do not use std::fill since we rely only on size() and operator[] member functions.
+ for (unsigned int i = 0; i < v.size(); ++i) v[i] = val;
+ }
+
+ template <typename Vec>
+ inline void
+ makeZeroVector(Vec& v)
+ {
+ fillVector(v, 0);
+ }
+
+ template <typename VecA, typename VecB>
+ inline void
+ copyVector(VecA const& src, VecB& dst)
+ {
+ assert(src.size() == dst.size());
+ // We do not use std::copy since we rely only on size() and operator[] member functions.
+ for (unsigned int i = 0; i < src.size(); ++i) dst[i] = src[i];
+ }
+
+ template <typename VecA, typename VecB>
+ inline void
+ copyVectorSlice(VecA const& src, unsigned int srcStart, unsigned int srcLen,
+ VecB& dst, unsigned int dstStart)
+ {
+ unsigned int const end = std::min(srcStart + srcLen, src.size());
+ unsigned int const sz = dst.size();
+ unsigned int i0, i1;
+ for (i0 = srcStart, i1 = dstStart; i0 < end && i1 < sz; ++i0, ++i1) dst[i1] = src[i0];
+ }
+
+ template <typename Vec>
+ inline typename Vec::value_type
+ norm_L1(Vec const& v)
+ {
+ typename Vec::value_type res(0);
+ for (unsigned int i = 0; i < v.size(); ++i) res += fabs(v[i]);
+ return res;
+ }
+
+ template <typename Vec>
+ inline typename Vec::value_type
+ norm_Linf(Vec const& v)
+ {
+ typename Vec::value_type res(0);
+ for (unsigned int i = 0; i < v.size(); ++i) res = std::max(res, fabs(v[i]));
+ return res;
+ }
+
+ template <typename Vec>
+ inline typename Vec::value_type
+ norm_L2(Vec const& v)
+ {
+ typename Vec::value_type res(0);
+ for (unsigned int i = 0; i < v.size(); ++i) res += v[i]*v[i];
+ return sqrt((double)res);
+ }
+
+ template <typename Vec>
+ inline typename Vec::value_type
+ sqrNorm_L2(Vec const& v)
+ {
+ typename Vec::value_type res(0);
+ for (unsigned int i = 0; i < v.size(); ++i) res += v[i]*v[i];
+ return res;
+ }
+
+ template <typename Vec>
+ inline void
+ normalizeVector(Vec& v)
+ {
+ typename Vec::value_type norm(norm_L2(v));
+ for (unsigned int i = 0; i < v.size(); ++i) v[i] /= norm;
+ }
+
+ template<typename VecA, typename VecB>
+ inline typename VecA::value_type
+ innerProduct(VecA const& a, VecB const& b)
+ {
+ assert(a.size() == b.size());
+ typename VecA::value_type res(0);
+ for (unsigned int i = 0; i < a.size(); ++i) res += a[i] * b[i];
+ return res;
+ }
+
+ template <typename Elem, typename VecA, typename VecB>
+ inline void
+ scaleVector(Elem s, VecA const& v, VecB& dst)
+ {
+ for (unsigned int i = 0; i < v.size(); ++i) dst[i] = s * v[i];
+ }
+
+ template <typename Elem, typename Vec>
+ inline void
+ scaleVectorIP(Elem s, Vec& v)
+ {
+ typedef typename Vec::value_type Elem2;
+ for (unsigned int i = 0; i < v.size(); ++i)
+ v[i] = (Elem2)(v[i] * s);
+ }
+
+ template <typename VecA, typename VecB, typename VecC>
+ inline void
+ makeCrossProductVector(VecA const& v, VecB const& w, VecC& dst)
+ {
+ assert(v.size() == 3);
+ assert(w.size() == 3);
+ assert(dst.size() == 3);
+ dst[0] = v[1]*w[2] - v[2]*w[1];
+ dst[1] = v[2]*w[0] - v[0]*w[2];
+ dst[2] = v[0]*w[1] - v[1]*w[0];
+ }
+
+ template <typename VecA, typename VecB, typename VecC>
+ inline void
+ addVectors(VecA const& v, VecB const& w, VecC& dst)
+ {
+ assert(v.size() == w.size());
+ assert(v.size() == dst.size());
+ for (unsigned int i = 0; i < v.size(); ++i) dst[i] = v[i] + w[i];
+ }
+
+ template <typename VecA, typename VecB, typename VecC>
+ inline void
+ subtractVectors(VecA const& v, VecB const& w, VecC& dst)
+ {
+ assert(v.size() == w.size());
+ assert(v.size() == dst.size());
+ for (unsigned int i = 0; i < v.size(); ++i) dst[i] = v[i] - w[i];
+ }
+
+ template <typename MatA, typename MatB>
+ inline void
+ copyMatrix(MatA const& src, MatB& dst)
+ {
+ unsigned int const rows = src.num_rows();
+ unsigned int const cols = src.num_cols();
+ assert(dst.num_rows() == rows);
+ assert(dst.num_cols() == cols);
+ for (unsigned int c = 0; c < cols; ++c)
+ for (unsigned int r = 0; r < rows; ++r) dst[r][c] = src[r][c];
+ }
+
+ template <typename MatA, typename MatB>
+ inline void
+ copyMatrixSlice(MatA const& src, unsigned int rowStart, unsigned int colStart, unsigned int rowLen, unsigned int colLen,
+ MatB& dst, unsigned int dstRow, unsigned int dstCol)
+ {
+ unsigned int const rows = dst.num_rows();
+ unsigned int const cols = dst.num_cols();
+
+ unsigned int const rowEnd = std::min(rowStart + rowLen, src.num_rows());
+ unsigned int const colEnd = std::min(colStart + colLen, src.num_cols());
+
+ unsigned int c0, c1, r0, r1;
+
+ for (c0 = colStart, c1 = dstCol; c0 < colEnd && c1 < cols; ++c0, ++c1)
+ for (r0 = rowStart, r1 = dstRow; r0 < rowEnd && r1 < rows; ++r0, ++r1)
+ dst[r1][c1] = src[r0][c0];
+ }
+
+ template <typename MatA, typename MatB>
+ inline void
+ makeTransposedMatrix(MatA const& src, MatB& dst)
+ {
+ unsigned int const rows = src.num_rows();
+ unsigned int const cols = src.num_cols();
+ assert(dst.num_cols() == rows);
+ assert(dst.num_rows() == cols);
+ for (unsigned int c = 0; c < cols; ++c)
+ for (unsigned int r = 0; r < rows; ++r) dst[c][r] = src[r][c];
+ }
+
+ template <typename Mat>
+ inline void
+ fillMatrix(Mat& m, typename Mat::value_type val)
+ {
+ unsigned int const rows = m.num_rows();
+ unsigned int const cols = m.num_cols();
+ for (unsigned int c = 0; c < cols; ++c)
+ for (unsigned int r = 0; r < rows; ++r) m[r][c] = val;
+ }
+
+ template <typename Mat>
+ inline void
+ makeZeroMatrix(Mat& m)
+ {
+ fillMatrix(m, 0);
+ }
+
+ template <typename Mat>
+ inline void
+ makeIdentityMatrix(Mat& m)
+ {
+ makeZeroMatrix(m);
+ unsigned int const rows = m.num_rows();
+ unsigned int const cols = m.num_cols();
+ unsigned int n = std::min(rows, cols);
+ for (unsigned int i = 0; i < n; ++i)
+ m[i][i] = 1;
+ }
+
+ template <typename Mat, typename Vec>
+ inline void
+ makeCrossProductMatrix(Vec const& v, Mat& m)
+ {
+ assert(v.size() == 3);
+ assert(m.num_rows() == 3);
+ assert(m.num_cols() == 3);
+ m[0][0] = 0; m[0][1] = -v[2]; m[0][2] = v[1];
+ m[1][0] = v[2]; m[1][1] = 0; m[1][2] = -v[0];
+ m[2][0] = -v[1]; m[2][1] = v[0]; m[2][2] = 0;
+ }
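+
+ // The matrix built above is the skew-symmetric [v]x with [v]x * w == v x w,
+ // i.e. multiply_A_v(m, w, dst) agrees with makeCrossProductVector(v, w, dst).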
+
+ template <typename Mat, typename Vec>
+ inline void
+ makeOuterProductMatrix(Vec const& v, Mat& m)
+ {
+ assert(m.num_cols() == m.num_rows());
+ assert(v.size() == m.num_cols());
+ unsigned const sz = v.size();
+ for (unsigned r = 0; r < sz; ++r)
+ for (unsigned c = 0; c < sz; ++c) m[r][c] = v[r]*v[c];
+ }
+
+ template <typename Mat, typename VecA, typename VecB>
+ inline void
+ makeOuterProductMatrix(VecA const& u, VecB const& v, Mat& m)
+ {
+ assert(m.num_cols() == m.num_rows());
+ assert(u.size() == m.num_cols());
+ assert(v.size() == m.num_cols());
+ unsigned const sz = u.size();
+ for (unsigned r = 0; r < sz; ++r)
+ for (unsigned c = 0; c < sz; ++c) m[r][c] = u[r]*v[c];
+ }
+
+ template <typename MatA, typename MatB, typename MatC>
+ void addMatrices(MatA const& a, MatB const& b, MatC& dst)
+ {
+ assert(a.num_cols() == b.num_cols());
+ assert(a.num_rows() == b.num_rows());
+ assert(dst.num_cols() == a.num_cols());
+ assert(dst.num_rows() == a.num_rows());
+
+ unsigned int const rows = a.num_rows();
+ unsigned int const cols = a.num_cols();
+
+ for (unsigned r = 0; r < rows; ++r)
+ for (unsigned c = 0; c < cols; ++c) dst[r][c] = a[r][c] + b[r][c];
+ }
+
+ template <typename MatA, typename MatB>
+ void addMatricesIP(MatA const& a, MatB& dst)
+ {
+ assert(dst.num_cols() == a.num_cols());
+ assert(dst.num_rows() == a.num_rows());
+
+ unsigned int const rows = a.num_rows();
+ unsigned int const cols = a.num_cols();
+
+ for (unsigned r = 0; r < rows; ++r)
+ for (unsigned c = 0; c < cols; ++c) dst[r][c] += a[r][c];
+ }
+
+ template <typename MatA, typename MatB, typename MatC>
+ void subtractMatrices(MatA const& a, MatB const& b, MatC& dst)
+ {
+ assert(a.num_cols() == b.num_cols());
+ assert(a.num_rows() == b.num_rows());
+ assert(dst.num_cols() == a.num_cols());
+ assert(dst.num_rows() == a.num_rows());
+
+ unsigned int const rows = a.num_rows();
+ unsigned int const cols = a.num_cols();
+
+ for (unsigned r = 0; r < rows; ++r)
+ for (unsigned c = 0; c < cols; ++c) dst[r][c] = a[r][c] - b[r][c];
+ }
+
+ template <typename MatA, typename Elem, typename MatB>
+ inline void
+ makeScaledMatrix(MatA const& m, Elem scale, MatB& dst)
+ {
+ unsigned int const rows = m.num_rows();
+ unsigned int const cols = m.num_cols();
+ for (unsigned int c = 0; c < cols; ++c)
+ for (unsigned int r = 0; r < rows; ++r) dst[r][c] = m[r][c] * scale;
+ }
+
+ template <typename Mat, typename Elem>
+ inline void
+ scaleMatrixIP(Elem scale, Mat& m)
+ {
+ unsigned int const rows = m.num_rows();
+ unsigned int const cols = m.num_cols();
+ for (unsigned int c = 0; c < cols; ++c)
+ for (unsigned int r = 0; r < rows; ++r) m[r][c] *= scale;
+ }
+
+ template <typename Mat, typename VecA, typename VecB>
+ inline void
+ multiply_A_v(Mat const& m, VecA const& in, VecB& dst)
+ {
+ unsigned int const rows = m.num_rows();
+ unsigned int const cols = m.num_cols();
+ assert(in.size() == cols);
+ assert(dst.size() == rows);
+
+ makeZeroVector(dst);
+
+ for (unsigned int r = 0; r < rows; ++r)
+ for (unsigned int c = 0; c < cols; ++c) dst[r] += m[r][c] * in[c];
+ }
+
+ template <typename Mat, typename VecA, typename VecB>
+ inline void
+ multiply_A_v_projective(Mat const& m, VecA const& in, VecB& dst)
+ {
+ unsigned int const rows = m.num_rows();
+ unsigned int const cols = m.num_cols();
+ assert(in.size() == cols-1);
+ assert(dst.size() == rows-1);
+
+ // Use 0-based [] indexing here; operator() is 1-based in this library and
+ // would read out of bounds for the last row/column accesses below.
+ typename VecB::value_type w = m[rows-1][cols-1];
+ unsigned int r, i;
+ for (i = 0; i < cols-1; ++i) w += m[rows-1][i] * in[i];
+ for (r = 0; r < rows-1; ++r) dst[r] = m[r][cols-1];
+ for (r = 0; r < rows-1; ++r)
+ for (unsigned int c = 0; c < cols-1; ++c) dst[r] += m[r][c] * in[c];
+ for (i = 0; i < rows-1; ++i) dst[i] /= w;
+ }
+
+ template <typename Mat, typename VecA, typename VecB>
+ inline void
+ multiply_A_v_affine(Mat const& m, VecA const& in, VecB& dst)
+ {
+ unsigned int const rows = m.num_rows();
+ unsigned int const cols = m.num_cols();
+ assert(in.size() == cols-1);
+ assert(dst.size() == rows);
+
+ unsigned int r;
+
+ for (r = 0; r < rows; ++r) dst[r] = m[r][cols-1]; // 0-based indexing; see note in multiply_A_v_projective()
+ for (r = 0; r < rows; ++r)
+ for (unsigned int c = 0; c < cols-1; ++c) dst[r] += m[r][c] * in[c];
+ }
+
+ template <typename Mat, typename VecA, typename VecB>
+ inline void
+ multiply_At_v(Mat const& m, VecA const& in, VecB& dst)
+ {
+ unsigned int const rows = m.num_rows();
+ unsigned int const cols = m.num_cols();
+ assert(in.size() == rows);
+ assert(dst.size() == cols);
+
+ makeZeroVector(dst);
+ for (unsigned int c = 0; c < cols; ++c)
+ for (unsigned int r = 0; r < rows; ++r) dst[c] += m[r][c] * in[r];
+ }
+
+ template <typename MatA, typename MatB>
+ inline void
+ multiply_At_A(MatA const& a, MatB& dst)
+ {
+ assert(dst.num_rows() == a.num_cols());
+ assert(dst.num_cols() == a.num_cols());
+
+ typedef typename MatB::value_type Elem;
+
+ Elem accum;
+ for (unsigned int r = 0; r < a.num_cols(); ++r)
+ for (unsigned int c = 0; c < a.num_cols(); ++c)
+ {
+ accum = 0;
+ for (unsigned int k = 0; k < a.num_rows(); ++k) accum += a[k][r] * a[k][c];
+ dst[r][c] = accum;
+ }
+ }
+
+ template <typename MatA, typename MatB, typename MatC>
+ inline void
+ multiply_A_B(MatA const& a, MatB const& b, MatC& dst)
+ {
+ assert(a.num_cols() == b.num_rows());
+ assert(dst.num_rows() == a.num_rows());
+ assert(dst.num_cols() == b.num_cols());
+
+ typedef typename MatC::value_type Elem;
+
+ Elem accum;
+ for (unsigned int r = 0; r < a.num_rows(); ++r)
+ for (unsigned int c = 0; c < b.num_cols(); ++c)
+ {
+ accum = 0;
+ for (unsigned int k = 0; k < a.num_cols(); ++k) accum += a[r][k] * b[k][c];
+ dst[r][c] = accum;
+ }
+ }
+
+ template <typename MatA, typename MatB, typename MatC>
+ inline void
+ multiply_At_B(MatA const& a, MatB const& b, MatC& dst)
+ {
+ assert(a.num_rows() == b.num_rows());
+ assert(dst.num_rows() == a.num_cols());
+ assert(dst.num_cols() == b.num_cols());
+
+ typedef typename MatC::value_type Elem;
+
+ Elem accum;
+ for (unsigned int r = 0; r < a.num_cols(); ++r)
+ for (unsigned int c = 0; c < b.num_cols(); ++c)
+ {
+ accum = 0;
+ for (unsigned int k = 0; k < a.num_rows(); ++k) accum += a[k][r] * b[k][c];
+ dst[r][c] = accum;
+ }
+ }
+
+ template <typename MatA, typename MatB, typename MatC>
+ inline void
+ multiply_A_Bt(MatA const& a, MatB const& b, MatC& dst)
+ {
+ assert(a.num_cols() == b.num_cols());
+ assert(dst.num_rows() == a.num_rows());
+ assert(dst.num_cols() == b.num_rows());
+
+ typedef typename MatC::value_type Elem;
+
+ Elem accum;
+ for (unsigned int r = 0; r < a.num_rows(); ++r)
+ for (unsigned int c = 0; c < b.num_rows(); ++c)
+ {
+ accum = 0;
+ for (unsigned int k = 0; k < a.num_cols(); ++k) accum += a[r][k] * b[c][k];
+ dst[r][c] = accum;
+ }
+ }
+
+ template <typename Mat>
+ inline void
+ transposeMatrixIP(Mat& a)
+ {
+ assert(a.num_rows() == a.num_cols());
+
+ for (unsigned int r = 0; r < a.num_rows(); ++r)
+ for (unsigned int c = 0; c < r; ++c)
+ std::swap(a[r][c], a[c][r]);
+ }
+
+} // end namespace V3D
+
+#endif
diff --git a/extern/libmv/third_party/ssba/Math/v3d_linear_utils.h b/extern/libmv/third_party/ssba/Math/v3d_linear_utils.h
new file mode 100644
index 00000000000..969ec99694f
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Math/v3d_linear_utils.h
@@ -0,0 +1,391 @@
+// -*- C++ -*-
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef V3D_LINEAR_UTILS_H
+#define V3D_LINEAR_UTILS_H
+
+#include "Math/v3d_linear.h"
+
+#include <iostream>
+#include <new> // placement new is used below by VectorArray and MatrixArray
+
+namespace V3D
+{
+
+ template <typename Elem, int Size>
+ struct InlineVector : public InlineVectorBase<Elem, Size>
+ {
+ }; // end struct InlineVector
+
+ template <typename Elem>
+ struct Vector : public VectorBase<Elem>
+ {
+ Vector()
+ : VectorBase<Elem>()
+ { }
+
+ Vector(unsigned int size)
+ : VectorBase<Elem>(size)
+ { }
+
+ Vector(unsigned int size, Elem * values)
+ : VectorBase<Elem>(size, values)
+ { }
+
+ Vector(Vector<Elem> const& a)
+ : VectorBase<Elem>(a)
+ { }
+
+ Vector<Elem>& operator=(Vector<Elem> const& a)
+ {
+ (VectorBase<Elem>::operator=)(a);
+ return *this;
+ }
+
+ Vector<Elem>& operator+=(Vector<Elem> const& rhs)
+ {
+ addVectors(*this, rhs, *this); // element-wise in-place add; no addVectorsIP() is defined in v3d_linear.h
+ return *this;
+ }
+
+ Vector<Elem>& operator*=(Elem scale)
+ {
+ scaleVectorIP(scale, *this);
+ return *this;
+ }
+
+ Vector<Elem> operator+(Vector<Elem> const& rhs) const
+ {
+ Vector<Elem> res(this->size());
+ addVectors(*this, rhs, res);
+ return res;
+ }
+
+ Vector<Elem> operator-(Vector<Elem> const& rhs) const
+ {
+ Vector<Elem> res(this->size());
+ subtractVectors(*this, rhs, res);
+ return res;
+ }
+
+ Elem operator*(Vector<Elem> const& rhs) const
+ {
+ return innerProduct(*this, rhs);
+ }
+
+ }; // end struct Vector
+
+ template <typename Elem, int Rows, int Cols>
+ struct InlineMatrix : public InlineMatrixBase<Elem, Rows, Cols>
+ {
+ }; // end struct InlineMatrix
+
+ template <typename Elem>
+ struct Matrix : public MatrixBase<Elem>
+ {
+ Matrix()
+ : MatrixBase<Elem>()
+ { }
+
+ Matrix(unsigned int rows, unsigned int cols)
+ : MatrixBase<Elem>(rows, cols)
+ { }
+
+ Matrix(unsigned int rows, unsigned int cols, Elem * values)
+ : MatrixBase<Elem>(rows, cols, values)
+ { }
+
+ Matrix(Matrix<Elem> const& a)
+ : MatrixBase<Elem>(a)
+ { }
+
+ Matrix<Elem>& operator=(Matrix<Elem> const& a)
+ {
+ (MatrixBase<Elem>::operator=)(a);
+ return *this;
+ }
+
+ Matrix<Elem>& operator+=(Matrix<Elem> const& rhs)
+ {
+ addMatricesIP(rhs, *this);
+ return *this;
+ }
+
+ Matrix<Elem>& operator*=(Elem scale)
+ {
+ scaleMatrixIP(scale, *this);
+ return *this;
+ }
+
+ Matrix<Elem> operator+(Matrix<Elem> const& rhs) const
+ {
+ Matrix<Elem> res(this->num_rows(), this->num_cols());
+ addMatrices(*this, rhs, res);
+ return res;
+ }
+
+ Matrix<Elem> operator-(Matrix<Elem> const& rhs) const
+ {
+ Matrix<Elem> res(this->num_rows(), this->num_cols());
+ subtractMatrices(*this, rhs, res);
+ return res;
+ }
+
+ }; // end struct Matrix
+
+//----------------------------------------------------------------------
+
+ typedef InlineVector<float, 2> Vector2f;
+ typedef InlineVector<double, 2> Vector2d;
+ typedef InlineVector<float, 3> Vector3f;
+ typedef InlineVector<double, 3> Vector3d;
+ typedef InlineVector<float, 4> Vector4f;
+ typedef InlineVector<double, 4> Vector4d;
+
+ typedef InlineMatrix<float, 2, 2> Matrix2x2f;
+ typedef InlineMatrix<double, 2, 2> Matrix2x2d;
+ typedef InlineMatrix<float, 3, 3> Matrix3x3f;
+ typedef InlineMatrix<double, 3, 3> Matrix3x3d;
+ typedef InlineMatrix<float, 4, 4> Matrix4x4f;
+ typedef InlineMatrix<double, 4, 4> Matrix4x4d;
+
+ typedef InlineMatrix<float, 2, 3> Matrix2x3f;
+ typedef InlineMatrix<double, 2, 3> Matrix2x3d;
+ typedef InlineMatrix<float, 3, 4> Matrix3x4f;
+ typedef InlineMatrix<double, 3, 4> Matrix3x4d;
+
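+ //! Array of equal-sized vectors kept in one contiguous buffer.
+ // Each Vector<Elem> below is re-initialized via placement new as a
+ // non-owning view into _values, so all element data stays contiguous.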
+ template <typename Elem>
+ struct VectorArray
+ {
+ VectorArray(unsigned count, unsigned size)
+ : _count(count), _size(size), _values(0), _vectors(0)
+ {
+ unsigned const nTotal = _count * _size;
+ if (count > 0) _vectors = new Vector<Elem>[count];
+ if (nTotal > 0) _values = new Elem[nTotal];
+ for (unsigned i = 0; i < _count; ++i) new (&_vectors[i]) Vector<Elem>(_size, _values + i*_size);
+ }
+
+ VectorArray(unsigned count, unsigned size, Elem initVal)
+ : _count(count), _size(size), _values(0), _vectors(0)
+ {
+ unsigned const nTotal = _count * _size;
+ if (count > 0) _vectors = new Vector<Elem>[count];
+ if (nTotal > 0) _values = new Elem[nTotal];
+ for (unsigned i = 0; i < _count; ++i) new (&_vectors[i]) Vector<Elem>(_size, _values + i*_size);
+ std::fill(_values, _values + nTotal, initVal);
+ }
+
+ ~VectorArray()
+ {
+ delete [] _values;
+ delete [] _vectors;
+ }
+
+ unsigned count() const { return _count; }
+ unsigned size() const { return _size; }
+
+ //! Get the submatrix at position ix
+ Vector<Elem> const& operator[](unsigned ix) const
+ {
+ return _vectors[ix];
+ }
+
+ //! Get the submatrix at position ix
+ Vector<Elem>& operator[](unsigned ix)
+ {
+ return _vectors[ix];
+ }
+
+ protected:
+ unsigned _count, _size;
+ Elem * _values;
+ Vector<Elem> * _vectors;
+
+ private:
+ VectorArray(VectorArray const&);
+ void operator=(VectorArray const&);
+ };
+
+ template <typename Elem>
+ struct MatrixArray
+ {
+ MatrixArray(unsigned count, unsigned nRows, unsigned nCols)
+ : _count(count), _rows(nRows), _columns(nCols), _values(0), _matrices(0)
+ {
+ unsigned const nTotal = _count * _rows * _columns;
+ if (count > 0) _matrices = new Matrix<Elem>[count];
+ if (nTotal > 0) _values = new Elem[nTotal];
+ for (unsigned i = 0; i < _count; ++i)
+ new (&_matrices[i]) Matrix<Elem>(_rows, _columns, _values + i*(_rows*_columns));
+ }
+
+ ~MatrixArray()
+ {
+ delete [] _matrices;
+ delete [] _values;
+ }
+
+ //! Get the submatrix at position ix
+ Matrix<Elem> const& operator[](unsigned ix) const
+ {
+ return _matrices[ix];
+ }
+
+ //! Get the submatrix at position ix
+ Matrix<Elem>& operator[](unsigned ix)
+ {
+ return _matrices[ix];
+ }
+
+ unsigned count() const { return _count; }
+ unsigned num_rows() const { return _rows; }
+ unsigned num_cols() const { return _columns; }
+
+ protected:
+ unsigned _count, _rows, _columns;
+ Elem * _values;
+ Matrix<Elem> * _matrices;
+
+ private:
+ MatrixArray(MatrixArray const&);
+ void operator=(MatrixArray const&);
+ };
+
+//----------------------------------------------------------------------
+
+ template <typename Elem, int Size>
+ inline InlineVector<Elem, Size>
+ operator+(InlineVector<Elem, Size> const& v, InlineVector<Elem, Size> const& w)
+ {
+ InlineVector<Elem, Size> res;
+ addVectors(v, w, res);
+ return res;
+ }
+
+ template <typename Elem, int Size>
+ inline InlineVector<Elem, Size>
+ operator-(InlineVector<Elem, Size> const& v, InlineVector<Elem, Size> const& w)
+ {
+ InlineVector<Elem, Size> res;
+ subtractVectors(v, w, res);
+ return res;
+ }
+
+ template <typename Elem, int Size>
+ inline InlineVector<Elem, Size>
+ operator*(Elem scale, InlineVector<Elem, Size> const& v)
+ {
+ InlineVector<Elem, Size> res;
+ scaleVector(scale, v, res);
+ return res;
+ }
+
+ template <typename Elem, int Rows, int Cols>
+ inline InlineVector<Elem, Rows>
+ operator*(InlineMatrix<Elem, Rows, Cols> const& A, InlineVector<Elem, Cols> const& v)
+ {
+ InlineVector<Elem, Rows> res;
+ multiply_A_v(A, v, res);
+ return res;
+ }
+
+ template <typename Elem, int RowsA, int ColsA, int ColsB>
+ inline InlineMatrix<Elem, RowsA, ColsB>
+ operator*(InlineMatrix<Elem, RowsA, ColsA> const& A, InlineMatrix<Elem, ColsA, ColsB> const& B)
+ {
+ InlineMatrix<Elem, RowsA, ColsB> res;
+ multiply_A_B(A, B, res);
+ return res;
+ }
+
+ template <typename Elem, int Rows, int Cols>
+ inline InlineMatrix<Elem, Cols, Rows>
+ transposedMatrix(InlineMatrix<Elem, Rows, Cols> const& A)
+ {
+ InlineMatrix<Elem, Cols, Rows> At;
+ makeTransposedMatrix(A, At);
+ return At;
+ }
+
+ template <typename Elem>
+ inline InlineMatrix<Elem, 3, 3>
+ invertedMatrix(InlineMatrix<Elem, 3, 3> const& A)
+ {
+ Elem a = A[0][0], b = A[0][1], c = A[0][2];
+ Elem d = A[1][0], e = A[1][1], f = A[1][2];
+ Elem g = A[2][0], h = A[2][1], i = A[2][2];
+
+ Elem const det = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h;
+
+ InlineMatrix<Elem, 3, 3> res;
+ res[0][0] = e*i-f*h; res[0][1] = c*h-b*i; res[0][2] = b*f-c*e;
+ res[1][0] = f*g-d*i; res[1][1] = a*i-c*g; res[1][2] = c*d-a*f;
+ res[2][0] = d*h-e*g; res[2][1] = b*g-a*h; res[2][2] = a*e-b*d;
+
+ scaleMatrixIP(1.0/det, res);
+ return res;
+ }
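+
+ // Note: invertedMatrix() uses the closed-form adjugate-over-determinant
+ // inverse of a 3x3 matrix; the determinant is not checked, so the caller
+ // must ensure the input is non-singular.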
+
+ template <typename Elem>
+ inline InlineVector<Elem, 2>
+ makeVector2(Elem a, Elem b)
+ {
+ InlineVector<Elem, 2> res;
+ res[0] = a; res[1] = b;
+ return res;
+ }
+
+ template <typename Elem>
+ inline InlineVector<Elem, 3>
+ makeVector3(Elem a, Elem b, Elem c)
+ {
+ InlineVector<Elem, 3> res;
+ res[0] = a; res[1] = b; res[2] = c;
+ return res;
+ }
+
+ template <typename Vec>
+ inline void
+ displayVector(Vec const& v)
+ {
+ using namespace std;
+
+ for (unsigned int r = 0; r < v.size(); ++r)
+ cout << v[r] << " ";
+ cout << endl;
+ }
+
+ template <typename Mat>
+ inline void
+ displayMatrix(Mat const& A)
+ {
+ using namespace std;
+
+ for (unsigned int r = 0; r < A.num_rows(); ++r)
+ {
+ for (unsigned int c = 0; c < A.num_cols(); ++c)
+ cout << A[r][c] << " ";
+ cout << endl;
+ }
+ }
+
+} // end namespace V3D
+
+#endif
diff --git a/extern/libmv/third_party/ssba/Math/v3d_mathutilities.h b/extern/libmv/third_party/ssba/Math/v3d_mathutilities.h
new file mode 100644
index 00000000000..9e38b92a94b
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Math/v3d_mathutilities.h
@@ -0,0 +1,59 @@
+// -*- C++ -*-
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef V3D_MATH_UTILITIES_H
+#define V3D_MATH_UTILITIES_H
+
+#include "Math/v3d_linear.h"
+#include "Math/v3d_linear_utils.h"
+
+#include <vector>
+
+namespace V3D
+{
+
+ template <typename Vec, typename Mat>
+ inline void
+ createRotationMatrixRodriguez(Vec const& omega, Mat& R)
+ {
+ assert(omega.size() == 3);
+ assert(R.num_rows() == 3);
+ assert(R.num_cols() == 3);
+
+ double const theta = norm_L2(omega);
+ makeIdentityMatrix(R);
+ if (fabs(theta) > 1e-6)
+ {
+ Matrix3x3d J, J2;
+ makeCrossProductMatrix(omega, J);
+ multiply_A_B(J, J, J2);
+ double const c1 = sin(theta)/theta;
+ double const c2 = (1-cos(theta))/(theta*theta);
+ for (int i = 0; i < 3; ++i)
+ for (int j = 0; j < 3; ++j)
+ R[i][j] += c1*J[i][j] + c2*J2[i][j];
+ }
+ } // end createRotationMatrixRodriguez()
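+
+ // The branch above implements the Rodrigues formula: with theta = |omega|
+ // and J = [omega]_x,
+ // R = I + (sin(theta)/theta) * J + ((1 - cos(theta))/theta^2) * J^2,
+ // degenerating to R = I for very small theta to avoid division by zero.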
+
+ template <typename T> inline double sqr(T x) { return x*x; }
+
+} // namespace V3D
+
+#endif
diff --git a/extern/libmv/third_party/ssba/Math/v3d_optimization.cpp b/extern/libmv/third_party/ssba/Math/v3d_optimization.cpp
new file mode 100644
index 00000000000..234815fcd1f
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Math/v3d_optimization.cpp
@@ -0,0 +1,955 @@
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "Math/v3d_optimization.h"
+
+#if defined(V3DLIB_ENABLE_SUITESPARSE)
+//# include "COLAMD/Include/colamd.h"
+# include "colamd.h"
+extern "C"
+{
+//# include "LDL/Include/ldl.h"
+# include "ldl.h"
+}
+#endif
+
+#include <iostream>
+#include <map>
+
+#define USE_BLOCK_REORDERING 1
+#define USE_MULTIPLICATIVE_UPDATE 1
+
+using namespace std;
+
+namespace
+{
+
+ using namespace V3D;
+
+ inline double
+ squaredResidual(VectorArray<double> const& e)
+ {
+ int const N = e.count();
+ int const M = e.size();
+
+ double res = 0.0;
+
+ for (int n = 0; n < N; ++n)
+ for (int m = 0; m < M; ++m)
+ res += e[n][m] * e[n][m];
+
+ return res;
+ } // end squaredResidual()
+
+} // end namespace <>
+
+namespace V3D
+{
+
+ int optimizerVerbosenessLevel = 0;
+
+#if defined(V3DLIB_ENABLE_SUITESPARSE)
+
+ void
+ SparseLevenbergOptimizer::setupSparseJtJ()
+ {
+ int const nVaryingA = _nParametersA - _nNonvaryingA;
+ int const nVaryingB = _nParametersB - _nNonvaryingB;
+ int const nVaryingC = _paramDimensionC - _nNonvaryingC;
+
+ int const bColumnStart = nVaryingA*_paramDimensionA;
+ int const cColumnStart = bColumnStart + nVaryingB*_paramDimensionB;
+ int const nColumns = cColumnStart + nVaryingC;
+
+ _jointNonzerosW.clear();
+ _jointIndexW.resize(_nMeasurements);
+#if 1
+ {
+ map<pair<int, int>, int> jointNonzeroMap;
+ for (size_t k = 0; k < _nMeasurements; ++k)
+ {
+ int const i = _correspondingParamA[k] - _nNonvaryingA;
+ int const j = _correspondingParamB[k] - _nNonvaryingB;
+ if (i >= 0 && j >= 0)
+ {
+ map<pair<int, int>, int>::const_iterator p = jointNonzeroMap.find(make_pair(i, j));
+ if (p == jointNonzeroMap.end())
+ {
+ jointNonzeroMap.insert(make_pair(make_pair(i, j), _jointNonzerosW.size()));
+ _jointIndexW[k] = _jointNonzerosW.size();
+ _jointNonzerosW.push_back(make_pair(i, j));
+ }
+ else
+ {
+ _jointIndexW[k] = (*p).second;
+ } // end if
+ } // end if
+ } // end for (k)
+ } // end scope
+#else
+ for (size_t k = 0; k < _nMeasurements; ++k)
+ {
+ int const i = _correspondingParamA[k] - _nNonvaryingA;
+ int const j = _correspondingParamB[k] - _nNonvaryingB;
+ if (i >= 0 && j >= 0)
+ {
+ _jointIndexW[k] = _jointNonzerosW.size();
+ _jointNonzerosW.push_back(make_pair(i, j));
+ }
+ } // end for (k)
+#endif
+
+#if defined(USE_BLOCK_REORDERING)
+ int const bBlockColumnStart = nVaryingA;
+ int const cBlockColumnStart = bBlockColumnStart + nVaryingB;
+
+ int const nBlockColumns = cBlockColumnStart + ((nVaryingC > 0) ? 1 : 0);
+
+ //cout << "nBlockColumns = " << nBlockColumns << endl;
+
+ // For the column reordering we treat the columns belonging to one set
+ // of parameters as one (logical) column.
+
+ // Determine non-zeros of JtJ (we forget about the non-zero diagonal for now)
+ // Only consider nonzeros of Ai^t * Bj induced by the measurements.
+ vector<pair<int, int> > nz_blockJtJ(_jointNonzerosW.size());
+ for (int k = 0; k < _jointNonzerosW.size(); ++k)
+ {
+ nz_blockJtJ[k].first = _jointNonzerosW[k].second + bBlockColumnStart;
+ nz_blockJtJ[k].second = _jointNonzerosW[k].first;
+ }
+
+ if (nVaryingC > 0)
+ {
+ // We assume that the global unknowns are linked to every other variable.
+ for (int i = 0; i < nVaryingA; ++i)
+ nz_blockJtJ.push_back(make_pair(cBlockColumnStart, i));
+ for (int j = 0; j < nVaryingB; ++j)
+ nz_blockJtJ.push_back(make_pair(cBlockColumnStart, j + bBlockColumnStart));
+ } // end if
+
+ int const nnzBlock = nz_blockJtJ.size();
+
+ vector<int> permBlockJtJ(nBlockColumns + 1);
+
+ if (nnzBlock > 0)
+ {
+// cout << "nnzBlock = " << nnzBlock << endl;
+
+ CCS_Matrix<int> blockJtJ(nBlockColumns, nBlockColumns, nz_blockJtJ);
+
+// cout << " nz_blockJtJ: " << endl;
+// for (size_t k = 0; k < nz_blockJtJ.size(); ++k)
+// cout << " " << nz_blockJtJ[k].first << ":" << nz_blockJtJ[k].second << endl;
+// cout << endl;
+
+ int * colStarts = (int *)blockJtJ.getColumnStarts();
+ int * rowIdxs = (int *)blockJtJ.getRowIndices();
+
+// cout << "blockJtJ_colStarts = ";
+// for (int k = 0; k <= nBlockColumns; ++k) cout << colStarts[k] << " ";
+// cout << endl;
+
+// cout << "blockJtJ_rowIdxs = ";
+// for (int k = 0; k < nnzBlock; ++k) cout << rowIdxs[k] << " ";
+// cout << endl;
+
+ int stats[COLAMD_STATS];
+ symamd(nBlockColumns, rowIdxs, colStarts, &permBlockJtJ[0], (double *) NULL, stats, &calloc, &free);
+ if (optimizerVerbosenessLevel >= 2) symamd_report(stats);
+ }
+ else
+ {
+ for (int k = 0; k < permBlockJtJ.size(); ++k) permBlockJtJ[k] = k;
+ } // end if
+
+// cout << "permBlockJtJ = ";
+// for (int k = 0; k < permBlockJtJ.size(); ++k)
+// cout << permBlockJtJ[k] << " ";
+// cout << endl;
+
+ // Expand the symbolic permutation over logical (block) variables into the actual scalar column ordering.
+ _perm_JtJ.resize(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC + 1);
+
+ int curDstCol = 0;
+ for (int k = 0; k < permBlockJtJ.size()-1; ++k)
+ {
+ int const srcCol = permBlockJtJ[k];
+ if (srcCol < nVaryingA)
+ {
+ for (int n = 0; n < _paramDimensionA; ++n)
+ _perm_JtJ[curDstCol + n] = srcCol*_paramDimensionA + n;
+ curDstCol += _paramDimensionA;
+ }
+ else if (srcCol >= bBlockColumnStart && srcCol < cBlockColumnStart)
+ {
+ int const bStart = nVaryingA*_paramDimensionA;
+ int const j = srcCol - bBlockColumnStart;
+
+ for (int n = 0; n < _paramDimensionB; ++n)
+ _perm_JtJ[curDstCol + n] = bStart + j*_paramDimensionB + n;
+ curDstCol += _paramDimensionB;
+ }
+ else if (srcCol == cBlockColumnStart)
+ {
+ int const cStart = nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB;
+
+ for (int n = 0; n < nVaryingC; ++n)
+ _perm_JtJ[curDstCol + n] = cStart + n;
+ curDstCol += nVaryingC;
+ }
+ else
+ {
+ cerr << "Should not reach " << __LINE__ << ":" << __LINE__ << "!" << endl;
+ assert(false);
+ }
+ }
+#else
+ vector<pair<int, int> > nz, nzL;
+ this->serializeNonZerosJtJ(nz);
+
+ for (int k = 0; k < nz.size(); ++k)
+ {
+ // Swap rows and columns, since serializeNonZerosJtJ() generates the
+ // upper triangular part but symamd wants the lower triangle.
+ nzL.push_back(make_pair(nz[k].second, nz[k].first));
+ }
+
+ _perm_JtJ.resize(nColumns+1);
+
+ if (nzL.size() > 0)
+ {
+ CCS_Matrix<int> symbJtJ(nColumns, nColumns, nzL);
+
+ int * colStarts = (int *)symbJtJ.getColumnStarts();
+ int * rowIdxs = (int *)symbJtJ.getRowIndices();
+
+// cout << "symbJtJ_colStarts = ";
+// for (int k = 0; k <= nColumns; ++k) cout << colStarts[k] << " ";
+// cout << endl;
+
+// cout << "symbJtJ_rowIdxs = ";
+// for (int k = 0; k < nzL.size(); ++k) cout << rowIdxs[k] << " ";
+// cout << endl;
+
+ int stats[COLAMD_STATS];
+ symamd(nColumns, rowIdxs, colStarts, &_perm_JtJ[0], (double *) NULL, stats, &calloc, &free);
+ if (optimizerVerbosenessLevel >= 2) symamd_report(stats);
+ }
+ else
+ {
+ for (int k = 0; k < _perm_JtJ.size(); ++k) _perm_JtJ[k] = k;
+ } // end if
+#endif
+ _perm_JtJ.back() = _perm_JtJ.size() - 1;
+
+// cout << "_perm_JtJ = ";
+// for (int k = 0; k < _perm_JtJ.size(); ++k) cout << _perm_JtJ[k] << " ";
+// cout << endl;
+
+ // Finally, compute the inverse of the full permutation.
+ _invPerm_JtJ.resize(_perm_JtJ.size());
+ for (size_t k = 0; k < _perm_JtJ.size(); ++k)
+ _invPerm_JtJ[_perm_JtJ[k]] = k;
+
+ vector<pair<int, int> > nz_JtJ;
+ this->serializeNonZerosJtJ(nz_JtJ);
+
+ for (int k = 0; k < nz_JtJ.size(); ++k)
+ {
+ int const i = nz_JtJ[k].first;
+ int const j = nz_JtJ[k].second;
+
+ int pi = _invPerm_JtJ[i];
+ int pj = _invPerm_JtJ[j];
+ // Swap values if in lower triangular part
+ if (pi > pj) std::swap(pi, pj);
+ nz_JtJ[k].first = pi;
+ nz_JtJ[k].second = pj;
+ }
+
+ int const nnz = nz_JtJ.size();
+
+// cout << "nz_JtJ = ";
+// for (int k = 0; k < nnz; ++k) cout << nz_JtJ[k].first << ":" << nz_JtJ[k].second << " ";
+// cout << endl;
+
+ _JtJ.create(nColumns, nColumns, nz_JtJ);
+
+// cout << "_colStart_JtJ = ";
+// for (int k = 0; k < _JtJ.num_cols(); ++k) cout << _JtJ.getColumnStarts()[k] << " ";
+// cout << endl;
+
+// cout << "_rowIdxs_JtJ = ";
+// for (int k = 0; k < nnz; ++k) cout << _JtJ.getRowIndices()[k] << " ";
+// cout << endl;
+
+ vector<int> workFlags(nColumns);
+
+ _JtJ_Lp.resize(nColumns+1);
+ _JtJ_Parent.resize(nColumns);
+ _JtJ_Lnz.resize(nColumns);
+
+ ldl_symbolic(nColumns, (int *)_JtJ.getColumnStarts(), (int *)_JtJ.getRowIndices(),
+ &_JtJ_Lp[0], &_JtJ_Parent[0], &_JtJ_Lnz[0],
+ &workFlags[0], NULL, NULL);
+
+ if (optimizerVerbosenessLevel >= 1)
+ cout << "SparseLevenbergOptimizer: Nonzeros in LDL decomposition: " << _JtJ_Lp[nColumns] << endl;
+
+ } // end SparseLevenbergOptimizer::setupSparseJtJ()
+
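+ // The Gauss-Newton Hessian approximation JtJ assembled below has the block
+ // structure
+ //
+ // [ U W X ]
+ // JtJ = [ W^t V Y ]
+ // [ X^t Y^t Z ]
+ //
+ // with per-camera blocks U_i, per-point blocks V_j, the global block Z, and
+ // coupling blocks W (camera-point, Wn = Ak^t*Bk), X (camera-global) and
+ // Y (point-global); only the upper triangular part is serialized.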
+ void
+ SparseLevenbergOptimizer::serializeNonZerosJtJ(vector<pair<int, int> >& dst) const
+ {
+ int const nVaryingA = _nParametersA - _nNonvaryingA;
+ int const nVaryingB = _nParametersB - _nNonvaryingB;
+ int const nVaryingC = _paramDimensionC - _nNonvaryingC;
+
+ int const bColumnStart = nVaryingA*_paramDimensionA;
+ int const cColumnStart = bColumnStart + nVaryingB*_paramDimensionB;
+
+ dst.clear();
+
+ // Add the diagonal block matrices (only the upper triangular part).
+
+ // Ui submatrices of JtJ
+ for (int i = 0; i < nVaryingA; ++i)
+ {
+ int const i0 = i * _paramDimensionA;
+
+ for (int c = 0; c < _paramDimensionA; ++c)
+ for (int r = 0; r <= c; ++r)
+ dst.push_back(make_pair(i0 + r, i0 + c));
+ }
+
+ // Vj submatrices of JtJ
+ for (int j = 0; j < nVaryingB; ++j)
+ {
+ int const j0 = j*_paramDimensionB + bColumnStart;
+
+ for (int c = 0; c < _paramDimensionB; ++c)
+ for (int r = 0; r <= c; ++r)
+ dst.push_back(make_pair(j0 + r, j0 + c));
+ }
+
+ // Z submatrix of JtJ
+ for (int c = 0; c < nVaryingC; ++c)
+ for (int r = 0; r <= c; ++r)
+ dst.push_back(make_pair(cColumnStart + r, cColumnStart + c));
+
+ // Add the elements i and j linked by an observation k
+ // W submatrix of JtJ
+ for (size_t n = 0; n < _jointNonzerosW.size(); ++n)
+ {
+ int const i0 = _jointNonzerosW[n].first;
+ int const j0 = _jointNonzerosW[n].second;
+ int const r0 = i0*_paramDimensionA;
+ int const c0 = j0*_paramDimensionB + bColumnStart;
+
+ for (int r = 0; r < _paramDimensionA; ++r)
+ for (int c = 0; c < _paramDimensionB; ++c)
+ dst.push_back(make_pair(r0 + r, c0 + c));
+ } // end for (n)
+
+ if (nVaryingC > 0)
+ {
+ // Finally, add the dense columns linking i (resp. j) with the global parameters.
+ // X submatrix of JtJ
+ for (int i = 0; i < nVaryingA; ++i)
+ {
+ int const i0 = i*_paramDimensionA;
+
+ for (int r = 0; r < _paramDimensionA; ++r)
+ for (int c = 0; c < nVaryingC; ++c)
+ dst.push_back(make_pair(i0 + r, cColumnStart + c));
+ }
+
+ // Y submatrix of JtJ
+ for (int j = 0; j < nVaryingB; ++j)
+ {
+ int const j0 = j*_paramDimensionB + bColumnStart;
+
+ for (int r = 0; r < _paramDimensionB; ++r)
+ for (int c = 0; c < nVaryingC; ++c)
+ dst.push_back(make_pair(j0 + r, cColumnStart + c));
+ }
+ } // end if
+ } // end SparseLevenbergOptimizer::serializeNonZerosJtJ()
+
+ void
+ SparseLevenbergOptimizer::fillSparseJtJ(MatrixArray<double> const& Ui,
+ MatrixArray<double> const& Vj,
+ MatrixArray<double> const& Wn,
+ Matrix<double> const& Z,
+ Matrix<double> const& X,
+ Matrix<double> const& Y)
+ {
+ int const nVaryingA = _nParametersA - _nNonvaryingA;
+ int const nVaryingB = _nParametersB - _nNonvaryingB;
+ int const nVaryingC = _paramDimensionC - _nNonvaryingC;
+
+ int const bColumnStart = nVaryingA*_paramDimensionA;
+ int const cColumnStart = bColumnStart + nVaryingB*_paramDimensionB;
+
+ int const nCols = _JtJ.num_cols();
+ int const nnz = _JtJ.getNonzeroCount();
+
+ // The following must replicate the traversal order used in serializeNonZerosJtJ()
+
+ int serial = 0;
+
+ double * values = _JtJ.getValues();
+ int const * destIdxs = _JtJ.getDestIndices();
+
+ // Add the diagonal block matrices (only the upper triangular part).
+
+ // Ui submatrices of JtJ
+ for (int i = 0; i < nVaryingA; ++i)
+ {
+ int const i0 = i * _paramDimensionA;
+
+ for (int c = 0; c < _paramDimensionA; ++c)
+ for (int r = 0; r <= c; ++r, ++serial)
+ values[destIdxs[serial]] = Ui[i][r][c];
+ }
+
+ // Vj submatrices of JtJ
+ for (int j = 0; j < nVaryingB; ++j)
+ {
+ int const j0 = j*_paramDimensionB + bColumnStart;
+
+ for (int c = 0; c < _paramDimensionB; ++c)
+ for (int r = 0; r <= c; ++r, ++serial)
+ values[destIdxs[serial]] = Vj[j][r][c];
+ }
+
+ // Z submatrix of JtJ
+ for (int c = 0; c < nVaryingC; ++c)
+ for (int r = 0; r <= c; ++r, ++serial)
+ values[destIdxs[serial]] = Z[r][c];
+
+ // Add the elements i and j linked by an observation k
+ // W submatrix of JtJ
+ for (size_t n = 0; n < _jointNonzerosW.size(); ++n)
+ {
+ for (int r = 0; r < _paramDimensionA; ++r)
+ for (int c = 0; c < _paramDimensionB; ++c, ++serial)
+ values[destIdxs[serial]] = Wn[n][r][c];
+ } // end for (n)
+
+ if (nVaryingC > 0)
+ {
+ // Finally, add the dense columns linking i (resp. j) with the global parameters.
+ // X submatrix of JtJ
+ for (int i = 0; i < nVaryingA; ++i)
+ {
+ int const r0 = i * _paramDimensionA;
+ for (int r = 0; r < _paramDimensionA; ++r)
+ for (int c = 0; c < nVaryingC; ++c, ++serial)
+ values[destIdxs[serial]] = X[r0+r][c];
+ }
+
+ // Y submatrix of JtJ
+ for (int j = 0; j < nVaryingB; ++j)
+ {
+ int const r0 = j * _paramDimensionB;
+ for (int r = 0; r < _paramDimensionB; ++r)
+ for (int c = 0; c < nVaryingC; ++c, ++serial)
+ values[destIdxs[serial]] = Y[r0+r][c];
+ }
+ } // end if
+ } // end SparseLevenbergOptimizer::fillSparseJtJ()
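
The indirection through destIdxs decouples the fixed serialization order from
the column-compressed storage order. A toy sketch of that scatter step (the
source values and destination indices here are made up):

    #include <cstdio>

    // Scatter values produced in serialization order into a CCS value array,
    // mirroring the statement values[destIdxs[serial]] = ... used above.
    int main()
    {
       double src[4]      = { 1.0, 2.0, 3.0, 4.0 }; // serialization order
       int    destIdxs[4] = { 2, 0, 3, 1 };         // hypothetical CCS slots
       double values[4]   = { 0.0, 0.0, 0.0, 0.0 };

       for (int serial = 0; serial < 4; ++serial)
          values[destIdxs[serial]] = src[serial];

       for (int i = 0; i < 4; ++i)
          std::printf("%g ", values[i]); // prints: 2 4 1 3
       return 0;
    }
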
+
+ void
+ SparseLevenbergOptimizer::minimize()
+ {
+ status = LEVENBERG_OPTIMIZER_TIMEOUT;
+ bool computeDerivatives = true;
+
+ int const nVaryingA = _nParametersA - _nNonvaryingA;
+ int const nVaryingB = _nParametersB - _nNonvaryingB;
+ int const nVaryingC = _paramDimensionC - _nNonvaryingC;
+
+ if (nVaryingA == 0 && nVaryingB == 0 && nVaryingC == 0)
+ {
+ // No degrees of freedom, nothing to optimize.
+ status = LEVENBERG_OPTIMIZER_CONVERGED;
+ return;
+ }
+
+ this->setupSparseJtJ();
+
+ Vector<double> weights(_nMeasurements);
+
+ MatrixArray<double> Ak(_nMeasurements, _measurementDimension, _paramDimensionA);
+ MatrixArray<double> Bk(_nMeasurements, _measurementDimension, _paramDimensionB);
+ MatrixArray<double> Ck(_nMeasurements, _measurementDimension, _paramDimensionC);
+
+ MatrixArray<double> Ui(nVaryingA, _paramDimensionA, _paramDimensionA);
+ MatrixArray<double> Vj(nVaryingB, _paramDimensionB, _paramDimensionB);
+
+ // Wn = Ak^t*Bk
+ MatrixArray<double> Wn(_jointNonzerosW.size(), _paramDimensionA, _paramDimensionB);
+
+ Matrix<double> Z(nVaryingC, nVaryingC);
+
+ // X = A^t*C
+ Matrix<double> X(nVaryingA*_paramDimensionA, nVaryingC);
+ // Y = B^t*C
+ Matrix<double> Y(nVaryingB*_paramDimensionB, nVaryingC);
+
+ VectorArray<double> residuals(_nMeasurements, _measurementDimension);
+ VectorArray<double> residuals2(_nMeasurements, _measurementDimension);
+
+ VectorArray<double> diagUi(nVaryingA, _paramDimensionA);
+ VectorArray<double> diagVj(nVaryingB, _paramDimensionB);
+ Vector<double> diagZ(nVaryingC);
+
+ VectorArray<double> At_e(nVaryingA, _paramDimensionA);
+ VectorArray<double> Bt_e(nVaryingB, _paramDimensionB);
+ Vector<double> Ct_e(nVaryingC);
+
+ Vector<double> Jt_e(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);
+
+ Vector<double> delta(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);
+ Vector<double> deltaPerm(nVaryingA*_paramDimensionA + nVaryingB*_paramDimensionB + nVaryingC);
+
+ VectorArray<double> deltaAi(_nParametersA, _paramDimensionA);
+ VectorArray<double> deltaBj(_nParametersB, _paramDimensionB);
+ Vector<double> deltaC(_paramDimensionC);
+
+ double err = 0.0;
+
+ for (currentIteration = 0; currentIteration < maxIterations; ++currentIteration)
+ {
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "SparseLevenbergOptimizer: currentIteration: " << currentIteration << endl;
+ if (computeDerivatives)
+ {
+ this->evalResidual(residuals);
+ this->fillWeights(residuals, weights);
+ for (int k = 0; k < _nMeasurements; ++k)
+ scaleVectorIP(weights[k], residuals[k]);
+
+ err = squaredResidual(residuals);
+
+ if (optimizerVerbosenessLevel >= 1) cout << "SparseLevenbergOptimizer: |residual|^2 = " << err << endl;
+ if (optimizerVerbosenessLevel >= 2) cout << "SparseLevenbergOptimizer: lambda = " << lambda << endl;
+
+ for (int k = 0; k < residuals.count(); ++k) scaleVectorIP(-1.0, residuals[k]);
+
+ this->setupJacobianGathering();
+ this->fillAllJacobians(weights, Ak, Bk, Ck);
+
+ // Compute the different parts of J^t*e
+ if (nVaryingA > 0)
+ {
+ for (int i = 0; i < nVaryingA; ++i) makeZeroVector(At_e[i]);
+
+ Vector<double> tmp(_paramDimensionA);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ int const i = _correspondingParamA[k] - _nNonvaryingA;
+ if (i < 0) continue;
+ multiply_At_v(Ak[k], residuals[k], tmp);
+ addVectors(tmp, At_e[i], At_e[i]);
+ } // end for (k)
+ } // end if
+
+ if (nVaryingB > 0)
+ {
+ for (int j = 0; j < nVaryingB; ++j) makeZeroVector(Bt_e[j]);
+
+ Vector<double> tmp(_paramDimensionB);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ int const j = _correspondingParamB[k] - _nNonvaryingB;
+ if (j < 0) continue;
+ multiply_At_v(Bk[k], residuals[k], tmp);
+ addVectors(tmp, Bt_e[j], Bt_e[j]);
+ } // end for (k)
+ } // end if
+
+ if (nVaryingC > 0)
+ {
+ makeZeroVector(Ct_e);
+
+ Vector<double> tmp(_paramDimensionC);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ multiply_At_v(Ck[k], residuals[k], tmp);
+ for (int l = 0; l < nVaryingC; ++l) Ct_e[l] += tmp[_nNonvaryingC + l];
+ }
+ } // end if
+
+ int pos = 0;
+ for (int i = 0; i < nVaryingA; ++i)
+ for (int l = 0; l < _paramDimensionA; ++l, ++pos)
+ Jt_e[pos] = At_e[i][l];
+ for (int j = 0; j < nVaryingB; ++j)
+ for (int l = 0; l < _paramDimensionB; ++l, ++pos)
+ Jt_e[pos] = Bt_e[j][l];
+ for (int l = 0; l < nVaryingC; ++l, ++pos)
+ Jt_e[pos] = Ct_e[l];
+
+// cout << "Jt_e = ";
+// for (int k = 0; k < Jt_e.size(); ++k) cout << Jt_e[k] << " ";
+// cout << endl;
+
+ if (this->applyGradientStoppingCriteria(norm_Linf(Jt_e)))
+ {
+ status = LEVENBERG_OPTIMIZER_CONVERGED;
+ goto end;
+ }
+
+ // The lhs J^t*J consists of several parts:
+ //           [ U    W    X  ]
+ // J^t*J  =  [ W^t  V    Y  ]
+ //           [ X^t  Y^t  Z  ],
+ // where U, V and W are block-sparse matrices (due to the sparsity of A and B).
+ // X, Y and Z contain only a few columns (the number of global parameters).
+
+ if (nVaryingA > 0)
+ {
+ // Compute Ui
+ Matrix<double> U(_paramDimensionA, _paramDimensionA);
+
+ for (int i = 0; i < nVaryingA; ++i) makeZeroMatrix(Ui[i]);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ int const i = _correspondingParamA[k] - _nNonvaryingA;
+ if (i < 0) continue;
+ multiply_At_A(Ak[k], U);
+ addMatricesIP(U, Ui[i]);
+ } // end for (k)
+ } // end if
+
+ if (nVaryingB > 0)
+ {
+ // Compute Vj
+ Matrix<double> V(_paramDimensionB, _paramDimensionB);
+
+ for (int j = 0; j < nVaryingB; ++j) makeZeroMatrix(Vj[j]);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ int const j = _correspondingParamB[k] - _nNonvaryingB;
+ if (j < 0) continue;
+ multiply_At_A(Bk[k], V);
+ addMatricesIP(V, Vj[j]);
+ } // end for (k)
+ } // end if
+
+ if (nVaryingC > 0)
+ {
+ Matrix<double> ZZ(_paramDimensionC, _paramDimensionC);
+ Matrix<double> Zsum(_paramDimensionC, _paramDimensionC);
+
+ makeZeroMatrix(Zsum);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ multiply_At_A(Ck[k], ZZ);
+ addMatricesIP(ZZ, Zsum);
+ } // end for (k)
+
+ // Ignore the non-varying parameters
+ for (int i = 0; i < nVaryingC; ++i)
+ for (int j = 0; j < nVaryingC; ++j)
+ Z[i][j] = Zsum[i+_nNonvaryingC][j+_nNonvaryingC];
+ } // end if
+
+ if (nVaryingA > 0 && nVaryingB > 0)
+ {
+ for (int n = 0; n < Wn.count(); ++n) makeZeroMatrix(Wn[n]);
+
+ Matrix<double> W(_paramDimensionA, _paramDimensionB);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ int const n = _jointIndexW[k];
+ if (n >= 0)
+ {
+ multiply_At_B(Ak[k], Bk[k], W);
+ addMatricesIP(W, Wn[n]);
+ } // end if
+ } // end for (k)
+ } // end if
+
+ if (nVaryingA > 0 && nVaryingC > 0)
+ {
+ Matrix<double> XX(_paramDimensionA, _paramDimensionC);
+
+ makeZeroMatrix(X);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ int const i = _correspondingParamA[k] - _nNonvaryingA;
+ // Ignore the non-varying parameters
+ if (i < 0) continue;
+
+ multiply_At_B(Ak[k], Ck[k], XX);
+
+ for (int r = 0; r < _paramDimensionA; ++r)
+ for (int c = 0; c < nVaryingC; ++c)
+ X[r+i*_paramDimensionA][c] += XX[r][c+_nNonvaryingC];
+ } // end for (k)
+ } // end if
+
+ if (nVaryingB > 0 && nVaryingC > 0)
+ {
+ Matrix<double> YY(_paramDimensionB, _paramDimensionC);
+
+ makeZeroMatrix(Y);
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ int const j = _correspondingParamB[k] - _nNonvaryingB;
+ // Ignore the non-varying parameters
+ if (j < 0) continue;
+
+ multiply_At_B(Bk[k], Ck[k], YY);
+
+ for (int r = 0; r < _paramDimensionB; ++r)
+ for (int c = 0; c < nVaryingC; ++c)
+ Y[r+j*_paramDimensionB][c] += YY[r][c+_nNonvaryingC];
+ } // end for (k)
+ } // end if
+
+ if (currentIteration == 0)
+ {
+ // Initialize lambda as tau*max(JtJ[i][i])
+ double maxEl = -1e30;
+ if (nVaryingA > 0)
+ {
+ for (int i = 0; i < nVaryingA; ++i)
+ for (int l = 0; l < _paramDimensionA; ++l)
+ maxEl = std::max(maxEl, Ui[i][l][l]);
+ }
+ if (nVaryingB > 0)
+ {
+ for (int j = 0; j < nVaryingB; ++j)
+ for (int l = 0; l < _paramDimensionB; ++l)
+ maxEl = std::max(maxEl, Vj[j][l][l]);
+ }
+ if (nVaryingC > 0)
+ {
+ for (int l = 0; l < nVaryingC; ++l)
+ maxEl = std::max(maxEl, Z[l][l]);
+ }
+
+ lambda = tau * maxEl;
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "SparseLevenbergOptimizer: initial lambda = " << lambda << endl;
+ } // end if (currentIteration == 0)
+ } // end if (computeDerivatives)
+
+ for (int i = 0; i < nVaryingA; ++i)
+ {
+ for (int l = 0; l < _paramDimensionA; ++l) diagUi[i][l] = Ui[i][l][l];
+ } // end for (i)
+
+ for (int j = 0; j < nVaryingB; ++j)
+ {
+ for (int l = 0; l < _paramDimensionB; ++l) diagVj[j][l] = Vj[j][l][l];
+ } // end for (j)
+
+ for (int l = 0; l < nVaryingC; ++l) diagZ[l] = Z[l][l];
+
+ // Augment the diagonals with lambda (either by the standard additive update or by multiplication).
+#if !defined(USE_MULTIPLICATIVE_UPDATE)
+ for (int i = 0; i < nVaryingA; ++i)
+ for (int l = 0; l < _paramDimensionA; ++l)
+ Ui[i][l][l] += lambda;
+
+ for (int j = 0; j < nVaryingB; ++j)
+ for (int l = 0; l < _paramDimensionB; ++l)
+ Vj[j][l][l] += lambda;
+
+ for (int l = 0; l < nVaryingC; ++l)
+ Z[l][l] += lambda;
+#else
+ for (int i = 0; i < nVaryingA; ++i)
+ for (int l = 0; l < _paramDimensionA; ++l)
+ Ui[i][l][l] = std::max(Ui[i][l][l] * (1.0 + lambda), 1e-15);
+
+ for (int j = 0; j < nVaryingB; ++j)
+ for (int l = 0; l < _paramDimensionB; ++l)
+ Vj[j][l][l] = std::max(Vj[j][l][l] * (1.0 + lambda), 1e-15);
+
+ for (int l = 0; l < nVaryingC; ++l)
+ Z[l][l] = std::max(Z[l][l] * (1.0 + lambda), 1e-15);
+#endif
+
+ this->fillSparseJtJ(Ui, Vj, Wn, Z, X, Y);
+
+ bool success = true;
+ double rho = 0.0;
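+ // Solve the lambda-augmented normal equations JtJ * delta = Jt_e with a
+ // sparse LDL^t factorization (ldl_numeric) and triangular solves, using
+ // the precomputed fill-reducing permutation _perm_JtJ.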
+ {
+ int const nCols = _JtJ_Parent.size();
+ int const lnz = _JtJ_Lp.back();
+
+ vector<int> Li(lnz);
+ vector<double> Lx(lnz);
+ vector<double> D(nCols), Yw(nCols); // Yw: workspace for ldl_numeric (avoids shadowing the matrix Y above)
+ vector<int> workPattern(nCols), workFlag(nCols);
+
+ int * colStarts = (int *)_JtJ.getColumnStarts();
+ int * rowIdxs = (int *)_JtJ.getRowIndices();
+ double * values = _JtJ.getValues();
+
+ int const d = ldl_numeric(nCols, colStarts, rowIdxs, values,
+ &_JtJ_Lp[0], &_JtJ_Parent[0], &_JtJ_Lnz[0],
+ &Li[0], &Lx[0], &D[0],
+ &Yw[0], &workPattern[0], &workFlag[0],
+ NULL, NULL);
+
+ if (d == nCols)
+ {
+ ldl_perm(nCols, &deltaPerm[0], &Jt_e[0], &_perm_JtJ[0]);
+ ldl_lsolve(nCols, &deltaPerm[0], &_JtJ_Lp[0], &Li[0], &Lx[0]);
+ ldl_dsolve(nCols, &deltaPerm[0], &D[0]);
+ ldl_ltsolve(nCols, &deltaPerm[0], &_JtJ_Lp[0], &Li[0], &Lx[0]);
+ ldl_permt(nCols, &delta[0], &deltaPerm[0], &_perm_JtJ[0]);
+ }
+ else
+ {
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "SparseLevenbergOptimizer: LDL decomposition failed. Increasing lambda." << endl;
+ success = false;
+ }
+ }
+
+ if (success)
+ {
+ double const deltaSqrLength = sqrNorm_L2(delta);
+
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "SparseLevenbergOptimizer: ||delta||^2 = " << deltaSqrLength << endl;
+
+ double const paramLength = this->getParameterLength();
+ if (this->applyUpdateStoppingCriteria(paramLength, sqrt(deltaSqrLength)))
+ {
+ status = LEVENBERG_OPTIMIZER_SMALL_UPDATE;
+ goto end;
+ }
+
+ // Copy the updates from delta to the respective arrays
+ int pos = 0;
+
+ for (int i = 0; i < _nNonvaryingA; ++i) makeZeroVector(deltaAi[i]);
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ for (int l = 0; l < _paramDimensionA; ++l, ++pos)
+ deltaAi[i][l] = delta[pos];
+
+ for (int j = 0; j < _nNonvaryingB; ++j) makeZeroVector(deltaBj[j]);
+ for (int j = _nNonvaryingB; j < _nParametersB; ++j)
+ for (int l = 0; l < _paramDimensionB; ++l, ++pos)
+ deltaBj[j][l] = delta[pos];
+
+ makeZeroVector(deltaC);
+ for (int l = _nNonvaryingC; l < _paramDimensionC; ++l, ++pos)
+ deltaC[l] = delta[pos];
+
+ saveAllParameters();
+ if (nVaryingA > 0) updateParametersA(deltaAi);
+ if (nVaryingB > 0) updateParametersB(deltaBj);
+ if (nVaryingC > 0) updateParametersC(deltaC);
+
+ this->evalResidual(residuals2);
+ for (int k = 0; k < _nMeasurements; ++k)
+ scaleVectorIP(weights[k], residuals2[k]);
+
+ double const newErr = squaredResidual(residuals2);
+ rho = err - newErr;
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "SparseLevenbergOptimizer: |new residual|^2 = " << newErr << endl;
+
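+ // Gain ratio: rho = (err - newErr) / (lambda*||delta||^2 + delta^t*Jt_e)
+ // in the additive branch; the multiplicative branch instead weights the
+ // squared update by the saved diagonal entries. This compares the actual
+ // cost decrease with the decrease predicted by the linearized model
+ // (cf. Madsen et al.).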
+#if !defined(USE_MULTIPLICATIVE_UPDATE)
+ double const denom1 = lambda * deltaSqrLength;
+#else
+ double denom1 = 0.0;
+ for (int i = _nNonvaryingA; i < _nParametersA; ++i)
+ for (int l = 0; l < _paramDimensionA; ++l)
+ denom1 += deltaAi[i][l] * deltaAi[i][l] * diagUi[i-_nNonvaryingA][l];
+
+ for (int j = _nNonvaryingB; j < _nParametersB; ++j)
+ for (int l = 0; l < _paramDimensionB; ++l)
+ denom1 += deltaBj[j][l] * deltaBj[j][l] * diagVj[j-_nNonvaryingB][l];
+
+ for (int l = _nNonvaryingC; l < _paramDimensionC; ++l)
+ denom1 += deltaC[l] * deltaC[l] * diagZ[l-_nNonvaryingC];
+
+ denom1 *= lambda;
+#endif
+ double const denom2 = innerProduct(delta, Jt_e);
+ rho = rho / (denom1 + denom2);
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "SparseLevenbergOptimizer: rho = " << rho
+ << " denom1 = " << denom1 << " denom2 = " << denom2 << endl;
+ } // end if (success)
+
+ if (success && rho > 0)
+ {
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "SparseLevenbergOptimizer: Improved solution - decreasing lambda." << endl;
+ // Improvement in the new solution
+ decreaseLambda(rho);
+ computeDerivatives = true;
+ }
+ else
+ {
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "SparseLevenbergOptimizer: Inferior solution - increasing lambda." << endl;
+ restoreAllParameters();
+ increaseLambda();
+ computeDerivatives = false;
+
+ // Restore diagonal elements in Ui, Vj and Z.
+ for (int i = 0; i < nVaryingA; ++i)
+ {
+ for (int l = 0; l < _paramDimensionA; ++l) Ui[i][l][l] = diagUi[i][l];
+ } // end for (i)
+
+ for (int j = 0; j < nVaryingB; ++j)
+ {
+ for (int l = 0; l < _paramDimensionB; ++l) Vj[j][l][l] = diagVj[j][l];
+ } // end for (j)
+
+ for (int l = 0; l < nVaryingC; ++l) Z[l][l] = diagZ[l];
+ } // end if
+ } // end for
+
+ end:;
+ if (optimizerVerbosenessLevel >= 2)
+ cout << "Leaving SparseLevenbergOptimizer::minimize()." << endl;
+ } // end SparseLevenbergOptimizer::minimize()
+
+#endif // defined(V3DLIB_ENABLE_SUITESPARSE)
+
+} // end namespace V3D
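
The accept/reject logic in minimize() can be followed on a dense toy problem:
augment the diagonal with lambda, solve for delta, and form the denominator of
the gain ratio. A minimal sketch (Cramer's rule stands in for the sparse LDL
solve; all numbers are made up):

    #include <cstdio>

    int main()
    {
       double const JtJ[2][2] = { { 4.0, 1.0 }, { 1.0, 3.0 } };
       double const Jt_e[2]   = { 1.0, 2.0 }; // toy right-hand side
       double const lambda    = 1e-3;

       // Additive augmentation of the diagonal, as in the
       // !USE_MULTIPLICATIVE_UPDATE branch of minimize().
       double const a = JtJ[0][0] + lambda, b = JtJ[0][1];
       double const c = JtJ[1][0],          d = JtJ[1][1] + lambda;

       // 2x2 solve by Cramer's rule; the real code factorizes the sparse JtJ.
       double const det = a*d - b*c;
       double const delta[2] = { (d*Jt_e[0] - b*Jt_e[1]) / det,
                                 (a*Jt_e[1] - c*Jt_e[0]) / det };

       // Denominator of the gain ratio: lambda*||delta||^2 + delta^t*Jt_e.
       double const denom = lambda*(delta[0]*delta[0] + delta[1]*delta[1])
                          + delta[0]*Jt_e[0] + delta[1]*Jt_e[1];
       std::printf("delta = (%g, %g), predicted decrease = %g\n",
                   delta[0], delta[1], denom);
       return 0;
    }
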
diff --git a/extern/libmv/third_party/ssba/Math/v3d_optimization.h b/extern/libmv/third_party/ssba/Math/v3d_optimization.h
new file mode 100644
index 00000000000..27d2e12287f
--- /dev/null
+++ b/extern/libmv/third_party/ssba/Math/v3d_optimization.h
@@ -0,0 +1,273 @@
+// -*- C++ -*-
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef V3D_OPTIMIZATION_H
+#define V3D_OPTIMIZATION_H
+
+#include "Math/v3d_linear.h"
+#include "Math/v3d_mathutilities.h"
+
+#include <vector>
+#include <iostream>
+
+namespace V3D
+{
+
+ enum
+ {
+ LEVENBERG_OPTIMIZER_TIMEOUT = 0,
+ LEVENBERG_OPTIMIZER_SMALL_UPDATE = 1,
+ LEVENBERG_OPTIMIZER_CONVERGED = 2
+ };
+
+ extern int optimizerVerbosenessLevel;
+
+ struct LevenbergOptimizerCommon
+ {
+ LevenbergOptimizerCommon()
+ : status(LEVENBERG_OPTIMIZER_TIMEOUT), currentIteration(0), maxIterations(50),
+ tau(1e-3), lambda(1e-3),
+ gradientThreshold(1e-10), updateThreshold(1e-10),
+ _nu(2.0)
+ { }
+ virtual ~LevenbergOptimizerCommon() {}
+
+ // See Madsen et al., "Methods for non-linear least squares problems."
+ virtual void increaseLambda()
+ {
+ lambda *= _nu; _nu *= 2.0;
+ }
+
+ virtual void decreaseLambda(double const rho)
+ {
+ double const r = 2*rho - 1.0;
+ lambda *= std::max(1.0/3.0, 1 - r*r*r);
+ if (lambda < 1e-10) lambda = 1e-10;
+ _nu = 2;
+ }
+
+ bool applyGradientStoppingCriteria(double maxGradient) const
+ {
+ return maxGradient < gradientThreshold;
+ }
+
+ bool applyUpdateStoppingCriteria(double paramLength, double updateLength) const
+ {
+ return updateLength < updateThreshold * (paramLength + updateThreshold);
+ }
+
+ int status;
+ int currentIteration, maxIterations;
+ double tau, lambda;
+ double gradientThreshold, updateThreshold;
+
+ protected:
+ double _nu;
+ }; // end struct LevenbergOptimizerCommon
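
A short standalone trace of this schedule (the starting values are illustrative):

    #include <algorithm>
    #include <cstdio>

    // Rejected steps multiply lambda by nu and double nu; an accepted step
    // with gain ratio rho shrinks lambda by max(1/3, 1-(2*rho-1)^3) and
    // resets nu to 2, exactly as in increaseLambda()/decreaseLambda() above.
    int main()
    {
       double lambda = 1e-3, nu = 2.0;

       for (int k = 0; k < 2; ++k) { lambda *= nu; nu *= 2.0; } // two rejections
       std::printf("after 2 rejections: lambda = %g, nu = %g\n", lambda, nu);

       double const rho = 0.8; // hypothetical gain ratio of an accepted step
       double const r = 2.0*rho - 1.0;
       lambda *= std::max(1.0/3.0, 1.0 - r*r*r);
       nu = 2.0;
       std::printf("after acceptance: lambda = %g, nu = %g\n", lambda, nu);
       return 0;
    }
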
+
+# if defined(V3DLIB_ENABLE_SUITESPARSE)
+
+ struct SparseLevenbergOptimizer : public LevenbergOptimizerCommon
+ {
+ SparseLevenbergOptimizer(int measurementDimension,
+ int nParametersA, int paramDimensionA,
+ int nParametersB, int paramDimensionB,
+ int paramDimensionC,
+ std::vector<int> const& correspondingParamA,
+ std::vector<int> const& correspondingParamB)
+ : LevenbergOptimizerCommon(),
+ _nMeasurements(correspondingParamA.size()),
+ _measurementDimension(measurementDimension),
+ _nParametersA(nParametersA), _paramDimensionA(paramDimensionA),
+ _nParametersB(nParametersB), _paramDimensionB(paramDimensionB),
+ _paramDimensionC(paramDimensionC),
+ _nNonvaryingA(0), _nNonvaryingB(0), _nNonvaryingC(0),
+ _correspondingParamA(correspondingParamA),
+ _correspondingParamB(correspondingParamB)
+ {
+ assert(correspondingParamA.size() == correspondingParamB.size());
+ }
+
+ ~SparseLevenbergOptimizer() { }
+
+ void setNonvaryingCounts(int nNonvaryingA, int nNonvaryingB, int nNonvaryingC)
+ {
+ _nNonvaryingA = nNonvaryingA;
+ _nNonvaryingB = nNonvaryingB;
+ _nNonvaryingC = nNonvaryingC;
+ }
+
+ void getNonvaryingCounts(int& nNonvaryingA, int& nNonvaryingB, int& nNonvaryingC) const
+ {
+ nNonvaryingA = _nNonvaryingA;
+ nNonvaryingB = _nNonvaryingB;
+ nNonvaryingC = _nNonvaryingC;
+ }
+
+ void minimize();
+
+ virtual void evalResidual(VectorArray<double>& residuals) = 0;
+
+ virtual void fillWeights(VectorArray<double> const& residuals, Vector<double>& w)
+ {
+ (void)residuals;
+ std::fill(w.begin(), w.end(), 1.0);
+ }
+
+ void fillAllJacobians(Vector<double> const& w,
+ MatrixArray<double>& Ak,
+ MatrixArray<double>& Bk,
+ MatrixArray<double>& Ck)
+ {
+ int const nVaryingA = _nParametersA - _nNonvaryingA;
+ int const nVaryingB = _nParametersB - _nNonvaryingB;
+ int const nVaryingC = _paramDimensionC - _nNonvaryingC;
+
+ for (int k = 0; k < _nMeasurements; ++k)
+ {
+ int const i = _correspondingParamA[k];
+ int const j = _correspondingParamB[k];
+
+ if (i < _nNonvaryingA && j < _nNonvaryingB) continue;
+
+ fillJacobians(Ak[k], Bk[k], Ck[k], i, j, k);
+ } // end for (k)
+
+ if (nVaryingA > 0)
+ {
+ for (int k = 0; k < _nMeasurements; ++k)
+ scaleMatrixIP(w[k], Ak[k]);
+ }
+ if (nVaryingB > 0)
+ {
+ for (int k = 0; k < _nMeasurements; ++k)
+ scaleMatrixIP(w[k], Bk[k]);
+ }
+ if (nVaryingC > 0)
+ {
+ for (int k = 0; k < _nMeasurements; ++k)
+ scaleMatrixIP(w[k], Ck[k]);
+ }
+ } // end fillAllJacobians()
+
+ virtual void setupJacobianGathering() { }
+
+ virtual void fillJacobians(Matrix<double>& Ak, Matrix<double>& Bk, Matrix<double>& Ck,
+ int i, int j, int k) = 0;
+
+ virtual double getParameterLength() const = 0;
+
+ virtual void updateParametersA(VectorArray<double> const& deltaAi) = 0;
+ virtual void updateParametersB(VectorArray<double> const& deltaBj) = 0;
+ virtual void updateParametersC(Vector<double> const& deltaC) = 0;
+ virtual void saveAllParameters() = 0;
+ virtual void restoreAllParameters() = 0;
+
+ protected:
+ void serializeNonZerosJtJ(std::vector<std::pair<int, int> >& dst) const;
+ void setupSparseJtJ();
+ void fillSparseJtJ(MatrixArray<double> const& Ui, MatrixArray<double> const& Vj, MatrixArray<double> const& Wk,
+ Matrix<double> const& Z, Matrix<double> const& X, Matrix<double> const& Y);
+
+ int const _nMeasurements, _measurementDimension;
+ int const _nParametersA, _paramDimensionA;
+ int const _nParametersB, _paramDimensionB;
+ int const _paramDimensionC;
+
+ int _nNonvaryingA, _nNonvaryingB, _nNonvaryingC;
+
+ std::vector<int> const& _correspondingParamA;
+ std::vector<int> const& _correspondingParamB;
+
+ std::vector<std::pair<int, int> > _jointNonzerosW;
+ std::vector<int> _jointIndexW;
+
+ std::vector<int> _JtJ_Lp, _JtJ_Parent, _JtJ_Lnz;
+ std::vector<int> _perm_JtJ, _invPerm_JtJ;
+
+ CCS_Matrix<double> _JtJ;
+ }; // end struct SparseLevenbergOptimizer
+
+ struct StdSparseLevenbergOptimizer : public SparseLevenbergOptimizer
+ {
+ StdSparseLevenbergOptimizer(int measurementDimension,
+ int nParametersA, int paramDimensionA,
+ int nParametersB, int paramDimensionB,
+ int paramDimensionC,
+ std::vector<int> const& correspondingParamA,
+ std::vector<int> const& correspondingParamB)
+ : SparseLevenbergOptimizer(measurementDimension, nParametersA, paramDimensionA,
+ nParametersB, paramDimensionB, paramDimensionC,
+ correspondingParamA, correspondingParamB),
+ curParametersA(nParametersA, paramDimensionA), savedParametersA(nParametersA, paramDimensionA),
+ curParametersB(nParametersB, paramDimensionB), savedParametersB(nParametersB, paramDimensionB),
+ curParametersC(paramDimensionC), savedParametersC(paramDimensionC)
+ { }
+
+ virtual double getParameterLength() const
+ {
+ double res = 0.0;
+ for (int i = 0; i < _nParametersA; ++i) res += sqrNorm_L2(curParametersA[i]);
+ for (int j = 0; j < _nParametersB; ++j) res += sqrNorm_L2(curParametersB[j]);
+ res += sqrNorm_L2(curParametersC);
+ return sqrt(res);
+ }
+
+ virtual void updateParametersA(VectorArray<double> const& deltaAi)
+ {
+ for (int i = 0; i < _nParametersA; ++i) addVectors(deltaAi[i], curParametersA[i], curParametersA[i]);
+ }
+
+ virtual void updateParametersB(VectorArray<double> const& deltaBj)
+ {
+ for (int j = 0; j < _nParametersB; ++j) addVectors(deltaBj[j], curParametersB[j], curParametersB[j]);
+ }
+
+ virtual void updateParametersC(Vector<double> const& deltaC)
+ {
+ addVectors(deltaC, curParametersC, curParametersC);
+ }
+
+ virtual void saveAllParameters()
+ {
+ for (int i = 0; i < _nParametersA; ++i) savedParametersA[i] = curParametersA[i];
+ for (int j = 0; j < _nParametersB; ++j) savedParametersB[j] = curParametersB[j];
+ savedParametersC = curParametersC;
+ }
+
+ virtual void restoreAllParameters()
+ {
+ for (int i = 0; i < _nParametersA; ++i) curParametersA[i] = savedParametersA[i];
+ for (int j = 0; j < _nParametersB; ++j) curParametersB[j] = savedParametersB[j];
+ curParametersC = savedParametersC;
+ }
+
+ VectorArray<double> curParametersA, savedParametersA;
+ VectorArray<double> curParametersB, savedParametersB;
+ Vector<double> curParametersC, savedParametersC;
+ }; // end struct StdSparseLevenbergOptimizer
+
+# endif
+
+} // end namespace V3D
+
+#endif
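
A concrete optimizer derives from StdSparseLevenbergOptimizer and supplies the
residual and Jacobian callbacks; the base classes handle the sparse solve and
the damping schedule. A hypothetical sketch (the problem sizes are placeholders
and the projection code is omitted):

    #include "Math/v3d_optimization.h"

    struct MyBundleOptimizer : public V3D::StdSparseLevenbergOptimizer
    {
       MyBundleOptimizer(std::vector<int> const& camIdx, // camera id per measurement
                         std::vector<int> const& ptIdx)  // point id per measurement
          : StdSparseLevenbergOptimizer(2,      // 2D image measurements
                                        10, 6,  // 10 cameras, 6 pose parameters each
                                        100, 3, // 100 points, 3 coordinates each
                                        0,      // no global parameters
                                        camIdx, ptIdx)
       { }

       virtual void evalResidual(V3D::VectorArray<double>& residuals)
       {
          // Fill residuals[k] with (projection - measurement) for measurement k.
       }

       virtual void fillJacobians(V3D::Matrix<double>& Ak, V3D::Matrix<double>& Bk,
                                  V3D::Matrix<double>& Ck, int i, int j, int k)
       {
          // Ak = d r_k / d camera_i, Bk = d r_k / d point_j; Ck is unused here.
       }
    };

    // Usage: MyBundleOptimizer opt(camIdx, ptIdx);
    //        opt.maxIterations = 100;
    //        opt.minimize();
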
diff --git a/extern/libmv/third_party/ssba/README.TXT b/extern/libmv/third_party/ssba/README.TXT
new file mode 100644
index 00000000000..734962b1df8
--- /dev/null
+++ b/extern/libmv/third_party/ssba/README.TXT
@@ -0,0 +1,92 @@
+Description
+
+This is an implementation of a sparse Levenberg-Marquardt optimization
+procedure and several bundle adjustment modules based on it. There are three
+versions of bundle adjustment:
+1) Pure metric adjustment. Camera poses have 6 dof and 3D points have 3 dof.
+2) Common, but adjustable intrinsic and distortion parameters. This is useful
+ if the set of images is taken with the same camera under constant zoom
+ settings.
+3) Variable intrinsics and distortion parameters for each view. This addresses
+ the "community photo collection" setting, where each image is captured with
+ a different camera and/or a varying zoom setting.
+
+There are two demo applications in the Apps directory, bundle_common and
+bundle_varying, which correspond to items 2) and 3) above.
+
+The input data file for both applications is a text file with the following
+numerical values:
+
+First, the number of 3D points, views and 2D measurements:
+<M> <N> <K>
+Then, the values of the intrinsic matrix
+    [ fx  skew  cx ]
+K = [ 0   fy    cy ]
+    [ 0   0     1  ],
+and the distortion parameters according to the convention of the Bouguet
+toolbox:
+
+ <fx> <skew> <cx> <fy> <cy> <k1> <k2> <p1> <p2>
+
+For the bundle_varying application these values are given <N> times, once for
+each camera/view.
+Then the <M> 3D point positions are given:
+
+ <point-id> <X> <Y> <Z>
+
+Note: the point-ids need not be exactly 0 to M-1; any (unique) ids
+will do.
+The camera poses are given subsequently:
+
+ <view-id> <12 entries of the RT matrix>
+
+There is a lot of confusion about how to specify the orientation of cameras. We
+use projection matrix notation, i.e. P = K [R|T], and a 3D point X in world
+coordinates is transformed into the camera coordinate system by XX = R*X + T.
+
+Finally, the <K> 2D image measurements (given in pixels) are provided:
+
+ <view-id> <point-id> <x> <y> 1
+
+See the example in the Dataset folder.
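
Since the Dataset directory is removed in this bundled copy (see README.libmv),
here is a minimal, purely made-up input for bundle_common with 2 points, 1 view
and 2 measurements (identity pose; the 12 RT entries are assumed row-major):

    2 1 2
    1000 0 320 1000 240 0 0 0 0
    0 1.0 2.0 10.0
    1 -1.0 0.5 12.0
    0 1 0 0 0 0 1 0 0 0 0 1 0
    0 0 420.0 440.0 1
    0 1 236.7 281.7 1

The two measurement rows are the projections of the two points through this
camera (rounded to one decimal), so the adjustment starts near the optimum.
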
+
+
+Performance
+
+This software is able to perform successful loop closing for a video sequence
+containing 1745 views, 37920 3D points and 627228 image measurements in about
+16 minutes on a 2.2 GHz Core 2. The memory footprint stayed below 700 MB.
+
+
+Requirements
+
+Solving the augmented normal equations in the LM optimizer is done with LDL, a
+Cholesky-like decomposition method for sparse matrices (see
+http://www.cise.ufl.edu/research/sparse/ldl). The appropriate column
+reordering is done with COLAMD (see
+http://www.cise.ufl.edu/research/sparse/colamd). Both packages are licensed
+under the GNU LGPL.
+
+This software was developed under Linux, but should compile equally well on
+other operating systems.
+
+-Christopher Zach (cmzach@cs.unc.edu)
+
+/*
+Copyright (c) 2008 University of North Carolina at Chapel Hill
+
+This file is part of SSBA (Simple Sparse Bundle Adjustment).
+
+SSBA is free software: you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+SSBA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License along
+with SSBA. If not, see <http://www.gnu.org/licenses/>.
+*/
diff --git a/extern/libmv/third_party/ssba/README.libmv b/extern/libmv/third_party/ssba/README.libmv
new file mode 100755
index 00000000000..45e0a31f6fc
--- /dev/null
+++ b/extern/libmv/third_party/ssba/README.libmv
@@ -0,0 +1,23 @@
+Project: SSBA
+URL: http://www.cs.unc.edu/~cmzach/opensource.html
+License: LGPL3
+Upstream version: 1.0
+
+Local modifications:
+
+ * Added
+ SET(CMAKE_CXX_FLAGS "")
+ to CMakeLists.txt to prevent warnings from being treated as errors.
+ * Fixed "unused variable" warnings in the header files. Warnings in the .cpp
+ files are still there.
+ * Fixed a bug in CameraMatrix::opticalAxis() in file
+ Geometry/v3d_cameramatrix.h
+ * Deleted the Dataset directory.
+ * Added '#include <string>' to ssba/Apps/bundle_common.cpp and
+ ssba/Apps/bundle_varying.cpp to stop undefined references to strcmp
+ * Removed unnecessary elements from the CMakeLists.txt file, including the
+ obsolete local_config.cmake and friends.
+ * Added a virtual destructor to V3D::LevenbergOptimizerCommon in
+ Math/v3d_optimization.h
+ * Added /EHsc WIN32-specific flag to CMakeLists.txt
+ * Removed the unused variable 'Vector3d np' from bundle_common.cpp and
+ bundle_varying.cpp (in the main() function).