git.blender.org/blender.git
author    Martijn Berger <mberger@denc.com>  2015-12-10 14:37:46 +0300
committer Martijn Berger <mberger@denc.com>  2015-12-10 14:37:46 +0300
commit    9ad13d70faff0c510c313a7d58a58b974e8ce11f (patch)
tree      3b7847573047815fc474105195d06c24cfefaaad /extern/Eigen3
parent    ffc750a4fafc6fe4e6fb556506becaa0e1ce8c9a (diff)
Update Eigen to version 3.2.7
The main purpose of this is to get MSVC 2015 fixes
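For anyone verifying the bump, the version macros from Eigen/src/Core/util/Macros.h make a quick check possible. A minimal sketch, assuming extern/Eigen3 is on the include path:

    // Prints the vendored Eigen version; after this commit it should print 3.2.7.
    #include <Eigen/Core>
    #include <cstdio>

    int main()
    {
        std::printf("%d.%d.%d\n", EIGEN_WORLD_VERSION, EIGEN_MAJOR_VERSION, EIGEN_MINOR_VERSION);
        return 0;
    }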
Diffstat (limited to 'extern/Eigen3')
-rw-r--r--  extern/Eigen3/Eigen/Core | 6
-rw-r--r--  extern/Eigen3/Eigen/SparseCore | 2
-rw-r--r--  extern/Eigen3/Eigen/src/Cholesky/LDLT.h | 61
-rw-r--r--  extern/Eigen3/Eigen/src/Cholesky/LLT.h | 10
-rw-r--r--  extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h | 10
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Array.h | 15
-rw-r--r--  extern/Eigen3/Eigen/src/Core/ArrayBase.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/Core/ArrayWrapper.h | 10
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Assign.h | 15
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Block.h | 7
-rw-r--r--  extern/Eigen3/Eigen/src/Core/CommaInitializer.h | 11
-rw-r--r--  extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h | 3
-rw-r--r--  extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/Core/DenseBase.h | 28
-rw-r--r--  extern/Eigen3/Eigen/src/Core/DenseStorage.h | 271
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Diagonal.h | 8
-rw-r--r--  extern/Eigen3/Eigen/src/Core/DiagonalProduct.h | 5
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Functors.h | 45
-rw-r--r--  extern/Eigen3/Eigen/src/Core/GeneralProduct.h | 14
-rw-r--r--  extern/Eigen3/Eigen/src/Core/MapBase.h | 13
-rw-r--r--  extern/Eigen3/Eigen/src/Core/MathFunctions.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Matrix.h | 15
-rw-r--r--  extern/Eigen3/Eigen/src/Core/MatrixBase.h | 27
-rw-r--r--  extern/Eigen3/Eigen/src/Core/PermutationMatrix.h | 34
-rw-r--r--  extern/Eigen3/Eigen/src/Core/PlainObjectBase.h | 32
-rw-r--r--  extern/Eigen3/Eigen/src/Core/ProductBase.h | 14
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Redux.h | 5
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Ref.h | 38
-rw-r--r--  extern/Eigen3/Eigen/src/Core/Replicate.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/Core/ReturnByValue.h | 11
-rw-r--r--  extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h | 10
-rw-r--r--  extern/Eigen3/Eigen/src/Core/TriangularMatrix.h | 29
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h | 20
-rw-r--r--  extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h | 14
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h | 79
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/Parallelizer.h | 17
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h | 9
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/Constants.h | 13
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h | 3
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/MKL_support.h | 51
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/Macros.h | 40
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/Memory.h | 27
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/StaticAssert.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/Core/util/XprHelper.h | 10
-rw-r--r--  extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h | 1
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h | 8
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h | 9
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h | 9
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h | 14
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h | 12
-rw-r--r--  extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 227
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/AlignedBox.h | 85
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/AngleAxis.h | 6
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/Homogeneous.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/Hyperplane.h | 12
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/Quaternion.h | 34
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/Rotation2D.h | 7
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/Transform.h | 33
-rw-r--r--  extern/Eigen3/Eigen/src/Geometry/Umeyama.h | 10
-rw-r--r--  extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 24
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h | 33
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h | 25
-rw-r--r--  extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h | 48
-rw-r--r--  extern/Eigen3/Eigen/src/LU/FullPivLU.h | 21
-rw-r--r--  extern/Eigen3/Eigen/src/LU/PartialPivLU.h | 8
-rw-r--r--  extern/Eigen3/Eigen/src/OrderingMethods/Amd.h | 19
-rw-r--r--  extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h | 12
-rw-r--r--  extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h | 35
-rw-r--r--  extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h | 8
-rw-r--r--  extern/Eigen3/Eigen/src/QR/HouseholderQR.h | 98
-rw-r--r--  extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h | 30
-rw-r--r--  extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 64
-rw-r--r--  extern/Eigen3/Eigen/src/SVD/JacobiSVD.h | 130
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h | 42
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h | 8
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h | 136
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h | 7
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h | 44
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h | 17
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h | 44
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h | 10
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h | 7
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseVector.h | 1
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/SparseLU/SparseLU.h | 90
-rw-r--r--  extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h | 12
-rw-r--r--  extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h | 4
-rw-r--r--  extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h | 8
-rw-r--r--  extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h | 13
-rw-r--r--  extern/Eigen3/Eigen/src/SparseQR/SparseQR.h | 191
-rw-r--r--  extern/Eigen3/Eigen/src/StlSupport/StdDeque.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/StlSupport/StdList.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/StlSupport/StdVector.h | 2
-rw-r--r--  extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h | 112
-rw-r--r--  extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h | 54
-rw-r--r--  extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h | 16
-rw-r--r--  extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h | 17
-rw-r--r--  extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h | 15
-rwxr-xr-x  extern/Eigen3/eigen-update.sh | 2
110 files changed, 2019 insertions(+), 943 deletions(-)
diff --git a/extern/Eigen3/Eigen/Core b/extern/Eigen3/Eigen/Core
index 9131cc3fc9d..509c529e13d 100644
--- a/extern/Eigen3/Eigen/Core
+++ b/extern/Eigen3/Eigen/Core
@@ -95,7 +95,7 @@
extern "C" {
// In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
// Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
- #ifdef __INTEL_COMPILER
+ #if defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1110
#include <immintrin.h>
#else
#include <emmintrin.h>
@@ -123,7 +123,7 @@
#undef bool
#undef vector
#undef pixel
- #elif defined __ARM_NEON__
+ #elif defined __ARM_NEON
#define EIGEN_VECTORIZE
#define EIGEN_VECTORIZE_NEON
#include <arm_neon.h>
@@ -165,7 +165,7 @@
#endif
// required for __cpuid, needs to be included after cmath
-#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64))
+#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) && (!defined(_WIN32_WCE))
#include <intrin.h>
#endif
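The __ARM_NEON change matters because AArch64 toolchains predefine __ARM_NEON (the ACLE spelling, without trailing underscores), while __ARM_NEON__ is the legacy 32-bit ARM macro. A standalone sketch of detection covering both spellings, with the hunk above keying on the newer one:

    // Hypothetical feature test: ACLE compilers (and all AArch64 ones)
    // define __ARM_NEON; older 32-bit ARM toolchains may only define
    // the legacy __ARM_NEON__ spelling.
    #if defined(__ARM_NEON) || defined(__ARM_NEON__)
      #define HAVE_NEON 1
    #else
      #define HAVE_NEON 0
    #endif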
diff --git a/extern/Eigen3/Eigen/SparseCore b/extern/Eigen3/Eigen/SparseCore
index 9b5be5e15a9..24bcf0156b3 100644
--- a/extern/Eigen3/Eigen/SparseCore
+++ b/extern/Eigen3/Eigen/SparseCore
@@ -14,7 +14,7 @@
/**
* \defgroup SparseCore_Module SparseCore module
*
- * This module provides a sparse matrix representation, and basic associatd matrix manipulations
+ * This module provides a sparse matrix representation, and basic associated matrix manipulations
* and operations.
*
* See the \ref TutorialSparse "Sparse tutorial"
diff --git a/extern/Eigen3/Eigen/src/Cholesky/LDLT.h b/extern/Eigen3/Eigen/src/Cholesky/LDLT.h
index d026418f8a9..abd30bd916d 100644
--- a/extern/Eigen3/Eigen/src/Cholesky/LDLT.h
+++ b/extern/Eigen3/Eigen/src/Cholesky/LDLT.h
@@ -235,6 +235,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
}
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
/** \internal
* Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
@@ -274,30 +279,13 @@ template<> struct ldlt_inplace<Lower>
return true;
}
- RealScalar cutoff(0), biggest_in_corner;
-
for (Index k = 0; k < size; ++k)
{
// Find largest diagonal element
Index index_of_biggest_in_corner;
- biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
+ mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
index_of_biggest_in_corner += k;
- if(k == 0)
- {
- // The biggest overall is the point of reference to which further diagonals
- // are compared; if any diagonal is negligible compared
- // to the largest overall, the algorithm bails.
- cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
- }
-
- // Finish early if the matrix is not full rank.
- if(biggest_in_corner < cutoff)
- {
- for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
- break;
- }
-
transpositions.coeffRef(k) = index_of_biggest_in_corner;
if(k != index_of_biggest_in_corner)
{
@@ -328,15 +316,20 @@ template<> struct ldlt_inplace<Lower>
if(k>0)
{
- temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint();
+ temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint();
mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
if(rs>0)
A21.noalias() -= A20 * temp.head(k);
}
- if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
- A21 /= mat.coeffRef(k,k);
-
+
+ // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
+ // was smaller than the cutoff value. However, since LDLT is not rank-revealing
+ // we should only make sure we do not introduce INF or NaN values.
+ // LAPACK also uses 0 as the cutoff value.
RealScalar realAkk = numext::real(mat.coeffRef(k,k));
+ if((rs>0) && (abs(realAkk) > RealScalar(0)))
+ A21 /= realAkk;
+
if (sign == PositiveSemiDef) {
if (realAkk < 0) sign = Indefinite;
} else if (sign == NegativeSemiDef) {
@@ -446,6 +439,8 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
+ check_template_parameters();
+
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
@@ -454,6 +449,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
m_transpositions.resize(size);
m_isInitialized = false;
m_temporary.resize(size);
+ m_sign = internal::ZeroSign;
internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
@@ -468,7 +464,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
*/
template<typename MatrixType, int _UpLo>
template<typename Derived>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)
{
const Index size = w.rows();
if (m_isInitialized)
@@ -514,16 +510,21 @@ struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
using std::abs;
using std::max;
typedef typename LDLTType::MatrixType MatrixType;
- typedef typename LDLTType::Scalar Scalar;
typedef typename LDLTType::RealScalar RealScalar;
- const Diagonal<const MatrixType> vectorD = dec().vectorD();
- RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
- RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
+ const typename Diagonal<const MatrixType>::RealReturnType vectorD(dec().vectorD());
+ // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
+ // as motivated by LAPACK's xGELSS:
+ // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
+ // However, LDLT is not rank revealing, and so adjusting the tolerance wrt the highest
+ // diagonal element is not well justified and leads to numerical issues in some cases.
+ // Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
+ RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
+
for (Index i = 0; i < vectorD.size(); ++i) {
if(abs(vectorD(i)) > tolerance)
- dst.row(i) /= vectorD(i);
+ dst.row(i) /= vectorD(i);
else
- dst.row(i).setZero();
+ dst.row(i).setZero();
}
// dst = L^-T (D^-1 L^-1 P b)
@@ -576,7 +577,7 @@ MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
// L^* P
res = matrixU() * res;
// D(L^*P)
- res = vectorD().asDiagonal() * res;
+ res = vectorD().real().asDiagonal() * res;
// L(DL^*P)
res = matrixL() * res;
// P^T (LDL^*P)
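Net effect of the LDLT changes: the factorization no longer bails out early on rank-deficient input, and solve() zeroes only rows whose pivot is numerically zero (tolerance 1/highest rather than a scale-dependent cutoff). A usage sketch against the public API, using the semi-definite case this improves:

    // Minimal LDLT usage sketch; A is positive semi-definite with rank 2.
    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::Matrix3d A;
        A << 4, 1, 0,
             1, 3, 0,
             0, 0, 0;
        Eigen::Vector3d b(1, 2, 0);          // consistent right-hand side

        Eigen::LDLT<Eigen::Matrix3d> ldlt(A);
        Eigen::Vector3d x = ldlt.solve(b);   // zero-pivot row is zeroed, not divided
        std::cout << "x = " << x.transpose() << "\n"
                  << "D = " << ldlt.vectorD().transpose() << "\n";
        return 0;
    }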
diff --git a/extern/Eigen3/Eigen/src/Cholesky/LLT.h b/extern/Eigen3/Eigen/src/Cholesky/LLT.h
index 2e6189f7dab..7c11a2dc29a 100644
--- a/extern/Eigen3/Eigen/src/Cholesky/LLT.h
+++ b/extern/Eigen3/Eigen/src/Cholesky/LLT.h
@@ -174,6 +174,12 @@ template<typename _MatrixType, int _UpLo> class LLT
LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
/** \internal
* Used to compute and store L
* The strict upper part is not used and even not initialized.
@@ -283,7 +289,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
return k;
mat.coeffRef(k,k) = x = sqrt(x);
if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint();
- if (rs>0) A21 *= RealScalar(1)/x;
+ if (rs>0) A21 /= x;
}
return -1;
}
@@ -384,6 +390,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
+ check_template_parameters();
+
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
m_matrix.resize(size, size);
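check_template_parameters() is instantiated from compute(), so factoring a matrix of integers now fails at compile time instead of silently producing nonsense. A sketch:

    #include <Eigen/Dense>

    int main()
    {
        Eigen::Matrix2d a;
        a << 2, 1,
             1, 2;
        Eigen::LLT<Eigen::Matrix2d> llt(a);   // fine: double is non-integer

        // Eigen::Matrix2i ai; ai << 2, 1, 1, 2;
        // Eigen::LLT<Eigen::Matrix2i> bad(ai); // would now trip
        //                                      // EIGEN_STATIC_ASSERT_NON_INTEGER
        return 0;
    }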
diff --git a/extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h b/extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h
index 64daa445cf7..66675d7476d 100644
--- a/extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h
+++ b/extern/Eigen3/Eigen/src/Cholesky/LLT_MKL.h
@@ -60,7 +60,7 @@ template<> struct mkl_llt<EIGTYPE> \
lda = m.outerStride(); \
\
info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
- info = (info==0) ? Success : NumericalIssue; \
+ info = (info==0) ? -1 : info>0 ? info-1 : size; \
return info; \
} \
}; \
diff --git a/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h b/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h
index c449960de4a..99dbe171c36 100644
--- a/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h
+++ b/extern/Eigen3/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -78,7 +78,7 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
{
res.itype = CHOLMOD_INT;
}
- else if (internal::is_same<_Index,UF_long>::value)
+ else if (internal::is_same<_Index,SuiteSparse_long>::value)
{
res.itype = CHOLMOD_LONG;
}
@@ -395,7 +395,7 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
CholmodSimplicialLLT(const MatrixType& matrix) : Base()
{
init();
- compute(matrix);
+ Base::compute(matrix);
}
~CholmodSimplicialLLT() {}
@@ -442,7 +442,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
{
init();
- compute(matrix);
+ Base::compute(matrix);
}
~CholmodSimplicialLDLT() {}
@@ -487,7 +487,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
CholmodSupernodalLLT(const MatrixType& matrix) : Base()
{
init();
- compute(matrix);
+ Base::compute(matrix);
}
~CholmodSupernodalLLT() {}
@@ -534,7 +534,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
CholmodDecomposition(const MatrixType& matrix) : Base()
{
init();
- compute(matrix);
+ Base::compute(matrix);
}
~CholmodDecomposition() {}
diff --git a/extern/Eigen3/Eigen/src/Core/Array.h b/extern/Eigen3/Eigen/src/Core/Array.h
index 0ab03eff0f0..0b9c38c8219 100644
--- a/extern/Eigen3/Eigen/src/Core/Array.h
+++ b/extern/Eigen3/Eigen/src/Core/Array.h
@@ -124,6 +124,21 @@ class Array
}
#endif
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+ Array(Array&& other)
+ : Base(std::move(other))
+ {
+ Base::_check_template_params();
+ if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
+ Base::_set_noalias(other);
+ }
+ Array& operator=(Array&& other)
+ {
+ other.swap(*this);
+ return *this;
+ }
+#endif
+
/** Constructs a vector or row-vector with given dimension. \only_for_vectors
*
* Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
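These move operations make returning large arrays by value cheap in C++11 builds, where EIGEN_HAVE_RVALUE_REFERENCES is defined. A sketch showing that the heap buffer is transferred rather than copied:

    // Assumes a C++11 build so the move constructor above is compiled in.
    #include <Eigen/Core>
    #include <utility>

    int main()
    {
        Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(1000000, 0.0, 1.0);
        const double* before = a.data();
        Eigen::ArrayXd b = std::move(a);    // steals a's buffer
        return b.data() == before ? 0 : 1;  // same pointer: no copy happened
    }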
diff --git a/extern/Eigen3/Eigen/src/Core/ArrayBase.h b/extern/Eigen3/Eigen/src/Core/ArrayBase.h
index 38852600dc2..33ff553712e 100644
--- a/extern/Eigen3/Eigen/src/Core/ArrayBase.h
+++ b/extern/Eigen3/Eigen/src/Core/ArrayBase.h
@@ -46,9 +46,6 @@ template<typename Derived> class ArrayBase
typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
- using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
- typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
-
typedef typename internal::traits<Derived>::StorageKind StorageKind;
typedef typename internal::traits<Derived>::Index Index;
typedef typename internal::traits<Derived>::Scalar Scalar;
@@ -56,6 +53,7 @@ template<typename Derived> class ArrayBase
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DenseBase<Derived> Base;
+ using Base::operator*;
using Base::RowsAtCompileTime;
using Base::ColsAtCompileTime;
using Base::SizeAtCompileTime;
diff --git a/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h b/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h
index a791bc3581a..b4641e2a01f 100644
--- a/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h
+++ b/extern/Eigen3/Eigen/src/Core/ArrayWrapper.h
@@ -29,6 +29,11 @@ struct traits<ArrayWrapper<ExpressionType> >
: public traits<typename remove_all<typename ExpressionType::Nested>::type >
{
typedef ArrayXpr XprKind;
+ // Let's remove NestByRefBit
+ enum {
+ Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+ Flags = Flags0 & ~NestByRefBit
+ };
};
}
@@ -149,6 +154,11 @@ struct traits<MatrixWrapper<ExpressionType> >
: public traits<typename remove_all<typename ExpressionType::Nested>::type >
{
typedef MatrixXpr XprKind;
+ // Let's remove NestByRefBit
+ enum {
+ Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+ Flags = Flags0 & ~NestByRefBit
+ };
};
}
diff --git a/extern/Eigen3/Eigen/src/Core/Assign.h b/extern/Eigen3/Eigen/src/Core/Assign.h
index 1dccc2f4212..f4817317279 100644
--- a/extern/Eigen3/Eigen/src/Core/Assign.h
+++ b/extern/Eigen3/Eigen/src/Core/Assign.h
@@ -439,19 +439,26 @@ struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Ve
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
{
- typedef packet_traits<typename Derived1::Scalar> PacketTraits;
+ typedef typename Derived1::Scalar Scalar;
+ typedef packet_traits<Scalar> PacketTraits;
enum {
packetSize = PacketTraits::size,
alignable = PacketTraits::AlignedOnScalar,
- dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
+ dstIsAligned = assign_traits<Derived1,Derived2>::DstIsAligned,
+ dstAlignment = alignable ? Aligned : int(dstIsAligned),
srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
};
+ const Scalar *dst_ptr = &dst.coeffRef(0,0);
+ if((!bool(dstIsAligned)) && (size_t(dst_ptr) % sizeof(Scalar))>0)
+ {
+ // the pointer is not aligned on scalar, so alignment is not possible
+ return assign_impl<Derived1,Derived2,DefaultTraversal,NoUnrolling>::run(dst, src);
+ }
const Index packetAlignedMask = packetSize - 1;
const Index innerSize = dst.innerSize();
const Index outerSize = dst.outerSize();
const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
- Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
- : internal::first_aligned(&dst.coeffRef(0,0), innerSize);
+ Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize);
for(Index outer = 0; outer < outerSize; ++outer)
{
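The new runtime test guards destinations whose base pointer is not even scalar-aligned, for example a view into an offset byte buffer, which first_aligned() in the vectorized path cannot handle. A sketch of such a destination (the misaligned access is formally undefined behavior; it is shown only to illustrate the input the fallback defends against):

    #include <Eigen/Core>

    int main()
    {
        unsigned char raw[1 + 16 * sizeof(float)];
        float* misaligned = reinterpret_cast<float*>(raw + 1); // not scalar-aligned
        Eigen::Map<Eigen::VectorXf> dst(misaligned, 16);
        dst = Eigen::VectorXf::Ones(16);  // now takes the scalar fallback path
        return 0;
    }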
diff --git a/extern/Eigen3/Eigen/src/Core/Block.h b/extern/Eigen3/Eigen/src/Core/Block.h
index 358b3188b38..82789444327 100644
--- a/extern/Eigen3/Eigen/src/Core/Block.h
+++ b/extern/Eigen3/Eigen/src/Core/Block.h
@@ -66,8 +66,9 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
: ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
: int(traits<XprType>::MaxColsAtCompileTime),
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
- IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
- : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+ IsDense = is_same<StorageKind,Dense>::value,
+ IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+ : (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
: XprTypeIsRowMajor,
HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
@@ -81,7 +82,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
&& (InnerStrideAtCompileTime == 1)
? PacketAccessBit : 0,
MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
- FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+ FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (traits<XprType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
diff --git a/extern/Eigen3/Eigen/src/Core/CommaInitializer.h b/extern/Eigen3/Eigen/src/Core/CommaInitializer.h
index a96867af4d5..a036d8c3bc9 100644
--- a/extern/Eigen3/Eigen/src/Core/CommaInitializer.h
+++ b/extern/Eigen3/Eigen/src/Core/CommaInitializer.h
@@ -43,6 +43,17 @@ struct CommaInitializer
m_xpr.block(0, 0, other.rows(), other.cols()) = other;
}
+ /* Copy/Move constructor which transfers ownership. This is crucial in
+ * absence of return value optimization to avoid assertions during destruction. */
+ // FIXME in C++11 mode this could be replaced by a proper RValue constructor
+ inline CommaInitializer(const CommaInitializer& o)
+ : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
+ // Mark original object as finished. In absence of R-value references we need to const_cast:
+ const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
+ const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
+ const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
+ }
+
/* inserts a scalar value in the target matrix */
CommaInitializer& operator,(const Scalar& s)
{
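The comma initializer is what backs stream-style fills; the new copy constructor keeps the destructor's completeness check from firing on temporaries when the compiler does not elide the copy. Typical usage, unchanged by this patch:

    #include <Eigen/Core>

    int main()
    {
        Eigen::Matrix2d m;
        m << 1, 2,
             3, 4;                      // operator<< returns a CommaInitializer
        return m(1, 0) == 3 ? 0 : 1;    // filled row by row
    }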
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h
index 586f77aaf32..519a866e605 100644
--- a/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h
+++ b/extern/Eigen3/Eigen/src/Core/CwiseBinaryOp.h
@@ -81,7 +81,8 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
)
),
Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
- CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
+ Cost0 = EIGEN_ADD_COST(LhsCoeffReadCost,RhsCoeffReadCost),
+ CoeffReadCost = EIGEN_ADD_COST(Cost0,functor_traits<BinaryOp>::Cost)
};
};
} // end namespace internal
diff --git a/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h b/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h
index f2de749f92b..f7ee60e9879 100644
--- a/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h
+++ b/extern/Eigen3/Eigen/src/Core/CwiseUnaryOp.h
@@ -47,7 +47,7 @@ struct traits<CwiseUnaryOp<UnaryOp, XprType> >
Flags = _XprTypeNested::Flags & (
HereditaryBits | LinearAccessBit | AlignedBit
| (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
- CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
+ CoeffReadCost = EIGEN_ADD_COST(_XprTypeNested::CoeffReadCost, functor_traits<UnaryOp>::Cost)
};
};
}
diff --git a/extern/Eigen3/Eigen/src/Core/DenseBase.h b/extern/Eigen3/Eigen/src/Core/DenseBase.h
index c5800f6c8c8..4b371b075b8 100644
--- a/extern/Eigen3/Eigen/src/Core/DenseBase.h
+++ b/extern/Eigen3/Eigen/src/Core/DenseBase.h
@@ -40,15 +40,14 @@ static inline void check_DenseIndex_is_signed() {
*/
template<typename Derived> class DenseBase
#ifndef EIGEN_PARSED_BY_DOXYGEN
- : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
- typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
+ : public internal::special_scalar_op_base<Derived, typename internal::traits<Derived>::Scalar,
+ typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
+ DenseCoeffsBase<Derived> >
#else
: public DenseCoeffsBase<Derived>
#endif // not EIGEN_PARSED_BY_DOXYGEN
{
public:
- using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
- typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
class InnerIterator;
@@ -63,8 +62,9 @@ template<typename Derived> class DenseBase
typedef typename internal::traits<Derived>::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type PacketScalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef internal::special_scalar_op_base<Derived,Scalar,RealScalar, DenseCoeffsBase<Derived> > Base;
- typedef DenseCoeffsBase<Derived> Base;
+ using Base::operator*;
using Base::derived;
using Base::const_cast_derived;
using Base::rows;
@@ -183,10 +183,6 @@ template<typename Derived> class DenseBase
/** \returns the number of nonzero coefficients which is in practice the number
* of stored coefficients. */
inline Index nonZeros() const { return size(); }
- /** \returns true if either the number of rows or the number of columns is equal to 1.
- * In other words, this function returns
- * \code rows()==1 || cols()==1 \endcode
- * \sa rows(), cols(), IsVectorAtCompileTime. */
/** \returns the outer size.
*
@@ -266,11 +262,13 @@ template<typename Derived> class DenseBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& func);
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- /** Copies \a other into *this without evaluating other. \returns a reference to *this. */
+ /** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */
template<typename OtherDerived>
Derived& lazyAssign(const DenseBase<OtherDerived>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ /** \internal Evaluates \a other into *this. \returns a reference to *this. */
+ template<typename OtherDerived>
+ Derived& lazyAssign(const ReturnByValue<OtherDerived>& other);
CommaInitializer<Derived> operator<< (const Scalar& s);
@@ -462,8 +460,10 @@ template<typename Derived> class DenseBase
template<int p> RealScalar lpNorm() const;
template<int RowFactor, int ColFactor>
- const Replicate<Derived,RowFactor,ColFactor> replicate() const;
- const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFacor,Index colFactor) const;
+ inline const Replicate<Derived,RowFactor,ColFactor> replicate() const;
+
+ typedef Replicate<Derived,Dynamic,Dynamic> ReplicateReturnType;
+ inline const ReplicateReturnType replicate(Index rowFacor,Index colFactor) const;
typedef Reverse<Derived, BothDirections> ReverseReturnType;
typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
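The replicate() declarations above change only cosmetically (an explicit ReplicateReturnType typedef); usage is as before. A sketch:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
        Eigen::RowVector2i v(1, 2);
        std::cout << v.replicate(2, 3) << "\n";  // 2x6: v tiled 2 rows by 3 cols
        return 0;
    }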
diff --git a/extern/Eigen3/Eigen/src/Core/DenseStorage.h b/extern/Eigen3/Eigen/src/Core/DenseStorage.h
index 3e7f9c1b7a7..568493cbae0 100644
--- a/extern/Eigen3/Eigen/src/Core/DenseStorage.h
+++ b/extern/Eigen3/Eigen/src/Core/DenseStorage.h
@@ -24,6 +24,14 @@ namespace internal {
struct constructor_without_unaligned_array_assert {};
+template<typename T, int Size> void check_static_allocation_size()
+{
+ // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
+ #if EIGEN_STACK_ALLOCATION_LIMIT
+ EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ #endif
+}
+
/** \internal
* Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:
* to 16 bytes boundary if the total size is a multiple of 16 bytes.
@@ -38,12 +46,12 @@ struct plain_array
plain_array()
{
- EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ check_static_allocation_size<T,Size>();
}
plain_array(constructor_without_unaligned_array_assert)
{
- EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ check_static_allocation_size<T,Size>();
}
};
@@ -76,12 +84,12 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 16>
plain_array()
{
EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf);
- EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ check_static_allocation_size<T,Size>();
}
plain_array(constructor_without_unaligned_array_assert)
{
- EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+ check_static_allocation_size<T,Size>();
}
};
@@ -114,33 +122,41 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
{
internal::plain_array<T,Size,_Options> m_data;
public:
- inline DenseStorage() {}
- inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ DenseStorage() {}
+ DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()) {}
- inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
- inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
- static inline DenseIndex rows(void) {return _Rows;}
- static inline DenseIndex cols(void) {return _Cols;}
- inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
- inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
- inline const T *data() const { return m_data.array; }
- inline T *data() { return m_data.array; }
+ DenseStorage(const DenseStorage& other) : m_data(other.m_data) {}
+ DenseStorage& operator=(const DenseStorage& other)
+ {
+ if (this != &other) m_data = other.m_data;
+ return *this;
+ }
+ DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+ void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
+ static DenseIndex rows(void) {return _Rows;}
+ static DenseIndex cols(void) {return _Cols;}
+ void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+ void resize(DenseIndex,DenseIndex,DenseIndex) {}
+ const T *data() const { return m_data.array; }
+ T *data() { return m_data.array; }
};
// null matrix
template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
{
public:
- inline DenseStorage() {}
- inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
- inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
- inline void swap(DenseStorage& ) {}
- static inline DenseIndex rows(void) {return _Rows;}
- static inline DenseIndex cols(void) {return _Cols;}
- inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
- inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
- inline const T *data() const { return 0; }
- inline T *data() { return 0; }
+ DenseStorage() {}
+ DenseStorage(internal::constructor_without_unaligned_array_assert) {}
+ DenseStorage(const DenseStorage&) {}
+ DenseStorage& operator=(const DenseStorage&) { return *this; }
+ DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+ void swap(DenseStorage& ) {}
+ static DenseIndex rows(void) {return _Rows;}
+ static DenseIndex cols(void) {return _Cols;}
+ void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+ void resize(DenseIndex,DenseIndex,DenseIndex) {}
+ const T *data() const { return 0; }
+ T *data() { return 0; }
};
// more specializations for null matrices; these are necessary to resolve ambiguities
@@ -160,18 +176,29 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
DenseIndex m_rows;
DenseIndex m_cols;
public:
- inline DenseStorage() : m_rows(0), m_cols(0) {}
- inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ DenseStorage() : m_rows(0), m_cols(0) {}
+ DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
- inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
- inline void swap(DenseStorage& other)
+ DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}
+ DenseStorage& operator=(const DenseStorage& other)
+ {
+ if (this != &other)
+ {
+ m_data = other.m_data;
+ m_rows = other.m_rows;
+ m_cols = other.m_cols;
+ }
+ return *this;
+ }
+ DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
+ void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
- inline DenseIndex rows() const {return m_rows;}
- inline DenseIndex cols() const {return m_cols;}
- inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
- inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
- inline const T *data() const { return m_data.array; }
- inline T *data() { return m_data.array; }
+ DenseIndex rows() const {return m_rows;}
+ DenseIndex cols() const {return m_cols;}
+ void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+ void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+ const T *data() const { return m_data.array; }
+ T *data() { return m_data.array; }
};
// dynamic-size matrix with fixed-size storage and fixed width
@@ -180,17 +207,27 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_rows;
public:
- inline DenseStorage() : m_rows(0) {}
- inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ DenseStorage() : m_rows(0) {}
+ DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
- inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
- inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
- inline DenseIndex rows(void) const {return m_rows;}
- inline DenseIndex cols(void) const {return _Cols;}
- inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
- inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
- inline const T *data() const { return m_data.array; }
- inline T *data() { return m_data.array; }
+ DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {}
+ DenseStorage& operator=(const DenseStorage& other)
+ {
+ if (this != &other)
+ {
+ m_data = other.m_data;
+ m_rows = other.m_rows;
+ }
+ return *this;
+ }
+ DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
+ void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+ DenseIndex rows(void) const {return m_rows;}
+ DenseIndex cols(void) const {return _Cols;}
+ void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+ void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+ const T *data() const { return m_data.array; }
+ T *data() { return m_data.array; }
};
// dynamic-size matrix with fixed-size storage and fixed height
@@ -199,17 +236,27 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
internal::plain_array<T,Size,_Options> m_data;
DenseIndex m_cols;
public:
- inline DenseStorage() : m_cols(0) {}
- inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ DenseStorage() : m_cols(0) {}
+ DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
- inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
- inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
- inline DenseIndex rows(void) const {return _Rows;}
- inline DenseIndex cols(void) const {return m_cols;}
- inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
- inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
- inline const T *data() const { return m_data.array; }
- inline T *data() { return m_data.array; }
+ DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {}
+ DenseStorage& operator=(const DenseStorage& other)
+ {
+ if (this != &other)
+ {
+ m_data = other.m_data;
+ m_cols = other.m_cols;
+ }
+ return *this;
+ }
+ DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
+ void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+ DenseIndex rows(void) const {return _Rows;}
+ DenseIndex cols(void) const {return m_cols;}
+ void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+ void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+ const T *data() const { return m_data.array; }
+ T *data() { return m_data.array; }
};
// purely dynamic matrix.
@@ -219,18 +266,35 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
DenseIndex m_rows;
DenseIndex m_cols;
public:
- inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
- inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+ DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+ DenseStorage(internal::constructor_without_unaligned_array_assert)
: m_data(0), m_rows(0), m_cols(0) {}
- inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+ DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
: m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
- inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
- inline void swap(DenseStorage& other)
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+ DenseStorage(DenseStorage&& other)
+ : m_data(std::move(other.m_data))
+ , m_rows(std::move(other.m_rows))
+ , m_cols(std::move(other.m_cols))
+ {
+ other.m_data = nullptr;
+ }
+ DenseStorage& operator=(DenseStorage&& other)
+ {
+ using std::swap;
+ swap(m_data, other.m_data);
+ swap(m_rows, other.m_rows);
+ swap(m_cols, other.m_cols);
+ return *this;
+ }
+#endif
+ ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
+ void swap(DenseStorage& other)
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
- inline DenseIndex rows(void) const {return m_rows;}
- inline DenseIndex cols(void) const {return m_cols;}
- inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+ DenseIndex rows(void) const {return m_rows;}
+ DenseIndex cols(void) const {return m_cols;}
+ void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
m_rows = nbRows;
@@ -250,8 +314,11 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
m_rows = nbRows;
m_cols = nbCols;
}
- inline const T *data() const { return m_data; }
- inline T *data() { return m_data; }
+ const T *data() const { return m_data; }
+ T *data() { return m_data; }
+ private:
+ DenseStorage(const DenseStorage&);
+ DenseStorage& operator=(const DenseStorage&);
};
// matrix with dynamic width and fixed height (so that matrix has dynamic size).
@@ -260,15 +327,30 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
T *m_data;
DenseIndex m_cols;
public:
- inline DenseStorage() : m_data(0), m_cols(0) {}
- inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
- inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
+ DenseStorage() : m_data(0), m_cols(0) {}
+ DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
+ DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
- inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
- inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
- static inline DenseIndex rows(void) {return _Rows;}
- inline DenseIndex cols(void) const {return m_cols;}
- inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+ DenseStorage(DenseStorage&& other)
+ : m_data(std::move(other.m_data))
+ , m_cols(std::move(other.m_cols))
+ {
+ other.m_data = nullptr;
+ }
+ DenseStorage& operator=(DenseStorage&& other)
+ {
+ using std::swap;
+ swap(m_data, other.m_data);
+ swap(m_cols, other.m_cols);
+ return *this;
+ }
+#endif
+ ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
+ void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+ static DenseIndex rows(void) {return _Rows;}
+ DenseIndex cols(void) const {return m_cols;}
+ void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
m_cols = nbCols;
@@ -286,8 +368,11 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
}
m_cols = nbCols;
}
- inline const T *data() const { return m_data; }
- inline T *data() { return m_data; }
+ const T *data() const { return m_data; }
+ T *data() { return m_data; }
+ private:
+ DenseStorage(const DenseStorage&);
+ DenseStorage& operator=(const DenseStorage&);
};
// matrix with dynamic height and fixed width (so that matrix has dynamic size).
@@ -296,15 +381,30 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
T *m_data;
DenseIndex m_rows;
public:
- inline DenseStorage() : m_data(0), m_rows(0) {}
- inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
- inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
+ DenseStorage() : m_data(0), m_rows(0) {}
+ DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
+ DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
- inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
- inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
- inline DenseIndex rows(void) const {return m_rows;}
- static inline DenseIndex cols(void) {return _Cols;}
- inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+ DenseStorage(DenseStorage&& other)
+ : m_data(std::move(other.m_data))
+ , m_rows(std::move(other.m_rows))
+ {
+ other.m_data = nullptr;
+ }
+ DenseStorage& operator=(DenseStorage&& other)
+ {
+ using std::swap;
+ swap(m_data, other.m_data);
+ swap(m_rows, other.m_rows);
+ return *this;
+ }
+#endif
+ ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
+ void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+ DenseIndex rows(void) const {return m_rows;}
+ static DenseIndex cols(void) {return _Cols;}
+ void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
{
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
m_rows = nbRows;
@@ -322,8 +422,11 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
}
m_rows = nbRows;
}
- inline const T *data() const { return m_data; }
- inline T *data() { return m_data; }
+ const T *data() const { return m_data; }
+ T *data() { return m_data; }
+ private:
+ DenseStorage(const DenseStorage&);
+ DenseStorage& operator=(const DenseStorage&);
};
} // end namespace Eigen
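The repeated 128*128*8 assertion is now centralized in check_static_allocation_size() and keyed on EIGEN_STACK_ALLOCATION_LIMIT, so the cap on fixed-size (stack-allocated) objects is configurable, with 0 disabling it. A sketch, assuming the macro is set before the first Eigen include:

    // Raises the fixed-size allocation cap to 256 KiB for this translation unit.
    #define EIGEN_STACK_ALLOCATION_LIMIT (256 * 1024)
    #include <Eigen/Core>

    int main()
    {
        Eigen::Matrix<double, 128, 128> m;   // 131072 bytes, within the raised cap
        m.setZero();
        return 0;
    }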
diff --git a/extern/Eigen3/Eigen/src/Core/Diagonal.h b/extern/Eigen3/Eigen/src/Core/Diagonal.h
index aab8007b3fc..68cf6d4b044 100644
--- a/extern/Eigen3/Eigen/src/Core/Diagonal.h
+++ b/extern/Eigen3/Eigen/src/Core/Diagonal.h
@@ -190,18 +190,18 @@ MatrixBase<Derived>::diagonal() const
*
* \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived>
-inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<DynamicIndex>::Type
+inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index)
{
- return typename DiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
+ return DiagonalDynamicIndexReturnType(derived(), index);
}
/** This is the const version of diagonal(Index). */
template<typename Derived>
-inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<DynamicIndex>::Type
+inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index) const
{
- return typename ConstDiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
+ return ConstDiagonalDynamicIndexReturnType(derived(), index);
}
/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
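The Diagonal change only renames the return-type helpers to MSVC-friendly typedefs; diagonal(Index) itself behaves as before. A sketch:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::Matrix3d m;
        m << 1, 2, 3,
             4, 5, 6,
             7, 8, 9;
        std::cout << m.diagonal(1).transpose() << "\n";   // super-diagonal: 2 6
        std::cout << m.diagonal(-1).transpose() << "\n";  // sub-diagonal:   4 8
        return 0;
    }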
diff --git a/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h b/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h
index c03a0c2e12b..cc6b536e199 100644
--- a/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h
+++ b/extern/Eigen3/Eigen/src/Core/DiagonalProduct.h
@@ -34,8 +34,9 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
_LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,
- Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
- CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
+ Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
+ Cost0 = EIGEN_ADD_COST(NumTraits<Scalar>::MulCost, MatrixType::CoeffReadCost),
+ CoeffReadCost = EIGEN_ADD_COST(Cost0,DiagonalType::DiagonalVectorType::CoeffReadCost)
};
};
}
diff --git a/extern/Eigen3/Eigen/src/Core/Functors.h b/extern/Eigen3/Eigen/src/Core/Functors.h
index 04fb217323d..5f14c6587e0 100644
--- a/extern/Eigen3/Eigen/src/Core/Functors.h
+++ b/extern/Eigen3/Eigen/src/Core/Functors.h
@@ -259,6 +259,47 @@ template<> struct functor_traits<scalar_boolean_or_op> {
};
};
+/** \internal
+ * \brief Template functors for comparison of two scalars
+ * \todo Implement packet-comparisons
+ */
+template<typename Scalar, ComparisonName cmp> struct scalar_cmp_op;
+
+template<typename Scalar, ComparisonName cmp>
+struct functor_traits<scalar_cmp_op<Scalar, cmp> > {
+ enum {
+ Cost = NumTraits<Scalar>::AddCost,
+ PacketAccess = false
+ };
+};
+
+template<ComparisonName Cmp, typename Scalar>
+struct result_of<scalar_cmp_op<Scalar, Cmp>(Scalar,Scalar)> {
+ typedef bool type;
+};
+
+
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_EQ> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LT> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LE> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_UNORD> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_NEQ> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;}
+};
+
// unary functors:
/** \internal
@@ -589,7 +630,7 @@ struct linspaced_op_impl<Scalar,true>
template<typename Index>
EIGEN_STRONG_INLINE const Packet packetOp(Index i) const
- { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(i),m_interPacket))); }
+ { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(Scalar(i)),m_interPacket))); }
const Scalar m_low;
const Scalar m_step;
@@ -609,7 +650,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
template <typename Scalar, bool RandomAccess> struct linspaced_op
{
typedef typename packet_traits<Scalar>::type Packet;
- linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
+ linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}
template<typename Index>
EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
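The new scalar_cmp_op family gives internal code a single templated entry point for coefficient-wise comparisons. On the user side such comparisons look like the following sketch (not new API in this patch):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
        Eigen::ArrayXf a(4), b(4);
        a << 1, 2, 3, 4;
        b << 4, 3, 2, 1;
        std::cout << (a < b).count() << " coefficients satisfy a < b\n"; // 2
        return 0;
    }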
diff --git a/extern/Eigen3/Eigen/src/Core/GeneralProduct.h b/extern/Eigen3/Eigen/src/Core/GeneralProduct.h
index 2a59d94645e..0eae529909f 100644
--- a/extern/Eigen3/Eigen/src/Core/GeneralProduct.h
+++ b/extern/Eigen3/Eigen/src/Core/GeneralProduct.h
@@ -232,7 +232,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
// FIXME not very good if rhs is real and lhs complex while alpha is real too
const Index cols = dest.cols();
for (Index j=0; j<cols; ++j)
- func(dest.col(j), prod.rhs().coeff(j) * prod.lhs());
+ func(dest.col(j), prod.rhs().coeff(0,j) * prod.lhs());
}
// Row major
@@ -243,7 +243,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
// FIXME not very good if lhs is real and rhs complex while alpha is real too
const Index rows = dest.rows();
for (Index i=0; i<rows; ++i)
- func(dest.row(i), prod.lhs().coeff(i) * prod.rhs());
+ func(dest.row(i), prod.lhs().coeff(i,0) * prod.rhs());
}
template<typename Lhs, typename Rhs>
@@ -257,7 +257,7 @@ template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, OuterProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
{
- template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
+ template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
@@ -281,22 +281,22 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>
template<typename Dest>
inline void evalTo(Dest& dest) const {
- internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
+ internal::outer_product_selector_run(*this, dest, set(), is_row_major<Dest>());
}
template<typename Dest>
inline void addTo(Dest& dest) const {
- internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
+ internal::outer_product_selector_run(*this, dest, add(), is_row_major<Dest>());
}
template<typename Dest>
inline void subTo(Dest& dest) const {
- internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
+ internal::outer_product_selector_run(*this, dest, sub(), is_row_major<Dest>());
}
template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
- internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
+ internal::outer_product_selector_run(*this, dest, adds(alpha), is_row_major<Dest>());
}
};
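The coeff(j) to coeff(0,j) change affects only how the outer-product kernel indexes its operands; user-visible behavior remains the plain rank-1 product. A sketch:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
        Eigen::Vector3d u(1, 2, 3);
        Eigen::RowVector2d v(10, 20);
        Eigen::Matrix<double, 3, 2> m = u * v;  // rank-1 outer product
        std::cout << m << "\n";
        return 0;
    }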
diff --git a/extern/Eigen3/Eigen/src/Core/MapBase.h b/extern/Eigen3/Eigen/src/Core/MapBase.h
index 6876de588c0..a9828f7f4b2 100644
--- a/extern/Eigen3/Eigen/src/Core/MapBase.h
+++ b/extern/Eigen3/Eigen/src/Core/MapBase.h
@@ -123,7 +123,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
}
- inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
+ explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
checkSanity();
@@ -157,7 +157,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
internal::inner_stride_at_compile_time<Derived>::ret==1),
PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
- && "data is not aligned");
+ && "input pointer is not aligned on a 16 byte boundary");
}
PointerType m_data;
@@ -168,6 +168,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
template<typename Derived> class MapBase<Derived, WriteAccessors>
: public MapBase<Derived, ReadOnlyAccessors>
{
+ typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
public:
typedef MapBase<Derived, ReadOnlyAccessors> Base;
@@ -230,13 +231,17 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>
Derived& operator=(const MapBase& other)
{
- Base::Base::operator=(other);
+ ReadOnlyMapBase::Base::operator=(other);
return derived();
}
- using Base::Base::operator=;
+ // In theory we could simply refer to Base::Base::operator=, but MSVC does not like Base::Base,
+ // see bugs 821 and 920.
+ using ReadOnlyMapBase::Base::operator=;
};
+#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS
+
} // end namespace Eigen
#endif // EIGEN_MAPBASE_H
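The ReadOnlyMapBase typedef above works around MSVC's rejection of the qualified name Base::Base (Eigen bugs 821 and 920) by giving the intermediate base a name of its own. A reduced sketch of the idiom, with made-up class names:

    struct GrandParent
    {
      GrandParent& operator=(const GrandParent&) { return *this; }
    };
    struct Parent : GrandParent
    {
      typedef GrandParent Base;
    };
    struct Child : Parent
    {
      typedef Parent ReadOnlyBase;   // names the intermediate base once
      // 'using Base::Base::operator=;' is the intent; spelling the first
      // component through a dedicated typedef keeps MSVC happy:
      using ReadOnlyBase::Base::operator=;
    };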
diff --git a/extern/Eigen3/Eigen/src/Core/MathFunctions.h b/extern/Eigen3/Eigen/src/Core/MathFunctions.h
index 2bfc5ebd98a..adf2f9c511b 100644
--- a/extern/Eigen3/Eigen/src/Core/MathFunctions.h
+++ b/extern/Eigen3/Eigen/src/Core/MathFunctions.h
@@ -294,7 +294,7 @@ struct hypot_impl
RealScalar _x = abs(x);
RealScalar _y = abs(y);
RealScalar p = (max)(_x, _y);
- if(p==RealScalar(0)) return 0;
+ if(p==RealScalar(0)) return RealScalar(0);
RealScalar q = (min)(_x, _y);
RealScalar qp = q/p;
return p * sqrt(RealScalar(1) + qp*qp);
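Two notes on this hunk: the RealScalar(0) return keeps both branches of hypot at the same type, and the surrounding formula is the classic overflow-safe scaling p * sqrt(1 + (q/p)^2) with p = max(|x|,|y|): since q/p <= 1, the square can neither overflow nor underflow to zero prematurely. A standalone sketch:

    #include <algorithm>
    #include <cmath>

    double hypot_scaled(double x, double y)
    {
      double ax = std::abs(x), ay = std::abs(y);
      double p = std::max(ax, ay);
      if (p == 0.0) return 0.0;           // both inputs are zero
      double qp = std::min(ax, ay) / p;   // always in [0, 1]
      return p * std::sqrt(1.0 + qp * qp);
    }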
diff --git a/extern/Eigen3/Eigen/src/Core/Matrix.h b/extern/Eigen3/Eigen/src/Core/Matrix.h
index d7d0b5b9a4f..02be142d8cc 100644
--- a/extern/Eigen3/Eigen/src/Core/Matrix.h
+++ b/extern/Eigen3/Eigen/src/Core/Matrix.h
@@ -211,6 +211,21 @@ class Matrix
: Base(internal::constructor_without_unaligned_array_assert())
{ Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+ Matrix(Matrix&& other)
+ : Base(std::move(other))
+ {
+ Base::_check_template_params();
+ if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
+ Base::_set_noalias(other);
+ }
+ Matrix& operator=(Matrix&& other)
+ {
+ other.swap(*this);
+ return *this;
+ }
+#endif
+
/** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
*
* Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
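The new rvalue-reference constructor and assignment are guarded by EIGEN_HAVE_RVALUE_REFERENCES (defined later in this patch, in Macros.h) so the header still builds as C++03. A minimal sketch of the same move pattern, assuming a movable storage member:

    #include <utility>

    struct Storage { /* owns a heap buffer in the real class */ };

    class Mat
    {
      Storage m_storage;
    public:
      Mat() {}
      Mat(Mat&& other) : m_storage(std::move(other.m_storage)) {}
      Mat& operator=(Mat&& other)
      {
        using std::swap;
        swap(m_storage, other.m_storage);  // cheap, leaves 'other' valid
        return *this;
      }
    };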
diff --git a/extern/Eigen3/Eigen/src/Core/MatrixBase.h b/extern/Eigen3/Eigen/src/Core/MatrixBase.h
index 344b38f2fc7..e83ef4dc056 100644
--- a/extern/Eigen3/Eigen/src/Core/MatrixBase.h
+++ b/extern/Eigen3/Eigen/src/Core/MatrixBase.h
@@ -159,13 +159,11 @@ template<typename Derived> class MatrixBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& other);
-#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ProductDerived, typename Lhs, typename Rhs>
Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
template<typename MatrixPower, typename Lhs, typename Rhs>
Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
Derived& operator+=(const MatrixBase<OtherDerived>& other);
@@ -215,7 +213,7 @@ template<typename Derived> class MatrixBase
typedef Diagonal<Derived> DiagonalReturnType;
DiagonalReturnType diagonal();
- typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
+ typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
ConstDiagonalReturnType diagonal() const;
template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
@@ -223,16 +221,12 @@ template<typename Derived> class MatrixBase
template<int Index> typename DiagonalIndexReturnType<Index>::Type diagonal();
template<int Index> typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;
+
+ typedef Diagonal<Derived,DynamicIndex> DiagonalDynamicIndexReturnType;
+ typedef typename internal::add_const<Diagonal<const Derived,DynamicIndex> >::type ConstDiagonalDynamicIndexReturnType;
- // Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
- // On the other hand they confuse MSVC8...
- #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
- typename MatrixBase::template DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
- typename MatrixBase::template ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
- #else
- typename DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
- typename ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
- #endif
+ DiagonalDynamicIndexReturnType diagonal(Index index);
+ ConstDiagonalDynamicIndexReturnType diagonal(Index index) const;
#ifdef EIGEN2_SUPPORT
template<unsigned int Mode> typename internal::eigen2_part_return_type<Derived, Mode>::type part();
@@ -446,6 +440,15 @@ template<typename Derived> class MatrixBase
template<typename OtherScalar>
void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
+///////// SparseCore module /////////
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type
+ cwiseProduct(const SparseMatrixBase<OtherDerived> &other) const
+ {
+ return other.cwiseProduct(derived());
+ }
+
///////// MatrixFunctions module /////////
typedef typename internal::stem_function<Scalar>::type StemFunction;
diff --git a/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h b/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h
index 1297b8413fb..85ffae2653b 100644
--- a/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/PermutationMatrix.h
@@ -250,6 +250,35 @@ class PermutationBase : public EigenBase<Derived>
template<typename Other> friend
inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
{ return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }
+
+ /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.
+ *
+ * This function is an O(\c n) procedure allocating a buffer of \c n booleans.
+ */
+ Index determinant() const
+ {
+ Index res = 1;
+ Index n = size();
+ Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);
+ mask.fill(false);
+ Index r = 0;
+ while(r < n)
+ {
+ // search for the next seed
+ while(r<n && mask[r]) r++;
+ if(r>=n)
+ break;
+ // we got one, let's follow it until we are back to the seed
+ Index k0 = r++;
+ mask.coeffRef(k0) = true;
+ for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))
+ {
+ mask.coeffRef(k) = true;
+ res = -res;
+ }
+ }
+ return res;
+ }
protected:
@@ -555,7 +584,10 @@ struct permut_matrix_product_retval
const Index n = Side==OnTheLeft ? rows() : cols();
// FIXME we need an is_same for expression that is not sensitive to constness. For instance
// is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
- if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
+ if( is_same<MatrixTypeNestedCleaned,Dest>::value
+ && blas_traits<MatrixTypeNestedCleaned>::HasUsableDirectAccess
+ && blas_traits<Dest>::HasUsableDirectAccess
+ && extract_data(dst) == extract_data(m_matrix))
{
// apply the permutation inplace
Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
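The determinant() routine added above exploits the cycle decomposition of a permutation: a cycle of length L contributes (-1)^(L-1), so the sign flips once per element beyond each cycle's seed. The same algorithm on a plain index array, as a self-contained sketch:

    #include <vector>

    int permutation_determinant(const std::vector<int>& perm)
    {
      const int n = static_cast<int>(perm.size());
      std::vector<bool> seen(n, false);
      int det = 1;
      for (int r = 0; r < n; ++r)
      {
        if (seen[r]) continue;        // inside an already-traversed cycle
        seen[r] = true;               // r is the seed of a new cycle
        for (int k = perm[r]; k != r; k = perm[k])
        {
          seen[k] = true;
          det = -det;                 // one flip per extra cycle element
        }
      }
      return det;                     // +1 or -1
    }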
diff --git a/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h b/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
index dd34b59e541..a4e4af4a7b2 100644
--- a/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
+++ b/extern/Eigen3/Eigen/src/Core/PlainObjectBase.h
@@ -437,6 +437,36 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
}
#endif
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+ PlainObjectBase(PlainObjectBase&& other)
+ : m_storage( std::move(other.m_storage) )
+ {
+ }
+
+ PlainObjectBase& operator=(PlainObjectBase&& other)
+ {
+ using std::swap;
+ swap(m_storage, other.m_storage);
+ return *this;
+ }
+#endif
+
+ /** Copy constructor */
+ EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other)
+ : m_storage()
+ {
+ _check_template_params();
+ lazyAssign(other);
+ }
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived> &other)
+ : m_storage()
+ {
+ _check_template_params();
+ lazyAssign(other);
+ }
+
EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
: m_storage(a_size, nbRows, nbCols)
{
@@ -573,6 +603,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
: (rows() == other.rows() && cols() == other.cols())))
&& "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
EIGEN_ONLY_USED_FOR_DEBUG(other);
+ if(this->size()==0)
+ resizeLike(other);
#else
resizeLike(other);
#endif
diff --git a/extern/Eigen3/Eigen/src/Core/ProductBase.h b/extern/Eigen3/Eigen/src/Core/ProductBase.h
index a494b5f8703..cf74470a9a1 100644
--- a/extern/Eigen3/Eigen/src/Core/ProductBase.h
+++ b/extern/Eigen3/Eigen/src/Core/ProductBase.h
@@ -85,7 +85,14 @@ class ProductBase : public MatrixBase<Derived>
public:
+#ifndef EIGEN_NO_MALLOC
+ typedef typename Base::PlainObject BasePlainObject;
+ typedef Matrix<Scalar,RowsAtCompileTime==1?1:Dynamic,ColsAtCompileTime==1?1:Dynamic,BasePlainObject::Options> DynPlainObject;
+ typedef typename internal::conditional<(BasePlainObject::SizeAtCompileTime==Dynamic) || (BasePlainObject::SizeAtCompileTime*int(sizeof(Scalar)) < int(EIGEN_STACK_ALLOCATION_LIMIT)),
+ BasePlainObject, DynPlainObject>::type PlainObject;
+#else
typedef typename Base::PlainObject PlainObject;
+#endif
ProductBase(const Lhs& a_lhs, const Rhs& a_rhs)
: m_lhs(a_lhs), m_rhs(a_rhs)
@@ -180,7 +187,12 @@ namespace internal {
template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
struct nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
{
- typedef PlainObject const& type;
+ typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
+};
+template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
+struct nested<const GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
+{
+ typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
};
}
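The PlainObject override above downgrades a huge fixed-size temporary to a dynamically allocated one at compile time. The core trick is just a size-gated conditional typedef; an illustrative sketch with invented names:

    #include <type_traits>

    enum { kStackLimit = 131072 };  // bytes, mirroring EIGEN_STACK_ALLOCATION_LIMIT

    template<typename FixedT, typename DynT, int SizeBytes>
    struct pick_plain_object
    {
      // keep the fixed-size (stack) type only while it fits under the limit
      typedef typename std::conditional<(SizeBytes < kStackLimit),
                                        FixedT, DynT>::type type;
    };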
diff --git a/extern/Eigen3/Eigen/src/Core/Redux.h b/extern/Eigen3/Eigen/src/Core/Redux.h
index 50548fa9a0e..9b8662a6f9a 100644
--- a/extern/Eigen3/Eigen/src/Core/Redux.h
+++ b/extern/Eigen3/Eigen/src/Core/Redux.h
@@ -247,8 +247,9 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
}
};
-template<typename Func, typename Derived>
-struct redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
+// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
+template<typename Func, typename Derived, int Unrolling>
+struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;
diff --git a/extern/Eigen3/Eigen/src/Core/Ref.h b/extern/Eigen3/Eigen/src/Core/Ref.h
index 00d9e6d2b41..7a3becaf882 100644
--- a/extern/Eigen3/Eigen/src/Core/Ref.h
+++ b/extern/Eigen3/Eigen/src/Core/Ref.h
@@ -101,14 +101,15 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
template<typename Derived> struct match {
enum {
HasDirectAccess = internal::has_direct_access<Derived>::ret,
- StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+ StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
|| int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
|| (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
OuterStrideMatch = Derived::IsVectorAtCompileTime
|| int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
- MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
+ ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
+ MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
@@ -172,8 +173,12 @@ protected:
}
else
::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
- ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
- StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
+
+ if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit)))
+ ::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1);
+ else
+ ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
+ StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
}
StrideBase m_stride;
@@ -183,7 +188,11 @@ protected:
template<typename PlainObjectType, int Options, typename StrideType> class Ref
: public RefBase<Ref<PlainObjectType, Options, StrideType> >
{
+ private:
typedef internal::traits<Ref> Traits;
+ template<typename Derived>
+ inline Ref(const PlainObjectBase<Derived>& expr,
+ typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);
public:
typedef RefBase<Ref> Base;
@@ -195,17 +204,20 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
inline Ref(PlainObjectBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
{
- Base::construct(expr);
+ EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ Base::construct(expr.derived());
}
template<typename Derived>
inline Ref(const DenseBase<Derived>& expr,
- typename internal::enable_if<bool(internal::is_lvalue<Derived>::value&&bool(Traits::template match<Derived>::MatchAtCompileTime)),Derived>::type* = 0,
- int = Derived::ThisConstantIsPrivateInPlainObjectBase)
+ typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
#else
template<typename Derived>
inline Ref(DenseBase<Derived>& expr)
#endif
{
+ EIGEN_STATIC_ASSERT(static_cast<bool>(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
+ EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+ enum { THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY = Derived::ThisConstantIsPrivateInPlainObjectBase};
Base::construct(expr.const_cast_derived());
}
@@ -224,13 +236,23 @@ template<typename TPlainObjectType, int Options, typename StrideType> class Ref<
EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
template<typename Derived>
- inline Ref(const DenseBase<Derived>& expr)
+ inline Ref(const DenseBase<Derived>& expr,
+ typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)
{
// std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
// std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
construct(expr.derived(), typename Traits::template match<Derived>::type());
}
+
+ inline Ref(const Ref& other) : Base(other) {
+ // the copy constructor must not copy m_object, to avoid an unnecessary malloc and copy
+ }
+
+ template<typename OtherRef>
+ inline Ref(const RefBase<OtherRef>& other) {
+ construct(other.derived(), typename Traits::template match<OtherRef>::type());
+ }
protected:
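The reworked constructors pair enable_if with EIGEN_STATIC_ASSERT: SFINAE removes candidates that cannot possibly match, and the assert converts near-misses (such as a scalar-type mismatch) into a readable diagnostic instead of an opaque overload failure. A stripped-down sketch of the shape, not Eigen's real machinery:

    #include <type_traits>

    template<typename ExpectedScalar>
    struct RefLike
    {
      template<typename T>
      RefLike(const T&,
              typename std::enable_if<
                std::is_same<typename T::Scalar, ExpectedScalar>::value,
                T>::type* = 0)
      { /* bind to the expression's data */ }
    };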
diff --git a/extern/Eigen3/Eigen/src/Core/Replicate.h b/extern/Eigen3/Eigen/src/Core/Replicate.h
index dde86a8349b..ac4537c1422 100644
--- a/extern/Eigen3/Eigen/src/Core/Replicate.h
+++ b/extern/Eigen3/Eigen/src/Core/Replicate.h
@@ -135,7 +135,7 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
*/
template<typename Derived>
template<int RowFactor, int ColFactor>
-inline const Replicate<Derived,RowFactor,ColFactor>
+const Replicate<Derived,RowFactor,ColFactor>
DenseBase<Derived>::replicate() const
{
return Replicate<Derived,RowFactor,ColFactor>(derived());
@@ -150,7 +150,7 @@ DenseBase<Derived>::replicate() const
* \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
*/
template<typename Derived>
-inline const Replicate<Derived,Dynamic,Dynamic>
+const typename DenseBase<Derived>::ReplicateReturnType
DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
{
return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
diff --git a/extern/Eigen3/Eigen/src/Core/ReturnByValue.h b/extern/Eigen3/Eigen/src/Core/ReturnByValue.h
index d66c24ba0f8..f635598dccf 100644
--- a/extern/Eigen3/Eigen/src/Core/ReturnByValue.h
+++ b/extern/Eigen3/Eigen/src/Core/ReturnByValue.h
@@ -72,6 +72,8 @@ template<typename Derived> class ReturnByValue
const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
+ template<int LoadMode> Unusable& packet(Index) const;
+ template<int LoadMode> Unusable& packet(Index, Index) const;
#endif
};
@@ -83,6 +85,15 @@ Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
return derived();
}
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::lazyAssign(const ReturnByValue<OtherDerived>& other)
+{
+ other.evalTo(derived());
+ return derived();
+}
+
+
} // end namespace Eigen
#endif // EIGEN_RETURNBYVALUE_H
diff --git a/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
index 22f3047b43f..0956475af51 100644
--- a/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
+++ b/extern/Eigen3/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -180,15 +180,9 @@ inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
template<typename Derived>
inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
- typedef typename internal::conditional<NumTraits<Scalar>::IsInteger,
- internal::scalar_quotient_op<Scalar>,
- internal::scalar_product_op<Scalar> >::type BinOp;
typedef typename Derived::PlainObject PlainObject;
- SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
- Scalar actual_other;
- if(NumTraits<Scalar>::IsInteger) actual_other = other;
- else actual_other = Scalar(1)/other;
- tmp = PlainObject::Constant(rows(),cols(), actual_other);
+ SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
+ tmp = PlainObject::Constant(rows(),cols(), other);
return derived();
}
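The simplification above always divides by 'other' instead of multiplying by a precomputed reciprocal. Dividing directly is a single correctly rounded operation, whereas 1/other followed by a multiply rounds twice and can be off in the last bit; for integer scalars the reciprocal trick is outright wrong, which is why the old code special-cased them. A small illustration:

    #include <cstdio>

    int main()
    {
      double direct = 49.0 / 3.0;           // one rounding
      double recip  = 49.0 * (1.0 / 3.0);   // two roundings, may differ by 1 ulp
      std::printf("%d\n", direct == recip); // not guaranteed to print 1

      int i = 7;
      i /= 2;   // 3, as intended; i *= (1/2) would have zeroed it
      return 0;
    }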
diff --git a/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h b/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h
index fba07365f6f..4d65392c685 100644
--- a/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/TriangularMatrix.h
@@ -278,21 +278,21 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
/** Efficient triangular matrix times vector/matrix product */
template<typename OtherDerived>
- TriangularProduct<Mode,true,MatrixType,false,OtherDerived, OtherDerived::IsVectorAtCompileTime>
+ TriangularProduct<Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
return TriangularProduct
- <Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime>
+ <Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1>
(m_matrix, rhs.derived());
}
/** Efficient vector/matrix times triangular matrix product */
template<typename OtherDerived> friend
- TriangularProduct<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
+ TriangularProduct<Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false>
operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs)
{
return TriangularProduct
- <Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
+ <Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false>
(lhs.derived(),rhs.m_matrix);
}
@@ -380,19 +380,19 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
setZero();
- return assignProduct(other,1);
+ return assignProduct(other.derived(),1);
}
template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
- return assignProduct(other,1);
+ return assignProduct(other.derived(),1);
}
template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
- return assignProduct(other,-1);
+ return assignProduct(other.derived(),-1);
}
@@ -400,25 +400,34 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct<ProductDerived>& other)
{
setZero();
- return assignProduct(other,other.alpha());
+ return assignProduct(other.derived(),other.alpha());
}
template<typename ProductDerived>
EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct<ProductDerived>& other)
{
- return assignProduct(other,other.alpha());
+ return assignProduct(other.derived(),other.alpha());
}
template<typename ProductDerived>
EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct<ProductDerived>& other)
{
- return assignProduct(other,-other.alpha());
+ return assignProduct(other.derived(),-other.alpha());
}
protected:
template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase<ProductDerived, Lhs,Rhs>& prod, const Scalar& alpha);
+
+ template<int Mode, bool LhsIsTriangular,
+ typename Lhs, bool LhsIsVector,
+ typename Rhs, bool RhsIsVector>
+ EIGEN_STRONG_INLINE TriangularView& assignProduct(const TriangularProduct<Mode, LhsIsTriangular, Lhs, LhsIsVector, Rhs, RhsIsVector>& prod, const Scalar& alpha)
+ {
+ lazyAssign(alpha*prod.eval());
+ return *this;
+ }
MatrixTypeNested m_matrix;
};
diff --git a/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h b/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h
index f183d31de2a..8d9255eef6a 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/NEON/Complex.h
@@ -110,7 +110,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
-template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { __pld((float *)addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ARM_PREFETCH((float *)addr); }
template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{
diff --git a/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h b/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
index 163bac215e6..d49670e0410 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -48,9 +48,18 @@ typedef uint32x4_t Packet4ui;
#define EIGEN_INIT_NEON_PACKET2(X, Y) {X, Y}
#define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
#endif
-
-#ifndef __pld
-#define __pld(x) asm volatile ( " pld [%[addr]]\n" :: [addr] "r" (x) : "cc" );
+
+// arm64 does not have the pld instruction. If available, let's trust the __builtin_prefetch built-in function,
+// which is available on LLVM and GCC (at least)
+#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+ #define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
+#elif defined __pld
+ #define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
+#elif !defined(__aarch64__)
+ #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( " pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
+#else
+ // by default no explicit prefetching
+ #define EIGEN_ARM_PREFETCH(ADDR)
#endif
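A usage sketch for the fallback chain just defined: callers issue the hint unconditionally and never need to know whether it resolved to the compiler built-in, the legacy __pld macro, raw pld assembly, or a no-op (the assumption here is a 64-byte cache line, hence the stride of 16 floats):

    static inline void warm_cache(const float* p, int n)
    {
      for (int i = 0; i < n; i += 16)
        EIGEN_ARM_PREFETCH(p + i);  // a hint only: never faults, may do nothing
    }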
template<> struct packet_traits<float> : default_packet_traits
@@ -209,8 +218,8 @@ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& f
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
-template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { __pld(addr); }
-template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { __pld(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_ARM_PREFETCH(addr); }
// FIXME only store the 2 first elements ?
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
@@ -375,6 +384,7 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
max = vpmax_s32(a_lo, a_hi);
+ max = vpmax_s32(max, max);
return vget_lane_s32(max, 0);
}
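The extra vpmax above fixes an incomplete reduction: after splitting the four lanes into two halves, one pairwise max leaves each lane holding the maximum of a single pair, so lane 0 used to return max(a0,a1) only. A second pairwise step folds in the other pair. Scalar model of the corrected logic:

    int predux_max4(const int a[4])
    {
      int m01 = (a[0] > a[1]) ? a[0] : a[1];  // first pairwise step
      int m23 = (a[2] > a[3]) ? a[2] : a[3];
      return (m01 > m23) ? m01 : m23;         // second step, previously missing
    }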
diff --git a/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h b/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
index 99cbd0d95be..2b07168ae4f 100644
--- a/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
+++ b/extern/Eigen3/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -52,7 +52,7 @@ Packet4f plog<Packet4f>(const Packet4f& _x)
Packet4i emm0;
- Packet4f invalid_mask = _mm_cmplt_ps(x, _mm_setzero_ps());
+ Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // "not greater-or-equal" is also true when x is NaN
Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());
x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
@@ -126,7 +126,7 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
- Packet4f tmp = _mm_setzero_ps(), fx;
+ Packet4f tmp, fx;
Packet4i emm0;
// clamp x
@@ -166,7 +166,7 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
emm0 = _mm_cvttps_epi32(fx);
emm0 = _mm_add_epi32(emm0, p4i_0x7f);
emm0 = _mm_slli_epi32(emm0, 23);
- return pmul(y, _mm_castsi128_ps(emm0));
+ return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d pexp<Packet2d>(const Packet2d& _x)
@@ -195,7 +195,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
_EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
- Packet2d tmp = _mm_setzero_pd(), fx;
+ Packet2d tmp, fx;
Packet4i emm0;
// clamp x
@@ -239,7 +239,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
emm0 = _mm_add_epi32(emm0, p4i_1023_0);
emm0 = _mm_slli_epi32(emm0, 20);
emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
- return pmul(x, _mm_castsi128_pd(emm0));
+ return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
}
/* evaluation of 4 sines at onces, using SSE2 intrinsics.
@@ -279,7 +279,7 @@ Packet4f psin<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
- Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
+ Packet4f xmm1, xmm2, xmm3, sign_bit, y;
Packet4i emm0, emm2;
sign_bit = x;
@@ -378,7 +378,7 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
- Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
+ Packet4f xmm1, xmm2, xmm3, y;
Packet4i emm0, emm2;
x = pabs(x);
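The plog change at the top of this file swaps _mm_cmplt_ps for _mm_cmpnge_ps when building the invalid-input mask. Ordered SSE comparisons such as cmplt evaluate to false whenever an operand is NaN, so NaN inputs slipped through; the negated predicate "not greater-or-equal" is true for NaN and for negative values alike. In isolation:

    #include <xmmintrin.h>

    // All-ones lane where x < 0 OR x is NaN; zero lane otherwise.
    static inline __m128 invalid_inputs_mask(__m128 x)
    {
      return _mm_cmpnge_ps(x, _mm_setzero_ps());
    }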
diff --git a/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h b/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h
index c06a0df1c21..2a9d65b9473 100644
--- a/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h
+++ b/extern/Eigen3/Eigen/src/Core/products/CoeffBasedProduct.h
@@ -90,6 +90,7 @@ struct traits<CoeffBasedProduct<LhsNested,RhsNested,NestingFlags> >
| (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0),
CoeffReadCost = InnerSize == Dynamic ? Dynamic
+ : InnerSize == 0 ? 0
: InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)
+ (InnerSize - 1) * NumTraits<Scalar>::AddCost,
@@ -133,7 +134,7 @@ class CoeffBasedProduct
};
typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
- Unroll ? InnerSize-1 : Dynamic,
+ Unroll ? InnerSize : Dynamic,
_LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
@@ -184,7 +185,7 @@ class CoeffBasedProduct
{
PacketScalar res;
internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
- Unroll ? InnerSize-1 : Dynamic,
+ Unroll ? InnerSize : Dynamic,
_LhsNested, _RhsNested, PacketScalar, LoadMode>
::run(row, col, m_lhs, m_rhs, res);
return res;
@@ -242,12 +243,12 @@ struct product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
- res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
+ res += lhs.coeff(row, UnrollingIndex-1) * rhs.coeff(UnrollingIndex-1, col);
}
};
template<typename Lhs, typename Rhs, typename RetScalar>
-struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
+struct product_coeff_impl<DefaultTraversal, 1, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
@@ -257,15 +258,22 @@ struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
};
template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::Index Index;
+ static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
+ {
+ res = RetScalar(0);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
{
- eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
- res = lhs.coeff(row, 0) * rhs.coeff(0, col);
- for(Index i = 1; i < lhs.cols(); ++i)
- res += lhs.coeff(row, i) * rhs.coeff(i, col);
+ res = (lhs.row(row).transpose().cwiseProduct( rhs.col(col) )).sum();
}
};
@@ -295,6 +303,16 @@ struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
}
};
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<InnerVectorizedTraversal, 0, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::Index Index;
+ static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
+ {
+ res = 0;
+ }
+};
+
template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{
@@ -304,8 +322,7 @@ struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, Re
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
Packet pres;
- product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
- product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
+ product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
res = predux(pres);
}
};
@@ -373,7 +390,7 @@ struct product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
{
product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
- res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
+ res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex-1)), rhs.template packet<LoadMode>(UnrollingIndex-1, col), res);
}
};
@@ -384,12 +401,12 @@ struct product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
{
product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
- res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
+ res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex-1), pset1<Packet>(rhs.coeff(UnrollingIndex-1, col)), res);
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
-struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+struct product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
@@ -399,7 +416,7 @@ struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
-struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+struct product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
@@ -409,15 +426,34 @@ struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
+ {
+ res = pset1<Packet>(0);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
+ {
+ res = pset1<Packet>(0);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
{
- eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
- res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
- for(Index i = 1; i < lhs.cols(); ++i)
- res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
+ res = pset1<Packet>(0);
+ for(Index i = 0; i < lhs.cols(); ++i)
+ res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
}
};
@@ -427,10 +463,9 @@ struct product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
{
- eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
- res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
- for(Index i = 1; i < lhs.cols(); ++i)
- res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
+ res = pset1<Packet>(0);
+ for(Index i = 0; i < lhs.cols(); ++i)
+ res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
}
};
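The index shuffle through this file changes the unrolling convention: the template parameter now counts the terms still to be accumulated, so the degenerate InnerSize == 0 product gets a genuine base case that writes zero rather than reading an uninitialized matrix (the old code asserted lhs.cols() > 0 instead). The convention in miniature:

    // N counts remaining terms; the 0 case makes the empty product well defined.
    template<int N>
    struct dot_unroller
    {
      template<typename T>
      static T run(const T* a, const T* b)
      {
        return dot_unroller<N-1>::run(a, b) + a[N-1] * b[N-1];
      }
    };

    template<>
    struct dot_unroller<0>
    {
      template<typename T>
      static T run(const T*, const T*) { return T(0); }
    };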
diff --git a/extern/Eigen3/Eigen/src/Core/products/Parallelizer.h b/extern/Eigen3/Eigen/src/Core/products/Parallelizer.h
index 5c3e9b7ac15..6937ee33284 100644
--- a/extern/Eigen3/Eigen/src/Core/products/Parallelizer.h
+++ b/extern/Eigen3/Eigen/src/Core/products/Parallelizer.h
@@ -125,19 +125,22 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
if(transpose)
std::swap(rows,cols);
- Index blockCols = (cols / threads) & ~Index(0x3);
- Index blockRows = (rows / threads) & ~Index(0x7);
-
GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];
- #pragma omp parallel for schedule(static,1) num_threads(threads)
- for(Index i=0; i<threads; ++i)
+ #pragma omp parallel num_threads(threads)
{
+ Index i = omp_get_thread_num();
+ // Note that the actual number of threads might be lower than the number of requested ones.
+ Index actual_threads = omp_get_num_threads();
+
+ Index blockCols = (cols / actual_threads) & ~Index(0x3);
+ Index blockRows = (rows / actual_threads) & ~Index(0x7);
+
Index r0 = i*blockRows;
- Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
+ Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
- Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
+ Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols;
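The restructuring above moves the block-size computation inside the parallel region because OpenMP may grant fewer threads than requested; partitioning by the requested count would leave rows unassigned. The pattern in reduced form:

    #include <omp.h>

    void partition_rows(int rows)
    {
      #pragma omp parallel
      {
        int i      = omp_get_thread_num();
        int actual = omp_get_num_threads();   // may be < the requested count
        int block  = rows / actual;
        int r0     = i * block;
        int len    = (i + 1 == actual) ? rows - r0 : block;  // tail to last thread
        // ... process rows [r0, r0 + len) ...
        (void)r0; (void)len;
      }
    }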
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
index ba41a1c99f6..4cc56a42fd5 100644
--- a/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
@@ -109,7 +109,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
if (rows != depth) { \
\
- int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+ int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \
@@ -223,7 +223,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
/* Non-square case - doesn't fit to MKL ?TRMM. Fall to default triangular product or call MKL ?GEMM*/ \
if (cols != depth) { \
\
- int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+ int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
/* Most likely no benefit to call TRMM or GEMM from MKL*/ \
diff --git a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
index f103eae72c0..04240ab5032 100644
--- a/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
+++ b/extern/Eigen3/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -302,9 +302,12 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
for (Index i=0; i<actual_mc; ++i)
r[i] -= a[i] * b;
}
- Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j));
- for (Index i=0; i<actual_mc; ++i)
- r[i] *= b;
+ if((Mode & UnitDiag)==0)
+ {
+ Scalar b = conj(rhs(j,j));
+ for (Index i=0; i<actual_mc; ++i)
+ r[i] /= b;
+ }
}
// pack the just computed part of lhs to A
diff --git a/extern/Eigen3/Eigen/src/Core/util/Constants.h b/extern/Eigen3/Eigen/src/Core/util/Constants.h
index 14b9624e1d9..1e6277c4f96 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Constants.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Constants.h
@@ -433,6 +433,19 @@ struct MatrixXpr {};
/** The type used to identify an array expression */
struct ArrayXpr {};
+namespace internal {
+ /** \internal
+ * Constants for comparison functors
+ */
+ enum ComparisonName {
+ cmp_EQ = 0,
+ cmp_LT = 1,
+ cmp_LE = 2,
+ cmp_UNORD = 3,
+ cmp_NEQ = 4
+ };
+}
+
} // end namespace Eigen
#endif // EIGEN_CONSTANTS_H
diff --git a/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h b/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h
index d6a814586d3..f277720077b 100644
--- a/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h
+++ b/extern/Eigen3/Eigen/src/Core/util/ForwardDeclarations.h
@@ -235,6 +235,9 @@ template<typename Scalar> class Rotation2D;
template<typename Scalar> class AngleAxis;
template<typename Scalar,int Dim> class Translation;
+// Sparse module:
+template<typename Derived> class SparseMatrixBase;
+
#ifdef EIGEN2_SUPPORT
template<typename Derived, int _Dim> class eigen2_RotationBase;
template<typename Lhs, typename Rhs> class eigen2_Cross;
diff --git a/extern/Eigen3/Eigen/src/Core/util/MKL_support.h b/extern/Eigen3/Eigen/src/Core/util/MKL_support.h
index 1e6e355d626..1ef3b61db14 100644
--- a/extern/Eigen3/Eigen/src/Core/util/MKL_support.h
+++ b/extern/Eigen3/Eigen/src/Core/util/MKL_support.h
@@ -54,11 +54,60 @@
#endif
#if defined EIGEN_USE_MKL
+# include <mkl.h>
+/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/
+# ifndef INTEL_MKL_VERSION
+# undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
+# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/
+# undef EIGEN_USE_MKL
+# endif
+# ifndef EIGEN_USE_MKL
+ /*If the MKL version is too old, undef everything*/
+# undef EIGEN_USE_MKL_ALL
+# undef EIGEN_USE_BLAS
+# undef EIGEN_USE_LAPACKE
+# undef EIGEN_USE_MKL_VML
+# undef EIGEN_USE_LAPACKE_STRICT
+# undef EIGEN_USE_LAPACKE
+# endif
+#endif
-#include <mkl.h>
+#if defined EIGEN_USE_MKL
#include <mkl_lapacke.h>
#define EIGEN_MKL_VML_THRESHOLD 128
+/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */
+/* MKL_BLAS, etc are not defined in 11.2 */
+#ifdef MKL_DOMAIN_ALL
+#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL
+#else
+#define EIGEN_MKL_DOMAIN_ALL MKL_ALL
+#endif
+
+#ifdef MKL_DOMAIN_BLAS
+#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS
+#else
+#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS
+#endif
+
+#ifdef MKL_DOMAIN_FFT
+#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT
+#else
+#define EIGEN_MKL_DOMAIN_FFT MKL_FFT
+#endif
+
+#ifdef MKL_DOMAIN_VML
+#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML
+#else
+#define EIGEN_MKL_DOMAIN_VML MKL_VML
+#endif
+
+#ifdef MKL_DOMAIN_PARDISO
+#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO
+#else
+#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO
+#endif
+
namespace Eigen {
typedef std::complex<double> dcomplex;
diff --git a/extern/Eigen3/Eigen/src/Core/util/Macros.h b/extern/Eigen3/Eigen/src/Core/util/Macros.h
index 0088621acf7..53fb5fae420 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Macros.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Macros.h
@@ -13,7 +13,7 @@
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
-#define EIGEN_MINOR_VERSION 1
+#define EIGEN_MINOR_VERSION 7
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@@ -96,6 +96,27 @@
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
#endif
+// A Clang feature extension to determine compiler features.
+// We use it to determine 'cxx_rvalue_references'
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+// Do we support r-value references?
+#if (__has_feature(cxx_rvalue_references) || \
+ defined(__GXX_EXPERIMENTAL_CXX0X__) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1600))
+ #define EIGEN_HAVE_RVALUE_REFERENCES
+#endif
+
+
+// Cross compiler wrapper around LLVM's __has_builtin
+#ifdef __has_builtin
+# define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define EIGEN_HAS_BUILTIN(x) 0
+#endif
+
/** Allows to disable some optimizations which might affect the accuracy of the result.
* Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.
* They currently include:
@@ -247,7 +268,7 @@ namespace Eigen {
#if !defined(EIGEN_ASM_COMMENT)
#if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
- #define EIGEN_ASM_COMMENT(X) asm("#" X)
+ #define EIGEN_ASM_COMMENT(X) __asm__("#" X)
#else
#define EIGEN_ASM_COMMENT(X)
#endif
@@ -271,6 +292,7 @@ namespace Eigen {
#error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
#endif
+#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
#if EIGEN_ALIGN_STATICALLY
@@ -289,7 +311,8 @@ namespace Eigen {
#endif
#ifndef EIGEN_STACK_ALLOCATION_LIMIT
-#define EIGEN_STACK_ALLOCATION_LIMIT 20000
+// 131072 == 128 KB
+#define EIGEN_STACK_ALLOCATION_LIMIT 131072
#endif
#ifndef EIGEN_DEFAULT_IO_FORMAT
@@ -305,7 +328,7 @@ namespace Eigen {
// just an empty macro !
#define EIGEN_EMPTY
-#if defined(_MSC_VER) && (!defined(__INTEL_COMPILER))
+#if defined(_MSC_VER) && (_MSC_VER < 1900) && (!defined(__INTEL_COMPILER))
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =;
#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
@@ -324,8 +347,11 @@ namespace Eigen {
}
#endif
-#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
- EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
+/** \internal
+ * \brief Macro to manually inherit assignment operators.
+ * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
+ */
+#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
/**
* Just a side note. Commenting within defines works only by documenting
@@ -397,6 +423,8 @@ namespace Eigen {
#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
: ((int)a >= (int)b) ? (int)a : (int)b)
+#define EIGEN_ADD_COST(a,b) int(a)==Dynamic || int(b)==Dynamic ? Dynamic : int(a)+int(b)
+
#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
#define EIGEN_IMPLIES(a,b) (!(a) || (b))
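The __has_feature and EIGEN_HAS_BUILTIN wrappers added above follow the standard probe-with-fallback pattern: define the probe to 0 on compilers that lack it, so a single #if works everywhere. Usage sketch (HAVE_MOVE_SEMANTICS is an invented name):

    #ifndef __has_feature
    # define __has_feature(x) 0   // non-Clang compilers: every probe fails
    #endif

    #if __has_feature(cxx_rvalue_references) || defined(__GXX_EXPERIMENTAL_CXX0X__)
    # define HAVE_MOVE_SEMANTICS 1
    #else
    # define HAVE_MOVE_SEMANTICS 0
    #endif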
diff --git a/extern/Eigen3/Eigen/src/Core/util/Memory.h b/extern/Eigen3/Eigen/src/Core/util/Memory.h
index cacbd02fd12..b9af5cf8c7b 100644
--- a/extern/Eigen3/Eigen/src/Core/util/Memory.h
+++ b/extern/Eigen3/Eigen/src/Core/util/Memory.h
@@ -63,7 +63,7 @@
// Currently, let's include it only on unix systems:
#if defined(__unix__) || defined(__unix)
#include <unistd.h>
- #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+ #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
#define EIGEN_HAS_POSIX_MEMALIGN 1
#endif
#endif
@@ -272,12 +272,12 @@ inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
// The defined(_mm_free) is just here to verify that this MSVC version
// implements _mm_malloc/_mm_free based on the corresponding _aligned_
// functions. This may not always be the case and we just try to be safe.
- #if defined(_MSC_VER) && defined(_mm_free)
+ #if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free)
result = _aligned_realloc(ptr,new_size,16);
#else
result = generic_aligned_realloc(ptr,new_size,old_size);
#endif
-#elif defined(_MSC_VER)
+#elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
result = _aligned_realloc(ptr,new_size,16);
#else
result = handmade_aligned_realloc(ptr,new_size,old_size);
@@ -417,6 +417,8 @@ template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pt
template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
{
+ if(size==0)
+ return 0; // short-cut. Also fixes Bug 884
check_size_for_overflow<T>(size);
T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
if(NumTraits<T>::RequireInitialization)
@@ -464,9 +466,8 @@ template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *
template<typename Scalar, typename Index>
static inline Index first_aligned(const Scalar* array, Index size)
{
- enum { PacketSize = packet_traits<Scalar>::size,
- PacketAlignedMask = PacketSize-1
- };
+ static const Index PacketSize = packet_traits<Scalar>::size;
+ static const Index PacketAlignedMask = PacketSize-1;
if(PacketSize==1)
{
@@ -522,7 +523,7 @@ template<typename T> struct smart_copy_helper<T,false> {
// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function
#ifndef EIGEN_ALLOCA
- #if (defined __linux__)
+ #if (defined __linux__) || (defined __APPLE__) || (defined alloca)
#define EIGEN_ALLOCA alloca
#elif defined(_MSC_VER)
#define EIGEN_ALLOCA _alloca
@@ -612,7 +613,6 @@ template<typename T> class aligned_stack_memory_handler
void* operator new(size_t size, const std::nothrow_t&) throw() { \
try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
catch (...) { return 0; } \
- return 0; \
}
#else
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
@@ -777,9 +777,9 @@ namespace internal {
#ifdef EIGEN_CPUID
-inline bool cpuid_is_vendor(int abcd[4], const char* vendor)
+inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
{
- return abcd[1]==(reinterpret_cast<const int*>(vendor))[0] && abcd[3]==(reinterpret_cast<const int*>(vendor))[1] && abcd[2]==(reinterpret_cast<const int*>(vendor))[2];
+ return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
}
inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
@@ -921,13 +921,16 @@ inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
#ifdef EIGEN_CPUID
int abcd[4];
+ const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
+ const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
+ const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"
// identify the CPU vendor
EIGEN_CPUID(abcd,0x0,0);
int max_std_funcs = abcd[1];
- if(cpuid_is_vendor(abcd,"GenuineIntel"))
+ if(cpuid_is_vendor(abcd,GenuineIntel))
queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
- else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!"))
+ else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
queryCacheSizes_amd(l1,l2,l3);
else
// by default let's use Intel's API
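The vendor constants above are the ASCII vendor strings packed into little-endian 32-bit words (in the order CPUID reports them: ebx, edx, ecx), replacing a reinterpret_cast of a string literal that violated strict aliasing. A verification sketch, assuming a little-endian host as any CPUID-capable x86 is:

    #include <cassert>
    #include <cstring>

    int pack_le(const char* s)  // four ASCII characters -> one 32-bit word
    {
      int w;
      std::memcpy(&w, s, sizeof w);   // well defined, unlike reinterpret_cast
      return w;
    }

    void check_vendor_words()
    {
      assert(pack_le("Genu") == 0x756e6547);  // ebx
      assert(pack_le("ineI") == 0x49656e69);  // edx
      assert(pack_le("ntel") == 0x6c65746e);  // ecx
    }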
diff --git a/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h b/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h
index 8872c5b648e..bac5d9fe92b 100644
--- a/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h
+++ b/extern/Eigen3/Eigen/src/Core/util/StaticAssert.h
@@ -90,7 +90,9 @@
YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
- OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
+ OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG,
+ IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY,
+ STORAGE_LAYOUT_DOES_NOT_MATCH
};
};
diff --git a/extern/Eigen3/Eigen/src/Core/util/XprHelper.h b/extern/Eigen3/Eigen/src/Core/util/XprHelper.h
index 3c4773054b4..d05f8e5f6f4 100644
--- a/extern/Eigen3/Eigen/src/Core/util/XprHelper.h
+++ b/extern/Eigen3/Eigen/src/Core/util/XprHelper.h
@@ -341,7 +341,7 @@ template<typename T, int n=1, typename PlainObject = typename eval<T>::type> str
};
template<typename T>
-T* const_cast_ptr(const T* ptr)
+inline T* const_cast_ptr(const T* ptr)
{
return const_cast<T*>(ptr);
}
@@ -366,17 +366,17 @@ struct dense_xpr_base<Derived, ArrayXpr>
/** \internal Helper base class to add a scalar multiple operator
* overloads for complex types */
-template<typename Derived,typename Scalar,typename OtherScalar,
+template<typename Derived, typename Scalar, typename OtherScalar, typename BaseType,
bool EnableIt = !is_same<Scalar,OtherScalar>::value >
-struct special_scalar_op_base : public DenseCoeffsBase<Derived>
+struct special_scalar_op_base : public BaseType
{
// dummy operator* so that the
// "using special_scalar_op_base::operator*" compiles
void operator*() const;
};
-template<typename Derived,typename Scalar,typename OtherScalar>
-struct special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public DenseCoeffsBase<Derived>
+template<typename Derived,typename Scalar,typename OtherScalar, typename BaseType>
+struct special_scalar_op_base<Derived,Scalar,OtherScalar,BaseType,true> : public BaseType
{
const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
operator*(const OtherScalar& scalar) const
diff --git a/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h b/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h
index 0e6fdb4889d..7992d494425 100644
--- a/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h
+++ b/extern/Eigen3/Eigen/src/Eigen2Support/LeastSquares.h
@@ -147,7 +147,6 @@ void fitHyperplane(int numPoints,
// compute the covariance matrix
CovMatrixType covMat = CovMatrixType::Zero(size, size);
- VectorType remean = VectorType::Zero(size);
for(int i = 0; i < numPoints; ++i)
{
VectorType diff = (*(points[i]) - mean).conjugate();
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index af434bc9bd6..417c72944e1 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -234,6 +234,12 @@ template<typename _MatrixType> class ComplexEigenSolver
}
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
EigenvectorType m_eivec;
EigenvalueType m_eivalues;
ComplexSchur<MatrixType> m_schur;
@@ -251,6 +257,8 @@ template<typename MatrixType>
ComplexEigenSolver<MatrixType>&
ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
+ check_template_parameters();
+
// this code is inspired from Jampack
eigen_assert(matrix.cols() == matrix.rows());
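This check_template_parameters() pattern (repeated in the solvers below) defers the static assertion to compute(), so an invalid instantiation only errors out when actually used, and the failure names the real constraint instead of producing a deep template backtrace. Reduced sketch, using C++11 static_assert where Eigen uses its own macro:

    #include <type_traits>

    template<typename Scalar>
    struct SolverLike
    {
      static void check_template_parameters()
      {
        static_assert(!std::is_integral<Scalar>::value,
                      "NUMERIC_TYPE_MUST_BE_NON_INTEGER");
      }
      void compute() { check_template_parameters(); /* ... */ }
    };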
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h
index 6e7150685a2..20c59a7a2e6 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/EigenSolver.h
@@ -298,6 +298,13 @@ template<typename _MatrixType> class EigenSolver
void doComputeEigenvectors();
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+ }
+
MatrixType m_eivec;
EigenvalueType m_eivalues;
bool m_isInitialized;
@@ -364,6 +371,8 @@ template<typename MatrixType>
EigenSolver<MatrixType>&
EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
+ check_template_parameters();
+
using std::sqrt;
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
index dc240e13e13..956e80d9edc 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -263,6 +263,13 @@ template<typename _MatrixType> class GeneralizedEigenSolver
}
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+ }
+
MatrixType m_eivec;
ComplexVectorType m_alphas;
VectorType m_betas;
@@ -290,6 +297,8 @@ template<typename MatrixType>
GeneralizedEigenSolver<MatrixType>&
GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
{
+ check_template_parameters();
+
using std::sqrt;
using std::abs;
eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h b/extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h
index 5706eeebe91..aa3833ebad1 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/RealQZ.h
@@ -240,10 +240,10 @@ namespace Eigen {
m_S.coeffRef(i,j) = Scalar(0.0);
m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
+ // update Q
+ if (m_computeQZ)
+ m_Q.applyOnTheRight(i-1,i,G);
}
- // update Q
- if (m_computeQZ)
- m_Q.applyOnTheRight(i-1,i,G);
// kill T(i,i-1)
if(m_T.coeff(i,i-1)!=Scalar(0))
{
@@ -251,10 +251,10 @@ namespace Eigen {
m_T.coeffRef(i,i-1) = Scalar(0.0);
m_S.applyOnTheRight(i,i-1,G);
m_T.topRows(i).applyOnTheRight(i,i-1,G);
+ // update Z
+ if (m_computeQZ)
+ m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
- // update Z
- if (m_computeQZ)
- m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
}
}
@@ -313,7 +313,7 @@ namespace Eigen {
using std::abs;
using std::sqrt;
const Index dim=m_S.cols();
- if (abs(m_S.coeff(i+1,i)==Scalar(0)))
+ if (abs(m_S.coeff(i+1,i))==Scalar(0))
return;
Index z = findSmallDiagEntry(i,i+1);
if (z==i-1)
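The second RealQZ hunk fixes a misplaced parenthesis: abs(m_S.coeff(i+1,i)==Scalar(0)) applies abs() to the result of the comparison, a bool, rather than to the coefficient. The two forms happen to agree for an exact zero test, but the corrected one says what is meant. The trap in isolation:

```cpp
#include <cmath>
#include <cstdlib>
#include <iostream>

int main() {
  double x = -3.0;
  // The comparison binds first, so abs() receives a bool promoted to int:
  std::cout << std::abs(x == 0.0) << "\n"; // prints 0 (abs(false)), not 3
  std::cout << std::abs(x) << "\n";        // prints 3, the intended magnitude
  // An exact zero test coincides either way, but a tolerance-based variant
  // such as abs(x == 0.0) < eps would silently compare 0 or 1 against eps.
  return 0;
}
```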
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h
index 64d13634141..16d3875372e 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/RealSchur.h
@@ -234,7 +234,7 @@ template<typename _MatrixType> class RealSchur
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
- Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
+ Index findSmallSubdiagEntry(Index iu);
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
@@ -286,7 +286,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
{
while (iu >= 0)
{
- Index il = findSmallSubdiagEntry(iu, norm);
+ Index il = findSmallSubdiagEntry(iu);
// Check for convergence
if (il == iu) // One root found
@@ -343,16 +343,14 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
-inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
- if (s == 0.0)
- s = norm;
- if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+ if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
break;
res--;
}
@@ -457,9 +455,7 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
- {
break;
- }
}
}
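The RealSchur change folds the s == 0 special case into the deflation test itself: with <= in place of <, a zero subdiagonal entry flanked by two zero diagonal entries (s == 0) deflates immediately, so the fallback to the whole-matrix norm, and the norm parameter carrying it, become dead weight. A hedged sketch of the patched scan, with T standing in for the quasi-triangular factor m_matT:

```cpp
#include <Eigen/Core>
#include <cmath>

// Returns the row index at which the active block can be deflated.
template <typename MatrixT>
int find_small_subdiag(const MatrixT& T, int iu) {
  using std::abs;
  typedef typename MatrixT::Scalar Scalar;
  int res = iu;
  while (res > 0) {
    Scalar s = abs(T(res - 1, res - 1)) + abs(T(res, res));
    // '<=' also deflates when s == 0, which the old '<' missed.
    if (abs(T(res, res - 1)) <= Eigen::NumTraits<Scalar>::epsilon() * s)
      break;
    --res;
  }
  return res;
}
```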
diff --git a/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index 3993046a88e..1131c8af8ad 100644
--- a/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/extern/Eigen3/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -80,6 +80,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
+
+ typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;
/** \brief Real scalar type for \p _MatrixType.
*
@@ -225,7 +227,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
*
* \sa eigenvalues()
*/
- const MatrixType& eigenvectors() const
+ const EigenvectorsType& eigenvectors() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
@@ -351,7 +353,12 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
#endif // EIGEN2_SUPPORT
protected:
- MatrixType m_eivec;
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
+ EigenvectorsType m_eivec;
RealVectorType m_eivalues;
typename TridiagonalizationType::SubDiagonalType m_subdiag;
ComputationInfo m_info;
@@ -376,7 +383,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
* "implicit symmetric QR step with Wilkinson shift"
*/
namespace internal {
-template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
+template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
}
@@ -384,6 +391,8 @@ template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const MatrixType& matrix, int options)
{
+ check_template_parameters();
+
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
@@ -406,7 +415,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
// declare some aliases
RealVectorType& diag = m_eivalues;
- MatrixType& mat = m_eivec;
+ EigenvectorsType& mat = m_eivec;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
mat = matrix.template triangularView<Lower>();
@@ -442,7 +451,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
while (start>0 && m_subdiag[start-1]!=0)
start--;
- internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
+ internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
}
if (iter <= m_maxIterations * n)
@@ -490,7 +499,13 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename SolverType::EigenvectorsType EigenvectorsType;
+ /** \internal
+ * Computes the roots of the characteristic polynomial of \a m.
+ * For numerical stability, m.trace() should be near zero; to avoid over- or underflow, m should be normalized.
+ */
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
@@ -510,148 +525,123 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
// Construct the parameters used in classifying the roots of the equation
// and in solving the equation for the roots in closed form.
Scalar c2_over_3 = c2*s_inv3;
- Scalar a_over_3 = (c1 - c2*c2_over_3)*s_inv3;
- if (a_over_3 > Scalar(0))
+ Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
+ if(a_over_3<Scalar(0))
a_over_3 = Scalar(0);
Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
- Scalar q = half_b*half_b + a_over_3*a_over_3*a_over_3;
- if (q > Scalar(0))
+ Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
+ if(q<Scalar(0))
q = Scalar(0);
// Compute the eigenvalues by solving for the roots of the polynomial.
- Scalar rho = sqrt(-a_over_3);
- Scalar theta = atan2(sqrt(-q),half_b)*s_inv3;
+ Scalar rho = sqrt(a_over_3);
+ Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
Scalar cos_theta = cos(theta);
Scalar sin_theta = sin(theta);
- roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta;
- roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta);
- roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta);
-
- // Sort in increasing order.
- if (roots(0) >= roots(1))
- std::swap(roots(0),roots(1));
- if (roots(1) >= roots(2))
- {
- std::swap(roots(1),roots(2));
- if (roots(0) >= roots(1))
- std::swap(roots(0),roots(1));
- }
+ // roots are already sorted, since cos is monotonically decreasing on [0, pi]
+ roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
+ roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
+ roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
}
-
+
+ static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
+ {
+ using std::abs;
+ Index i0;
+ // Find non-zero column i0 (by construction, there must exist a non-zero coefficient on the diagonal):
+ mat.diagonal().cwiseAbs().maxCoeff(&i0);
+ // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
+ // so let's save it:
+ representative = mat.col(i0);
+ Scalar n0, n1;
+ VectorType c0, c1;
+ n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
+ n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
+ if(n0>n1) res = c0/std::sqrt(n0);
+ else res = c1/std::sqrt(n1);
+
+ return true;
+ }
+
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
- using std::sqrt;
eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
- MatrixType& eivecs = solver.m_eivec;
+ EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
- // map the matrix coefficients to [-1:1] to avoid over- and underflow.
- Scalar scale = mat.cwiseAbs().maxCoeff();
- MatrixType scaledMat = mat / scale;
+ // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
+ Scalar shift = mat.trace() / Scalar(3);
+ // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
+ MatrixType scaledMat = mat.template selfadjointView<Lower>();
+ scaledMat.diagonal().array() -= shift;
+ Scalar scale = scaledMat.cwiseAbs().maxCoeff();
+ if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations
// compute the eigenvalues
computeRoots(scaledMat,eivals);
- // compute the eigen vectors
+ // compute the eigenvectors
if(computeEigenvectors)
{
- Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
- safeNorm2 *= safeNorm2;
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
{
+ // All three eigenvalues are numerically the same
eivecs.setIdentity();
}
else
{
- scaledMat = scaledMat.template selfadjointView<Lower>();
MatrixType tmp;
tmp = scaledMat;
+ // Compute the eigenvector of the most distinct eigenvalue
Scalar d0 = eivals(2) - eivals(1);
Scalar d1 = eivals(1) - eivals(0);
- int k = d0 > d1 ? 2 : 0;
- d0 = d0 > d1 ? d1 : d0;
-
- tmp.diagonal().array () -= eivals(k);
- VectorType cross;
- Scalar n;
- n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();
-
- if(n>safeNorm2)
- eivecs.col(k) = cross / sqrt(n);
- else
+ Index k(0), l(2);
+ if(d0 > d1)
{
- n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();
-
- if(n>safeNorm2)
- eivecs.col(k) = cross / sqrt(n);
- else
- {
- n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();
-
- if(n>safeNorm2)
- eivecs.col(k) = cross / sqrt(n);
- else
- {
- // the input matrix and/or the eigenvaues probably contains some inf/NaN,
- // => exit
- // scale back to the original size.
- eivals *= scale;
-
- solver.m_info = NumericalIssue;
- solver.m_isInitialized = true;
- solver.m_eigenvectorsOk = computeEigenvectors;
- return;
- }
- }
+ std::swap(k,l);
+ d0 = d1;
}
- tmp = scaledMat;
- tmp.diagonal().array() -= eivals(1);
+ // Compute the eigenvector of index k
+ {
+ tmp.diagonal().array () -= eivals(k);
+ // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
+ extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
+ }
- if(d0<=Eigen::NumTraits<Scalar>::epsilon())
- eivecs.col(1) = eivecs.col(k).unitOrthogonal();
+ // Compute eigenvector of index l
+ if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
+ {
+ // If d0 is too small, then the two other eigenvalues are numerically the same,
+ // and thus we only have to ortho-normalize the near orthogonal vector we saved above.
+ eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(k); // Gram-Schmidt: remove the component along col(k)
+ eivecs.col(l).normalize();
+ }
else
{
- n = (cross = eivecs.col(k).cross(tmp.row(0).normalized())).squaredNorm();
- if(n>safeNorm2)
- eivecs.col(1) = cross / sqrt(n);
- else
- {
- n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
- if(n>safeNorm2)
- eivecs.col(1) = cross / sqrt(n);
- else
- {
- n = (cross = eivecs.col(k).cross(tmp.row(2))).squaredNorm();
- if(n>safeNorm2)
- eivecs.col(1) = cross / sqrt(n);
- else
- {
- // we should never reach this point,
- // if so the last two eigenvalues are likely to ve very closed to each other
- eivecs.col(1) = eivecs.col(k).unitOrthogonal();
- }
- }
- }
-
- // make sure that eivecs[1] is orthogonal to eivecs[2]
- Scalar d = eivecs.col(1).dot(eivecs.col(k));
- eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
+ tmp = scaledMat;
+ tmp.diagonal().array () -= eivals(l);
+
+ VectorType dummy;
+ extract_kernel(tmp, eivecs.col(l), dummy);
}
- eivecs.col(k==2 ? 0 : 2) = eivecs.col(k).cross(eivecs.col(1)).normalized();
+ // Compute last eigenvector from the other two
+ eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
}
}
+
// Rescale back to the original size.
eivals *= scale;
+ eivals.array() += shift;
solver.m_info = Success;
solver.m_isInitialized = true;
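This rewritten 3x3 path (shift by trace/3, closed-form trigonometric roots, kernel extraction instead of cascaded cross-product fallbacks) is what SelfAdjointEigenSolver::computeDirect() dispatches to. A small usage sketch, assuming only the public API:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 2, -1,  0,
      -1,  2, -1,
       0, -1,  2;
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es;
  es.computeDirect(A); // closed-form path patched above
  // es.compute(A) would use the iterative tridiagonal QR algorithm instead.
  std::cout << "eigenvalues:\n"  << es.eigenvalues()  << "\n";
  std::cout << "eigenvectors:\n" << es.eigenvectors() << "\n";
  return 0;
}
```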
@@ -665,11 +655,12 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
+ typedef typename SolverType::EigenvectorsType EigenvectorsType;
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
- const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
+ const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
roots(1) = t1 + t0;
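Replacing m(1,0)*m(1,0) with numext::abs2(m(1,0)) matters for complex self-adjoint matrices: squaring a complex off-diagonal entry yields another complex number, while abs2 yields the squared modulus, the real quantity the 2x2 eigenvalue formula needs. In isolation:

```cpp
#include <Eigen/Core>
#include <complex>
#include <iostream>

int main() {
  std::complex<double> z(3.0, 4.0);
  std::cout << z * z << "\n";                  // (-7,24): a complex square
  std::cout << Eigen::numext::abs2(z) << "\n"; // 25: |z|^2, always real
  return 0;
}
```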
@@ -678,13 +669,15 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
using std::sqrt;
+ using std::abs;
+
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
- MatrixType& eivecs = solver.m_eivec;
+ EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
@@ -698,22 +691,29 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
// compute the eigen vectors
if(computeEigenvectors)
{
- scaledMat.diagonal().array () -= eivals(1);
- Scalar a2 = numext::abs2(scaledMat(0,0));
- Scalar c2 = numext::abs2(scaledMat(1,1));
- Scalar b2 = numext::abs2(scaledMat(1,0));
- if(a2>c2)
+ if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
{
- eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
- eivecs.col(1) /= sqrt(a2+b2);
+ eivecs.setIdentity();
}
else
{
- eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
- eivecs.col(1) /= sqrt(c2+b2);
- }
+ scaledMat.diagonal().array () -= eivals(1);
+ Scalar a2 = numext::abs2(scaledMat(0,0));
+ Scalar c2 = numext::abs2(scaledMat(1,1));
+ Scalar b2 = numext::abs2(scaledMat(1,0));
+ if(a2>c2)
+ {
+ eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
+ eivecs.col(1) /= sqrt(a2+b2);
+ }
+ else
+ {
+ eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
+ eivecs.col(1) /= sqrt(c2+b2);
+ }
- eivecs.col(0) << eivecs.col(1).unitOrthogonal();
+ eivecs.col(0) << eivecs.col(1).unitOrthogonal();
+ }
}
// Rescale back to the original size.
@@ -736,7 +736,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
}
namespace internal {
-template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
+template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
using std::abs;
@@ -788,8 +788,7 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
// apply the givens rotation to the unit matrix Q = Q * G
if (matrixQ)
{
- // FIXME if StorageOrder == RowMajor this operation is not very efficient
- Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
+ Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor> > q(matrixQ,n,n);
q.applyOnTheRight(k,k+1,rot);
}
}
diff --git a/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h b/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h
index 8e186d57a34..7e1cd9eb79c 100644
--- a/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h
+++ b/extern/Eigen3/Eigen/src/Geometry/AlignedBox.h
@@ -19,10 +19,12 @@ namespace Eigen {
*
* \brief An axis aligned box
*
- * \param _Scalar the type of the scalar coefficients
- * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+ * \tparam _Scalar the type of the scalar coefficients
+ * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
*
* This class represents an axis aligned box as a pair of the minimal and maximal corners.
+ * \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
+ * \sa alignedboxtypedefs
*/
template <typename _Scalar, int _AmbientDim>
class AlignedBox
@@ -40,18 +42,21 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
enum CornerType
{
- /** 1D names */
+ /** 1D names @{ */
Min=0, Max=1,
+ /** @} */
- /** Added names for 2D */
+ /** Identifiers for 2D corners @{ */
BottomLeft=0, BottomRight=1,
TopLeft=2, TopRight=3,
+ /** @} */
- /** Added names for 3D */
+ /** Identifiers for 3D corners @{ */
BottomLeftFloor=0, BottomRightFloor=1,
TopLeftFloor=2, TopRightFloor=3,
BottomLeftCeil=4, BottomRightCeil=5,
TopLeftCeil=6, TopRightCeil=7
+ /** @} */
};
@@ -63,34 +68,33 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
{ setEmpty(); }
- /** Constructs a box with extremities \a _min and \a _max. */
+ /** Constructs a box with extremities \a _min and \a _max.
+ * \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
template<typename OtherVectorType1, typename OtherVectorType2>
inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
/** Constructs a box containing a single point \a p. */
template<typename Derived>
- inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
- {
- typename internal::nested<Derived,2>::type p(a_p.derived());
- m_min = p;
- m_max = p;
- }
+ inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
+ { }
~AlignedBox() {}
/** \returns the dimension in which the box holds */
inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
- /** \deprecated use isEmpty */
+ /** \deprecated use isEmpty() */
inline bool isNull() const { return isEmpty(); }
- /** \deprecated use setEmpty */
+ /** \deprecated use setEmpty() */
inline void setNull() { setEmpty(); }
- /** \returns true if the box is empty. */
+ /** \returns true if the box is empty.
+ * \sa setEmpty */
inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
- /** Makes \c *this an empty box. */
+ /** Makes \c *this an empty box.
+ * \sa isEmpty */
inline void setEmpty()
{
m_min.setConstant( ScalarTraits::highest() );
@@ -159,7 +163,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
* a uniform distribution */
inline VectorType sample() const
{
- VectorType r;
+ VectorType r(dim());
for(Index d=0; d<dim(); ++d)
{
if(!ScalarTraits::IsInteger)
@@ -175,27 +179,34 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns true if the point \a p is inside the box \c *this. */
template<typename Derived>
- inline bool contains(const MatrixBase<Derived>& a_p) const
+ inline bool contains(const MatrixBase<Derived>& p) const
{
- typename internal::nested<Derived,2>::type p(a_p.derived());
- return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
+ typename internal::nested<Derived,2>::type p_n(p.derived());
+ return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
}
/** \returns true if the box \a b is entirely inside the box \c *this. */
inline bool contains(const AlignedBox& b) const
{ return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
- /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
+ /** \returns true if the box \a b is intersecting the box \c *this.
+ * \sa intersection, clamp */
+ inline bool intersects(const AlignedBox& b) const
+ { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }
+
+ /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
+ * \sa extend(const AlignedBox&) */
template<typename Derived>
- inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
+ inline AlignedBox& extend(const MatrixBase<Derived>& p)
{
- typename internal::nested<Derived,2>::type p(a_p.derived());
- m_min = m_min.cwiseMin(p);
- m_max = m_max.cwiseMax(p);
+ typename internal::nested<Derived,2>::type p_n(p.derived());
+ m_min = m_min.cwiseMin(p_n);
+ m_max = m_max.cwiseMax(p_n);
return *this;
}
- /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
+ /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
+ * \sa merged, extend(const MatrixBase&) */
inline AlignedBox& extend(const AlignedBox& b)
{
m_min = m_min.cwiseMin(b.m_min);
@@ -203,7 +214,9 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}
- /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
+ /** Clamps \c *this by the box \a b and returns a reference to \c *this.
+ * \note If the boxes don't intersect, the resulting box is empty.
+ * \sa intersection(), intersects() */
inline AlignedBox& clamp(const AlignedBox& b)
{
m_min = m_min.cwiseMax(b.m_min);
@@ -211,11 +224,15 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}
- /** Returns an AlignedBox that is the intersection of \a b and \c *this */
+ /** Returns an AlignedBox that is the intersection of \a b and \c *this
+ * \note If the boxes don't intersect, the resulting box is empty.
+ * \sa intersects(), clamp, contains() */
inline AlignedBox intersection(const AlignedBox& b) const
{return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
- /** Returns an AlignedBox that is the union of \a b and \c *this */
+ /** Returns an AlignedBox that is the union of \a b and \c *this.
+ * \note Merging with an empty box may result in a box bigger than \c *this.
+ * \sa extend(const AlignedBox&) */
inline AlignedBox merged(const AlignedBox& b) const
{ return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
@@ -231,20 +248,20 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns the squared distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
- * \sa exteriorDistance()
+ * \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
*/
template<typename Derived>
- inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
+ inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;
/** \returns the squared distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
- * \sa exteriorDistance()
+ * \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
*/
inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
/** \returns the distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
- * \sa squaredExteriorDistance()
+ * \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
*/
template<typename Derived>
inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
@@ -252,7 +269,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns the distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
- * \sa squaredExteriorDistance()
+ * \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
*/
inline NonInteger exteriorDistance(const AlignedBox& b) const
{ using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); }
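Among the AlignedBox changes, intersects() is a genuinely new method: it answers the overlap question directly, whereas intersection() builds the (possibly empty) overlap box. A quick usage sketch:

```cpp
#include <Eigen/Geometry>
#include <iostream>

int main() {
  Eigen::AlignedBox3d a(Eigen::Vector3d(0, 0, 0), Eigen::Vector3d(2, 2, 2));
  Eigen::AlignedBox3d b(Eigen::Vector3d(1, 1, 1), Eigen::Vector3d(3, 3, 3));
  std::cout << a.intersects(b) << "\n";             // 1: the boxes overlap
  std::cout << a.intersection(b).isEmpty() << "\n"; // 0: overlap box is non-empty
  return 0;
}
```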
diff --git a/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h b/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
index 553d38c7449..bbf6a7ed8ed 100644
--- a/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
+++ b/extern/Eigen3/Eigen/src/Geometry/AngleAxis.h
@@ -131,7 +131,7 @@ public:
m_angle = Scalar(other.angle());
}
- static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); }
+ static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); }
/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
@@ -165,8 +165,8 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
Scalar n2 = q.vec().squaredNorm();
if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
{
- m_angle = 0;
- m_axis << 1, 0, 0;
+ m_angle = Scalar(0);
+ m_axis << Scalar(1), Scalar(0), Scalar(0);
}
else
{
diff --git a/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h b/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h
index 00e71d190c3..372e422b92c 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Homogeneous.h
@@ -79,7 +79,7 @@ template<typename MatrixType,int _Direction> class Homogeneous
{
if( (int(Direction)==Vertical && row==m_matrix.rows())
|| (int(Direction)==Horizontal && col==m_matrix.cols()))
- return 1;
+ return Scalar(1);
return m_matrix.coeff(row, col);
}
diff --git a/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h b/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h
index aeff43fefa6..00b7c4300fd 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Hyperplane.h
@@ -100,7 +100,17 @@ public:
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
Hyperplane result(p0.size());
- result.normal() = (p2 - p0).cross(p1 - p0).normalized();
+ VectorType v0(p2 - p0), v1(p1 - p0);
+ result.normal() = v0.cross(v1);
+ RealScalar norm = result.normal().norm();
+ if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon())
+ {
+ Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
+ JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
+ result.normal() = svd.matrixV().col(2);
+ }
+ else
+ result.normal() /= norm;
result.offset() = -p0.dot(result.normal());
return result;
}
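Previously, Through() returned a NaN normal for (nearly) collinear points, since the cross product of the spanning vectors then vanishes; the patch detects that case and falls back to an SVD of the two spanning vectors. A sketch of the degenerate case, assuming only the public API:

```cpp
#include <Eigen/Geometry>
#include <iostream>

int main() {
  // Collinear points: the cross product of the spanning vectors is zero.
  Eigen::Vector3d p0(0, 0, 0), p1(1, 0, 0), p2(2, 0, 0);
  Eigen::Hyperplane<double, 3> plane =
      Eigen::Hyperplane<double, 3>::Through(p0, p1, p2);
  // With the SVD fallback the normal is a valid unit vector instead of NaN.
  std::cout << plane.normal().transpose() << "\n";
  return 0;
}
```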
diff --git a/extern/Eigen3/Eigen/src/Geometry/Quaternion.h b/extern/Eigen3/Eigen/src/Geometry/Quaternion.h
index 9fee0c91980..25ed17bb690 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Quaternion.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Quaternion.h
@@ -102,11 +102,11 @@ public:
/** \returns a quaternion representing an identity rotation
* \sa MatrixBase::Identity()
*/
- static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
+ static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); }
/** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
*/
- inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
+ inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; }
/** \returns the squared norm of the quaternion's coefficients
* \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
@@ -161,7 +161,7 @@ public:
{ return coeffs().isApprox(other.coeffs(), prec); }
/** return the result vector of \a v through the rotation*/
- EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
+ EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;
/** \returns \c *this with scalar type casted to \a NewScalarType
*
@@ -203,6 +203,8 @@ public:
* \li \c Quaternionf for \c float
* \li \c Quaterniond for \c double
*
+ * \warning Operations interpreting the quaternion as a rotation have undefined behavior if the quaternion is not normalized.
+ *
* \sa class AngleAxis, class Transform
*/
@@ -229,7 +231,7 @@ class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
public:
typedef _Scalar Scalar;
- EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)
using Base::operator*=;
typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
@@ -339,12 +341,12 @@ class Map<const Quaternion<_Scalar>, _Options >
public:
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
- EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
*
- * The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order:
+ * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
* \code *coeffs == {x, y, z, w} \endcode
*
* If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
@@ -376,7 +378,7 @@ class Map<Quaternion<_Scalar>, _Options >
public:
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
- EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
@@ -459,12 +461,12 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const Quaterni
*/
template <class Derived>
EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
-QuaternionBase<Derived>::_transformVector(Vector3 v) const
+QuaternionBase<Derived>::_transformVector(const Vector3& v) const
{
// Note that this algorithm comes from the optimization by hand
// of the conversion to a Matrix followed by a Matrix/Vector product.
// It appears to be much faster than the common algorithm found
- // in the litterature (30 versus 39 flops). It also requires two
+ // in the literature (30 versus 39 flops). It also requires two
// Vector3 as temporaries.
Vector3 uv = this->vec().cross(v);
uv += uv;
@@ -584,7 +586,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
// which yields a singular value problem
if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
{
- c = max<Scalar>(c,-1);
+ c = (max)(c,Scalar(-1));
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
Vector3 axis = svd.matrixV().col(2);
@@ -635,7 +637,7 @@ inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Der
{
// FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ??
Scalar n2 = this->squaredNorm();
- if (n2 > 0)
+ if (n2 > Scalar(0))
return Quaternion<Scalar>(conjugate().coeffs() / n2);
else
{
@@ -665,12 +667,10 @@ template <class OtherDerived>
inline typename internal::traits<Derived>::Scalar
QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
{
- using std::acos;
+ using std::atan2;
using std::abs;
- double d = abs(this->dot(other));
- if (d>=1.0)
- return Scalar(0);
- return static_cast<Scalar>(2 * acos(d));
+ Quaternion<Scalar> d = (*this) * other.conjugate();
+ return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) );
}
@@ -710,7 +710,7 @@ QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerive
scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
scale1 = sin( ( t * theta) ) / sinTheta;
}
- if(d<0) scale1 = -scale1;
+ if(d<Scalar(0)) scale1 = -scale1;
return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
}
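The angularDistance() rewrite trades acos of a clamped dot product for atan2 on the relative rotation d = *this * other.conjugate(). Near a dot product of 1 the derivative of acos blows up, so the old form lost its precision exactly where rotations are nearly identical; atan2 is well conditioned there. A small check, assuming only the public API:

```cpp
#include <Eigen/Geometry>
#include <iostream>

int main() {
  Eigen::Quaterniond q1(Eigen::AngleAxisd(0.3,        Eigen::Vector3d::UnitZ()));
  Eigen::Quaterniond q2(Eigen::AngleAxisd(0.3 + 1e-9, Eigen::Vector3d::UnitZ()));
  // The atan2-based formula resolves this tiny angle; the acos-based one
  // rounds the dot product to 1 and reports 0.
  std::cout << q1.angularDistance(q2) << "\n"; // ~1e-9
  return 0;
}
```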
diff --git a/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h b/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h
index 1cac343a5ee..a2d59fce10f 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Rotation2D.h
@@ -60,6 +60,9 @@ public:
/** Construct a 2D counter-clockwise rotation from the angle \a a in radians. */
inline Rotation2D(const Scalar& a) : m_angle(a) {}
+
+ /** Default constructor without initialization. The represented rotation is undefined. */
+ Rotation2D() {}
/** \returns the rotation angle */
inline Scalar angle() const { return m_angle; }
@@ -81,10 +84,10 @@ public:
/** Applies the rotation to a 2D vector */
Vector2 operator* (const Vector2& vec) const
{ return toRotationMatrix() * vec; }
-
+
template<typename Derived>
Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
- Matrix2 toRotationMatrix(void) const;
+ Matrix2 toRotationMatrix() const;
/** \returns the spherical interpolation between \c *this and \a other using
* parameter \a t. It is in fact equivalent to a linear interpolation.
diff --git a/extern/Eigen3/Eigen/src/Geometry/Transform.h b/extern/Eigen3/Eigen/src/Geometry/Transform.h
index 498fea41a90..e786e535695 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Transform.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Transform.h
@@ -62,6 +62,8 @@ struct transform_construct_from_matrix;
template<typename TransformType> struct transform_take_affine_part;
+template<int Mode> struct transform_make_affine;
+
} // end namespace internal
/** \geometry_module \ingroup Geometry_Module
@@ -194,9 +196,9 @@ public:
/** type of the matrix used to represent the linear part of the transformation */
typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
/** type of read/write reference to the linear part of the transformation */
- typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact)> LinearPart;
+ typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
/** type of read reference to the linear part of the transformation */
- typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact)> ConstLinearPart;
+ typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart;
/** type of read/write reference to the affine part of the transformation */
typedef typename internal::conditional<int(Mode)==int(AffineCompact),
MatrixType&,
@@ -230,8 +232,7 @@ public:
inline Transform()
{
check_template_params();
- if (int(Mode)==Affine)
- makeAffine();
+ internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix);
}
inline Transform(const Transform& other)
@@ -591,11 +592,7 @@ public:
*/
void makeAffine()
{
- if(int(Mode)!=int(AffineCompact))
- {
- matrix().template block<1,Dim>(Dim,0).setZero();
- matrix().coeffRef(Dim,Dim) = Scalar(1);
- }
+ internal::transform_make_affine<int(Mode)>::run(m_matrix);
}
/** \internal
@@ -1079,6 +1076,24 @@ Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBas
namespace internal {
+template<int Mode>
+struct transform_make_affine
+{
+ template<typename MatrixType>
+ static void run(MatrixType &mat)
+ {
+ static const int Dim = MatrixType::ColsAtCompileTime-1;
+ mat.template block<1,Dim>(Dim,0).setZero();
+ mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1);
+ }
+};
+
+template<>
+struct transform_make_affine<AffineCompact>
+{
+ template<typename MatrixType> static void run(MatrixType &) { }
+};
+
// selector needed to avoid taking the inverse of a 3x4 matrix
template<typename TransformType, int Mode=TransformType::Mode>
struct projective_transform_inverse
diff --git a/extern/Eigen3/Eigen/src/Geometry/Umeyama.h b/extern/Eigen3/Eigen/src/Geometry/Umeyama.h
index 345b47e0c37..5e20662f803 100644
--- a/extern/Eigen3/Eigen/src/Geometry/Umeyama.h
+++ b/extern/Eigen3/Eigen/src/Geometry/Umeyama.h
@@ -113,7 +113,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
const Index n = src.cols(); // number of measurements
// required for demeaning ...
- const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
+ const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);
// computation of mean
const VectorType src_mean = src.rowwise().sum() * one_over_n;
@@ -136,16 +136,16 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
// Eq. (39)
VectorType S = VectorType::Ones(m);
- if (sigma.determinant()<0) S(m-1) = -1;
+ if (sigma.determinant()<Scalar(0)) S(m-1) = Scalar(-1);
// Eq. (40) and (43)
const VectorType& d = svd.singularValues();
Index rank = 0; for (Index i=0; i<m; ++i) if (!internal::isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
if (rank == m-1) {
- if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
+ if ( svd.matrixU().determinant() * svd.matrixV().determinant() > Scalar(0) ) {
Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
} else {
- const Scalar s = S(m-1); S(m-1) = -1;
+ const Scalar s = S(m-1); S(m-1) = Scalar(-1);
Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
S(m-1) = s;
}
@@ -156,7 +156,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
if (with_scaling)
{
// Eq. (42)
- const Scalar c = 1/src_var * svd.singularValues().dot(S);
+ const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);
// Eq. (41)
Rt.col(m).head(m) = dst_mean;
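The Umeyama edits are Scalar(...) hygiene for custom scalar types; the algorithm still recovers the similarity transform minimizing the least-squares error between two point sets, with the scale c from Eq. (42). A usage sketch with a known ground truth:

```cpp
#include <Eigen/Geometry>
#include <iostream>

int main() {
  // Four 3D points as columns.
  Eigen::MatrixXd src(3, 4);
  src << 0, 1, 0, 0,
         0, 0, 1, 0,
         0, 0, 0, 1;
  Eigen::Matrix3d R = Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ()).toRotationMatrix();
  Eigen::Vector3d t(1, 2, 3);
  double c = 2.0; // ground-truth scale
  Eigen::MatrixXd dst = (c * R * src).colwise() + t;
  // Recovers the 4x4 similarity transform [cR t; 0 1] in the least-squares sense.
  Eigen::Matrix4d Rt = Eigen::umeyama(src, dst, true);
  std::cout << Rt << "\n";
  return 0;
}
```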
diff --git a/extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h b/extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h
index 1991c652738..60dbea5f56a 100644
--- a/extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h
+++ b/extern/Eigen3/Eigen/src/Householder/BlockHouseholder.h
@@ -48,7 +48,7 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec
typedef typename MatrixType::Index Index;
enum { TFactorSize = MatrixType::ColsAtCompileTime };
Index nbVecs = vectors.cols();
- Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize> T(nbVecs,nbVecs);
+ Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, ColMajor> T(nbVecs,nbVecs);
make_block_householder_triangular_factor(T, vectors, hCoeffs);
const TriangularView<const VectorsType, UnitLower>& V(vectors);
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
index 73ca9bfde6a..1f3c060d028 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
@@ -65,10 +65,10 @@ class DiagonalPreconditioner
{
typename MatType::InnerIterator it(mat,j);
while(it && it.index()!=j) ++it;
- if(it && it.index()==j)
+ if(it && it.index()==j && it.value()!=Scalar(0))
m_invdiag(j) = Scalar(1)/it.value();
else
- m_invdiag(j) = 0;
+ m_invdiag(j) = Scalar(1);
}
m_isInitialized = true;
return *this;
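The DiagonalPreconditioner fix changes the handling of a zero or missing diagonal entry: storing 0 would zero out that component of every preconditioned residual and derail the solver, so the entry now falls back to 1, i.e. the identity on that row. The patched rule in isolation, assuming a plain dense array of diagonal values:

```cpp
#include <vector>

// invdiag[j] = 1/diag[j] where that is well defined, identity otherwise.
std::vector<double> invert_diagonal(const std::vector<double>& diag) {
  std::vector<double> invdiag(diag.size());
  for (std::size_t j = 0; j < diag.size(); ++j)
    invdiag[j] = (diag[j] != 0.0) ? 1.0 / diag[j] : 1.0;
  return invdiag;
}
```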
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
index 6fc6ab85225..5512219076b 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -39,7 +39,6 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
int maxIters = iters;
int n = mat.cols();
- x = precond.solve(x);
VectorType r = rhs - mat * x;
VectorType r0 = r;
@@ -61,6 +60,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
VectorType s(n), t(n);
RealScalar tol2 = tol*tol;
+ RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
int i = 0;
int restarts = 0;
@@ -69,7 +69,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
Scalar rho_old = rho;
rho = r0.dot(r);
- if (internal::isMuchSmallerThan(rho,r0_sqnorm))
+ if (abs(rho) < eps2*r0_sqnorm)
{
// The new residual vector became too orthogonal to the arbitrarily chosen direction r0
// Let's restart with a new r0:
@@ -142,7 +142,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
* SparseMatrix<double> A(n,n);
* // fill A and b
* BiCGSTAB<SparseMatrix<double> > solver;
- * solver(A);
+ * solver.compute(A);
* x = solver.solve(b);
* std::cout << "#iterations: " << solver.iterations() << std::endl;
* std::cout << "estimated error: " << solver.error() << std::endl;
@@ -151,20 +151,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * solver.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = solver.solveWithGuess(b,x);
- * std::cout << i << " : " << solver.error() << std::endl;
- * ++i;
- * } while (solver.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
@@ -199,7 +186,8 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- BiCGSTAB(const MatrixType& A) : Base(A) {}
+ template<typename MatrixDerived>
+ explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~BiCGSTAB() {}
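Two of the BiCGSTAB fixes are visible from user code: the documentation now calls solver.compute(A), since no operator()(A) ever existed, and the constructor accepts any EigenBase expression. A corrected end-to-end sketch (the tridiagonal test matrix is an arbitrary choice for illustration):

```cpp
#include <Eigen/SparseCore>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>
#include <vector>

int main() {
  const int n = 100;
  std::vector<Eigen::Triplet<double> > coeffs;
  for (int i = 0; i < n; ++i) {
    coeffs.push_back(Eigen::Triplet<double>(i, i, 4.0));
    if (i + 1 < n) {
      coeffs.push_back(Eigen::Triplet<double>(i, i + 1, -1.0));
      coeffs.push_back(Eigen::Triplet<double>(i + 1, i, -1.0));
    }
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver;
  solver.compute(A); // the corrected call from the docs above
  Eigen::VectorXd x = solver.solve(b);
  std::cout << "#iterations: " << solver.iterations()
            << ", estimated error: " << solver.error() << "\n";
  return 0;
}
```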
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
index a74a8155e68..1a7e569c806 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -112,9 +112,9 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* This class allows solving sparse linear problems A.x = b using a conjugate gradient algorithm.
* The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
*
- * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
- * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
- * or Upper. Default is Lower.
+ * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
+ * Upper, or Lower|Upper, in which case the full matrix entries will be considered. Default is Lower.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
@@ -137,20 +137,7 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * cg.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = cg.solveWithGuess(b,x);
- * std::cout << i << " : " << cg.error() << std::endl;
- * ++i;
- * } while (cg.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
@@ -189,7 +176,8 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- ConjugateGradient(const MatrixType& A) : Base(A) {}
+ template<typename MatrixDerived>
+ explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~ConjugateGradient() {}
@@ -213,6 +201,10 @@ public:
template<typename Rhs,typename Dest>
void _solveWithGuess(const Rhs& b, Dest& x) const
{
+ typedef typename internal::conditional<UpLo==(Lower|Upper),
+ const MatrixType&,
+ SparseSelfAdjointView<const MatrixType, UpLo>
+ >::type MatrixWrapperType;
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@@ -222,8 +214,7 @@ public:
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
- Base::m_preconditioner, m_iterations, m_error);
+ internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
}
m_isInitialized = true;
@@ -234,7 +225,7 @@ public:
template<typename Rhs,typename Dest>
void _solve(const Rhs& b, Dest& x) const
{
- x.setOnes();
+ x.setZero();
_solveWithGuess(b,x);
}
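Besides resetting the default initial guess to zero (x.setOnes() in _solve was a plain bug), ConjugateGradient now understands UpLo == Lower|Upper and then iterates over the full matrix instead of wrapping one triangle in a SparseSelfAdjointView, which is faster when both triangles are actually stored. A sketch, with A and b built as in the BiCGSTAB example above:

```cpp
#include <Eigen/SparseCore>
#include <Eigen/IterativeLinearSolvers>

// Both triangles of the selfadjoint matrix A are stored, so let the
// solver traverse the full matrix rather than a triangular view of it.
Eigen::VectorXd solve_full(const Eigen::SparseMatrix<double>& A,
                           const Eigen::VectorXd& b) {
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>,
                           Eigen::Lower | Eigen::Upper> cg;
  cg.compute(A);
  return cg.solve(b);
}
```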
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
index b55afc13636..d3f37fea2a1 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -150,7 +150,6 @@ class IncompleteLUT : internal::noncopyable
{
analyzePattern(amat);
factorize(amat);
- m_isInitialized = m_factorizationIsOk;
return *this;
}
@@ -160,7 +159,7 @@ class IncompleteLUT : internal::noncopyable
template<typename Rhs, typename Dest>
void _solve(const Rhs& b, Dest& x) const
{
- x = m_Pinv * b;
+ x = m_Pinv * b;
x = m_lu.template triangularView<UnitLower>().solve(x);
x = m_lu.template triangularView<Upper>().solve(x);
x = m_P * x;
@@ -223,18 +222,29 @@ template<typename _MatrixType>
void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
{
// Compute the Fill-reducing permutation
+ // Since ILUT does not perform any numerical pivoting,
+ // it is highly preferable to keep the diagonal through symmetric permutations.
+#ifndef EIGEN_MPL2_ONLY
+ // To this end, let's symmetrize the pattern and perform AMD on it.
SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
- // Symmetrize the pattern
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
// on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
- AtA.prune(keep_diag());
- internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P); // Then compute the AMD ordering...
-
- m_Pinv = m_P.inverse(); // ... and the inverse permutation
+ AMDOrdering<Index> ordering;
+ ordering(AtA,m_P);
+ m_Pinv = m_P.inverse(); // cache the inverse permutation
+#else
+ // If AMD is not available (MPL2-only mode), then let's use the slower COLAMD routine.
+ SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
+ COLAMDOrdering<Index> ordering;
+ ordering(mat1,m_Pinv);
+ m_P = m_Pinv.inverse();
+#endif
m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ m_isInitialized = false;
}
template <typename Scalar>
@@ -442,6 +452,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
m_lu.makeCompressed();
m_factorizationIsOk = true;
+ m_isInitialized = m_factorizationIsOk;
m_info = Success;
}
diff --git a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
index 2036922d69c..501ef2f8d87 100644
--- a/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
+++ b/extern/Eigen3/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -49,10 +49,11 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- IterativeSolverBase(const MatrixType& A)
+ template<typename InputDerived>
+ IterativeSolverBase(const EigenBase<InputDerived>& A)
{
init();
- compute(A);
+ compute(A.derived());
}
~IterativeSolverBase() {}
@@ -62,9 +63,11 @@ public:
* Currently, this function mostly calls analyzePattern on the preconditioner. In the future
* we might, for instance, implement column reordering for faster matrix-vector products.
*/
- Derived& analyzePattern(const MatrixType& A)
+ template<typename InputDerived>
+ Derived& analyzePattern(const EigenBase<InputDerived>& A)
{
- m_preconditioner.analyzePattern(A);
+ grabInput(A.derived());
+ m_preconditioner.analyzePattern(*mp_matrix);
m_isInitialized = true;
m_analysisIsOk = true;
m_info = Success;
@@ -80,11 +83,12 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- Derived& factorize(const MatrixType& A)
+ template<typename InputDerived>
+ Derived& factorize(const EigenBase<InputDerived>& A)
{
+ grabInput(A.derived());
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
- mp_matrix = &A;
- m_preconditioner.factorize(A);
+ m_preconditioner.factorize(*mp_matrix);
m_factorizationIsOk = true;
m_info = Success;
return derived();
@@ -100,10 +104,11 @@ public:
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
- Derived& compute(const MatrixType& A)
+ template<typename InputDerived>
+ Derived& compute(const EigenBase<InputDerived>& A)
{
- mp_matrix = &A;
- m_preconditioner.compute(A);
+ grabInput(A.derived());
+ m_preconditioner.compute(*mp_matrix);
m_isInitialized = true;
m_analysisIsOk = true;
m_factorizationIsOk = true;
@@ -212,6 +217,28 @@ public:
}
protected:
+
+ template<typename InputDerived>
+ void grabInput(const EigenBase<InputDerived>& A)
+ {
+ // we const cast to prevent the creation of a MatrixType temporary by the compiler.
+ grabInput_impl(A.const_cast_derived());
+ }
+
+ template<typename InputDerived>
+ void grabInput_impl(const EigenBase<InputDerived>& A)
+ {
+ m_copyMatrix = A;
+ mp_matrix = &m_copyMatrix;
+ }
+
+ void grabInput_impl(MatrixType& A)
+ {
+ if(MatrixType::RowsAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==Dynamic)
+ m_copyMatrix.resize(0,0);
+ mp_matrix = &A;
+ }
+
void init()
{
m_isInitialized = false;
@@ -220,6 +247,7 @@ protected:
m_maxIterations = -1;
m_tolerance = NumTraits<Scalar>::epsilon();
}
+ MatrixType m_copyMatrix;
const MatrixType* mp_matrix;
Preconditioner m_preconditioner;
diff --git a/extern/Eigen3/Eigen/src/LU/FullPivLU.h b/extern/Eigen3/Eigen/src/LU/FullPivLU.h
index dfe25f424d7..26bc714475c 100644
--- a/extern/Eigen3/Eigen/src/LU/FullPivLU.h
+++ b/extern/Eigen3/Eigen/src/LU/FullPivLU.h
@@ -20,10 +20,11 @@ namespace Eigen {
*
* \param MatrixType the type of the matrix of which we are computing the LU decomposition
*
- * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A
- * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q
- * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal
- * coefficients) of U are sorted in such a way that any zeros are at the end.
+ * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is
+ * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is
+ * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU
+ * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any
+ * zeros are at the end.
*
* This decomposition provides the generic approach to solving systems of linear equations, computing
* the rank, invertibility, inverse, kernel, and determinant.
@@ -373,6 +374,12 @@ template<typename _MatrixType> class FullPivLU
inline Index cols() const { return m_lu.cols(); }
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_lu;
PermutationPType m_p;
PermutationQType m_q;
@@ -417,6 +424,8 @@ FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
template<typename MatrixType>
FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
// the permutations are stored as int indices, so just to be sure:
eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
@@ -511,8 +520,8 @@ typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant
}
/** \returns the matrix represented by the decomposition,
- * i.e., it returns the product: P^{-1} L U Q^{-1}.
- * This function is provided for debug purpose. */
+ * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$.
+ * This function is provided for debug purposes. */
template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
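The documentation now typesets the factorization as \f$ A = P^{-1} L U Q^{-1} \f$, and reconstructedMatrix() multiplies those factors back together, which makes it a convenient correctness check. Assuming only the public API:

```cpp
#include <Eigen/LU>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  // P^{-1} L U Q^{-1} should reproduce A up to roundoff:
  std::cout << (lu.reconstructedMatrix() - A).norm() << "\n"; // ~1e-15
  std::cout << "rank: " << lu.rank() << "\n";
  return 0;
}
```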
diff --git a/extern/Eigen3/Eigen/src/LU/PartialPivLU.h b/extern/Eigen3/Eigen/src/LU/PartialPivLU.h
index 740ee694c45..7d1db948c0a 100644
--- a/extern/Eigen3/Eigen/src/LU/PartialPivLU.h
+++ b/extern/Eigen3/Eigen/src/LU/PartialPivLU.h
@@ -171,6 +171,12 @@ template<typename _MatrixType> class PartialPivLU
inline Index cols() const { return m_lu.cols(); }
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_lu;
PermutationType m_p;
TranspositionType m_rowsTranspositions;
@@ -386,6 +392,8 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
template<typename MatrixType>
PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
// the row permutation is stored as int indices, so just to be sure:
eigen_assert(matrix.rows()<NumTraits<int>::highest());
diff --git a/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h b/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h
index 41b4fd7e392..70550b8a90a 100644
--- a/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h
+++ b/extern/Eigen3/Eigen/src/OrderingMethods/Amd.h
@@ -137,22 +137,27 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
degree[i] = len[i]; // degree of node i
}
mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
- elen[n] = -2; /* n is a dead element */
- Cp[n] = -1; /* n is a root of assembly tree */
- w[n] = 0; /* n is a dead element */
/* --- Initialize degree lists ------------------------------------------ */
for(i = 0; i < n; i++)
{
+ bool has_diag = false;
+ for(p = Cp[i]; p<Cp[i+1]; ++p)
+ if(Ci[p]==i)
+ {
+ has_diag = true;
+ break;
+ }
+
d = degree[i];
- if(d == 0) /* node i is empty */
+ if(d == 1 && has_diag) /* node i is empty */
{
elen[i] = -2; /* element i is dead */
nel++;
Cp[i] = -1; /* i is a root of assembly tree */
w[i] = 0;
}
- else if(d > dense) /* node i is dense */
+ else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */
{
nv[i] = 0; /* absorb i into element n */
elen[i] = -1; /* node i is dead */
@@ -168,6 +173,10 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
}
}
+ elen[n] = -2; /* n is a dead element */
+ Cp[n] = -1; /* n is a root of assembly tree */
+ w[n] = 0; /* n is a dead element */
+
while (nel < n) /* while (selecting pivots) do */
{
/* --- Select node of minimum approximate degree -------------------- */
diff --git a/extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h b/extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h
index b4da6531a1d..f3c31f9cbfc 100644
--- a/extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h
+++ b/extern/Eigen3/Eigen/src/OrderingMethods/Ordering.h
@@ -109,7 +109,7 @@ class NaturalOrdering
* \class COLAMDOrdering
*
* Functor computing the \em column \em approximate \em minimum \em degree ordering
- * The matrix should be in column-major format
+ * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()).
*/
template<typename Index>
class COLAMDOrdering
@@ -118,10 +118,14 @@ class COLAMDOrdering
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
typedef Matrix<Index, Dynamic, 1> IndexVector;
- /** Compute the permutation vector form a sparse matrix */
+  /** Compute the permutation vector \a perm from the sparse matrix \a mat
+ * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+ */
template <typename MatrixType>
void operator() (const MatrixType& mat, PermutationType& perm)
{
+ eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering");
+
Index m = mat.rows();
Index n = mat.cols();
Index nnz = mat.nonZeros();
@@ -132,12 +136,12 @@ class COLAMDOrdering
Index stats [COLAMD_STATS];
internal::colamd_set_defaults(knobs);
- Index info;
IndexVector p(n+1), A(Alen);
for(Index i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
// Call Colamd routine to compute the ordering
- info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
+ Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
+ EIGEN_UNUSED_VARIABLE(info);
eigen_assert( info && "COLAMD failed " );
perm.resize(n);
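
A minimal sketch of the call pattern the new assertion enforces:

    #include <Eigen/SparseCore>
    #include <Eigen/OrderingMethods>

    int main()
    {
      Eigen::SparseMatrix<double> A(5, 5);
      A.insert(0, 0) = 1.0;
      A.insert(2, 1) = 2.0;
      A.insert(4, 3) = 3.0;
      A.makeCompressed();  // required: COLAMDOrdering now asserts isCompressed()

      Eigen::COLAMDOrdering<int> colamd;
      Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
      colamd(A, perm);
    }
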
diff --git a/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h b/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
index 1c48f0df7b5..18cd7d88aea 100644
--- a/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
+++ b/extern/Eigen3/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -219,7 +219,7 @@ class PardisoImpl
void pardisoInit(int type)
{
m_type = type;
- bool symmetric = abs(m_type) < 10;
+ bool symmetric = std::abs(m_type) < 10;
m_iparm[0] = 1; // No solver default
m_iparm[1] = 3; // use Metis for the ordering
m_iparm[2] = 1; // Numbers of processors, value of OMP_NUM_THREADS
diff --git a/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h
index bec85810ccc..567eab7cda5 100644
--- a/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/extern/Eigen3/Eigen/src/QR/ColPivHouseholderQR.h
@@ -76,7 +76,8 @@ template<typename _MatrixType> class ColPivHouseholderQR
m_colsTranspositions(),
m_temp(),
m_colSqNorms(),
- m_isInitialized(false) {}
+ m_isInitialized(false),
+ m_usePrescribedThreshold(false) {}
/** \brief Default Constructor with memory preallocation
*
@@ -383,6 +384,12 @@ template<typename _MatrixType> class ColPivHouseholderQR
}
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_qr;
HCoeffsType m_hCoeffs;
PermutationType m_colsPermutation;
@@ -421,6 +428,8 @@ typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDetermina
template<typename MatrixType>
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
using std::abs;
Index rows = matrix.rows();
Index cols = matrix.cols();
@@ -462,20 +471,10 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
// we store that back into our table: it can't hurt to correct our table.
m_colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm;
- // if the current biggest column is smaller than epsilon times the initial biggest column,
- // terminate to avoid generating nan/inf values.
- // Note that here, if we test instead for "biggest == 0", we get a failure every 1000 (or so)
- // repetitions of the unit test, with the result of solve() filled with large values of the order
- // of 1/(size*epsilon).
- if(biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
- {
+ // Track the number of meaningful pivots but do not stop the decomposition to make
+ // sure that the initial matrix is properly reproduced. See bug 941.
+ if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
m_nonzero_pivots = k;
- m_hCoeffs.tail(size-k).setZero();
- m_qr.bottomRightCorner(rows-k,cols-k)
- .template triangularView<StrictlyLower>()
- .setZero();
- break;
- }
// apply the transposition to the columns
m_colsTranspositions.coeffRef(k) = biggest_col_index;
@@ -504,7 +503,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
}
m_colsPermutation.setIdentity(PermIndexType(cols));
- for(PermIndexType k = 0; k < m_nonzero_pivots; ++k)
+ for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k)
m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
@@ -554,13 +553,15 @@ struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
} // end namespace internal
-/** \returns the matrix Q as a sequence of householder transformations */
+/** \returns the matrix Q as a sequence of householder transformations.
+ * You can extract the meaningful part only by using:
+ * \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/
template<typename MatrixType>
typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>
::householderQ() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
- return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()).setLength(m_nonzero_pivots);
+ return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
}
/** \return the column-pivoting Householder QR decomposition of \c *this.
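
Because householderQ() no longer calls setLength(m_nonzero_pivots) internally, callers that relied on the truncated sequence must now request it explicitly, exactly as the updated doc suggests. A short sketch of both behaviors:

    #include <Eigen/QR>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
      Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);

      // New default: the full sequence of Householder transformations.
      Eigen::MatrixXd Qfull = qr.householderQ();

      // Former behavior, now opt-in:
      Eigen::MatrixXd Qpart = qr.householderQ().setLength(qr.nonzeroPivots());

      (void)Qfull; (void)Qpart;
    }
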
diff --git a/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h b/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h
index 6168e7abfb4..0b39966e145 100644
--- a/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/extern/Eigen3/Eigen/src/QR/FullPivHouseholderQR.h
@@ -368,6 +368,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
RealScalar maxPivot() const { return m_maxpivot; }
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_qr;
HCoeffsType m_hCoeffs;
IntDiagSizeVectorType m_rows_transpositions;
@@ -407,6 +413,8 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
template<typename MatrixType>
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
using std::abs;
Index rows = matrix.rows();
Index cols = matrix.cols();
diff --git a/extern/Eigen3/Eigen/src/QR/HouseholderQR.h b/extern/Eigen3/Eigen/src/QR/HouseholderQR.h
index abc61bcbbe1..343a6649934 100644
--- a/extern/Eigen3/Eigen/src/QR/HouseholderQR.h
+++ b/extern/Eigen3/Eigen/src/QR/HouseholderQR.h
@@ -189,6 +189,12 @@ template<typename _MatrixType> class HouseholderQR
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_qr;
HCoeffsType m_hCoeffs;
RowVectorType m_temp;
@@ -251,56 +257,62 @@ void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename
}
/** \internal */
-template<typename MatrixQR, typename HCoeffs>
-void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
- typename MatrixQR::Index maxBlockSize=32,
- typename MatrixQR::Scalar* tempData = 0)
+template<typename MatrixQR, typename HCoeffs,
+ typename MatrixQRScalar = typename MatrixQR::Scalar,
+ bool InnerStrideIsOne = (MatrixQR::InnerStrideAtCompileTime == 1 && HCoeffs::InnerStrideAtCompileTime == 1)>
+struct householder_qr_inplace_blocked
{
- typedef typename MatrixQR::Index Index;
- typedef typename MatrixQR::Scalar Scalar;
- typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
-
- Index rows = mat.rows();
- Index cols = mat.cols();
- Index size = (std::min)(rows, cols);
-
- typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
- TempType tempVector;
- if(tempData==0)
+ // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h
+ static void run(MatrixQR& mat, HCoeffs& hCoeffs,
+ typename MatrixQR::Index maxBlockSize=32,
+ typename MatrixQR::Scalar* tempData = 0)
{
- tempVector.resize(cols);
- tempData = tempVector.data();
- }
-
- Index blockSize = (std::min)(maxBlockSize,size);
+ typedef typename MatrixQR::Index Index;
+ typedef typename MatrixQR::Scalar Scalar;
+ typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
- Index k = 0;
- for (k = 0; k < size; k += blockSize)
- {
- Index bs = (std::min)(size-k,blockSize); // actual size of the block
- Index tcols = cols - k - bs; // trailing columns
- Index brows = rows-k; // rows of the block
+ Index rows = mat.rows();
+ Index cols = mat.cols();
+ Index size = (std::min)(rows, cols);
- // partition the matrix:
- // A00 | A01 | A02
- // mat = A10 | A11 | A12
- // A20 | A21 | A22
- // and performs the qr dec of [A11^T A12^T]^T
- // and update [A21^T A22^T]^T using level 3 operations.
- // Finally, the algorithm continue on A22
-
- BlockType A11_21 = mat.block(k,k,brows,bs);
- Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
+ typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
+ TempType tempVector;
+ if(tempData==0)
+ {
+ tempVector.resize(cols);
+ tempData = tempVector.data();
+ }
- householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
+ Index blockSize = (std::min)(maxBlockSize,size);
- if(tcols)
+ Index k = 0;
+ for (k = 0; k < size; k += blockSize)
{
- BlockType A21_22 = mat.block(k,k+bs,brows,tcols);
- apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint());
+ Index bs = (std::min)(size-k,blockSize); // actual size of the block
+ Index tcols = cols - k - bs; // trailing columns
+ Index brows = rows-k; // rows of the block
+
+ // partition the matrix:
+ // A00 | A01 | A02
+ // mat = A10 | A11 | A12
+ // A20 | A21 | A22
+      // and performs the QR decomposition of [A11^T A12^T]^T,
+      // and updates [A21^T A22^T]^T using level 3 operations.
+      // Finally, the algorithm continues on A22.
+
+ BlockType A11_21 = mat.block(k,k,brows,bs);
+ Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
+
+ householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
+
+ if(tcols)
+ {
+ BlockType A21_22 = mat.block(k,k+bs,brows,tcols);
+ apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint());
+ }
}
}
-}
+};
template<typename _MatrixType, typename Rhs>
struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
@@ -343,6 +355,8 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
template<typename MatrixType>
HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = (std::min)(rows,cols);
@@ -352,7 +366,7 @@ HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType&
m_temp.resize(cols);
- internal::householder_qr_inplace_blocked(m_qr, m_hCoeffs, 48, m_temp.data());
+ internal::householder_qr_inplace_blocked<MatrixType, HCoeffsType>::run(m_qr, m_hCoeffs, 48, m_temp.data());
m_isInitialized = true;
return *this;
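
The free function householder_qr_inplace_blocked becomes a struct because function templates cannot be partially specialized, while the MKL header below needs to hook in per scalar type. A distilled sketch of the dispatch pattern (names hypothetical, not Eigen's):

    #include <iostream>

    // Primary template: stands in for the generic blocked Householder path.
    template <typename Scalar>
    struct qr_blocked_impl
    {
      static void run() { std::cout << "generic blocked QR\n"; }
    };

    // A vendor backend (e.g. an MKL wrapper) specializes for the scalar
    // types it supports without touching the generic code:
    template <>
    struct qr_blocked_impl<double>
    {
      static void run() { std::cout << "vendor geqrf path\n"; }
    };

    int main()
    {
      qr_blocked_impl<float>::run();   // falls back to the generic code
      qr_blocked_impl<double>::run();  // picks the vendor specialization
    }
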
diff --git a/extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h b/extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h
index 5313de604d2..b80f1b48dac 100644
--- a/extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h
+++ b/extern/Eigen3/Eigen/src/QR/HouseholderQR_MKL.h
@@ -34,28 +34,30 @@
#ifndef EIGEN_QR_MKL_H
#define EIGEN_QR_MKL_H
-#include "Eigen/src/Core/util/MKL_support.h"
+#include "../Core/util/MKL_support.h"
namespace Eigen {
-namespace internal {
+ namespace internal {
-/** \internal Specialization for the data types supported by MKL */
+ /** \internal Specialization for the data types supported by MKL */
#define EIGEN_MKL_QR_NOPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \
template<typename MatrixQR, typename HCoeffs> \
-void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, \
- typename MatrixQR::Index maxBlockSize=32, \
- EIGTYPE* tempData = 0) \
+struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \
{ \
- lapack_int m = mat.rows(); \
- lapack_int n = mat.cols(); \
- lapack_int lda = mat.outerStride(); \
- lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
- LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \
- hCoeffs.adjointInPlace(); \
-\
-}
+ static void run(MatrixQR& mat, HCoeffs& hCoeffs, \
+ typename MatrixQR::Index = 32, \
+ typename MatrixQR::Scalar* = 0) \
+ { \
+ lapack_int m = (lapack_int) mat.rows(); \
+ lapack_int n = (lapack_int) mat.cols(); \
+ lapack_int lda = (lapack_int) mat.outerStride(); \
+ lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+ LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \
+ hCoeffs.adjointInPlace(); \
+ } \
+};
EIGEN_MKL_QR_NOPIV(double, double, d)
EIGEN_MKL_QR_NOPIV(float, float, s)
diff --git a/extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
index a2cc2a9e261..36138101d74 100644
--- a/extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
+++ b/extern/Eigen3/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -47,7 +47,7 @@ namespace Eigen {
* You can then apply it to a vector.
*
* R is the sparse triangular factor. Use matrixQR() to get it as SparseMatrix.
- * NOTE : The Index type of R is always UF_long. You can get it with SPQR::Index
+ * NOTE : The Index type of R is always SuiteSparse_long. You can get it with SPQR::Index
*
* \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
* NOTE
@@ -59,24 +59,18 @@ class SPQR
public:
typedef typename _MatrixType::Scalar Scalar;
typedef typename _MatrixType::RealScalar RealScalar;
- typedef UF_long Index ;
+ typedef SuiteSparse_long Index ;
typedef SparseMatrix<Scalar, ColMajor, Index> MatrixType;
typedef PermutationMatrix<Dynamic, Dynamic> PermutationType;
public:
SPQR()
- : m_isInitialized(false),
- m_ordering(SPQR_ORDERING_DEFAULT),
- m_allow_tol(SPQR_DEFAULT_TOL),
- m_tolerance (NumTraits<Scalar>::epsilon())
+ : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
}
- SPQR(const _MatrixType& matrix)
- : m_isInitialized(false),
- m_ordering(SPQR_ORDERING_DEFAULT),
- m_allow_tol(SPQR_DEFAULT_TOL),
- m_tolerance (NumTraits<Scalar>::epsilon())
+ SPQR(const _MatrixType& matrix)
+ : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
compute(matrix);
@@ -101,10 +95,26 @@ class SPQR
if(m_isInitialized) SPQR_free();
MatrixType mat(matrix);
+
+      /* Compute the default threshold as in MATLAB, see:
+       * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+       * Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
+ */
+ RealScalar pivotThreshold = m_tolerance;
+ if(m_useDefaultThreshold)
+ {
+ using std::max;
+ RealScalar max2Norm = 0.0;
+ for (int j = 0; j < mat.cols(); j++) max2Norm = (max)(max2Norm, mat.col(j).norm());
+ if(max2Norm==RealScalar(0))
+ max2Norm = RealScalar(1);
+ pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();
+ }
+
cholmod_sparse A;
A = viewAsCholmod(mat);
Index col = matrix.cols();
- m_rank = SuiteSparseQR<Scalar>(m_ordering, m_tolerance, col, &A,
+ m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A,
&m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);
if (!m_cR)
@@ -120,7 +130,7 @@ class SPQR
/**
* Get the number of rows of the input matrix and the Q matrix
*/
- inline Index rows() const {return m_H->nrow; }
+ inline Index rows() const {return m_cR->nrow; }
/**
* Get the number of columns of the input matrix.
@@ -145,16 +155,25 @@ class SPQR
{
eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
eigen_assert(b.cols()==1 && "This method is for vectors only");
-
+
//Compute Q^T * b
- typename Dest::PlainObject y;
+ typename Dest::PlainObject y, y2;
y = matrixQ().transpose() * b;
- // Solves with the triangular matrix R
+
+ // Solves with the triangular matrix R
Index rk = this->rank();
- y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y.topRows(rk));
- y.bottomRows(cols()-rk).setZero();
+ y2 = y;
+ y.resize((std::max)(cols(),Index(y.rows())),y.cols());
+ y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk));
+
// Apply the column permutation
- dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
+ // colsPermutation() performs a copy of the permutation,
+ // so let's apply it manually:
+ for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i);
+ for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero();
+
+// y.bottomRows(y.rows()-rk).setZero();
+// dest = colsPermutation() * y.topRows(cols());
m_info = Success;
}
@@ -197,7 +216,11 @@ class SPQR
/// Set the fill-reducing ordering method to be used
void setSPQROrdering(int ord) { m_ordering = ord;}
/// Set the tolerance tol to treat columns with 2-norm < =tol as zero
- void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; }
+ void setPivotThreshold(const RealScalar& tol)
+ {
+ m_useDefaultThreshold = false;
+ m_tolerance = tol;
+ }
/** \returns a pointer to the SPQR workspace */
cholmod_common *cholmodCommon() const { return &m_cc; }
@@ -230,6 +253,7 @@ class SPQR
mutable cholmod_dense *m_HTau; // The Householder coefficients
mutable Index m_rank; // The rank of the matrix
mutable cholmod_common m_cc; // Workspace and parameters
+ bool m_useDefaultThreshold; // Use default threshold
template<typename ,typename > friend struct SPQR_QProduct;
};
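
The default threshold introduced above follows the formula quoted from Davis' paper: 20 (m + n) eps max_j ||A_j||_2. A stand-alone sketch of the same computation (hypothetical helper, mirroring the new code in SPQR::compute()):

    #include <Eigen/SparseCore>
    #include <algorithm>
    #include <limits>

    double defaultSpqrPivotThreshold(const Eigen::SparseMatrix<double>& mat)
    {
      double max2Norm = 0.0;
      for (int j = 0; j < mat.cols(); ++j)
        max2Norm = std::max(max2Norm, mat.col(j).norm());
      if (max2Norm == 0.0)
        max2Norm = 1.0;  // avoid a zero threshold for an all-zero matrix
      return 20.0 * (mat.rows() + mat.cols()) * max2Norm
                  * std::numeric_limits<double>::epsilon();
    }

Note that setPivotThreshold() now also clears m_useDefaultThreshold, so a user-supplied tolerance always takes precedence over this formula.
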
diff --git a/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h b/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
index f44995cd39c..1b29774190a 100644
--- a/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
+++ b/extern/Eigen3/Eigen/src/SVD/JacobiSVD.h
@@ -375,17 +375,19 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
Scalar z;
JacobiRotation<Scalar> rot;
RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
+
if(n==0)
{
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
if(work_matrix.coeff(q,q)!=Scalar(0))
+ {
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
- else
- z = Scalar(0);
- work_matrix.row(q) *= z;
- if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ // otherwise the second row is already zero, so we have nothing to do.
}
else
{
@@ -415,6 +417,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> *j_right)
{
using std::sqrt;
+ using std::abs;
Matrix<RealScalar,2,2> m;
m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
@@ -428,9 +431,11 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
}
else
{
- RealScalar u = d / t;
- rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
- rot1.s() = rot1.c() * u;
+ RealScalar t2d2 = numext::hypot(t,d);
+ rot1.c() = abs(t)/t2d2;
+ rot1.s() = d/t2d2;
+ if(t<RealScalar(0))
+ rot1.s() = -rot1.s();
}
m.applyOnTheLeft(0,1,rot1);
j_right->makeJacobi(m,0,1);
@@ -531,8 +536,9 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
JacobiSVD()
: m_isInitialized(false),
m_isAllocated(false),
+ m_usePrescribedThreshold(false),
m_computationOptions(0),
- m_rows(-1), m_cols(-1)
+ m_rows(-1), m_cols(-1), m_diagSize(0)
{}
@@ -545,6 +551,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
: m_isInitialized(false),
m_isAllocated(false),
+ m_usePrescribedThreshold(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1)
{
@@ -564,6 +571,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
: m_isInitialized(false),
m_isAllocated(false),
+ m_usePrescribedThreshold(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1)
{
@@ -665,23 +673,92 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
return m_nonzeroSingularValues;
}
+
+ /** \returns the rank of the matrix of which \c *this is the SVD.
+ *
+ * \note This method has to determine which singular values should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index rank() const
+ {
+ using std::abs;
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ if(m_singularValues.size()==0) return 0;
+ RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold();
+ Index i = m_nonzeroSingularValues-1;
+ while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+ return i+1;
+ }
+
+   /** Allows one to prescribe a threshold to be used by certain methods, such as rank() and solve(),
+ * which need to determine when singular values are to be considered nonzero.
+ * This is not used for the SVD decomposition itself.
+ *
+ * When it needs to get the threshold value, Eigen calls threshold().
+ * The default is \c NumTraits<Scalar>::epsilon()
+ *
+ * \param threshold The new value to use as the threshold.
+ *
+   * A singular value will be considered nonzero if it is strictly greater than
+   * \f$ threshold \times \vert max singular value \vert \f$.
+ *
+ * If you want to come back to the default behavior, call setThreshold(Default_t)
+ */
+ JacobiSVD& setThreshold(const RealScalar& threshold)
+ {
+ m_usePrescribedThreshold = true;
+ m_prescribedThreshold = threshold;
+ return *this;
+ }
+
+   /** Allows one to come back to the default behavior, letting Eigen use its default formula for
+ * determining the threshold.
+ *
+ * You should pass the special object Eigen::Default as parameter here.
+ * \code svd.setThreshold(Eigen::Default); \endcode
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ JacobiSVD& setThreshold(Default_t)
+ {
+ m_usePrescribedThreshold = false;
+ return *this;
+ }
+
+ /** Returns the threshold that will be used by certain methods such as rank().
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ RealScalar threshold() const
+ {
+ eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ return m_usePrescribedThreshold ? m_prescribedThreshold
+ : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
+ }
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
protected:
MatrixUType m_matrixU;
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
WorkMatrixType m_workMatrix;
- bool m_isInitialized, m_isAllocated;
+ bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV;
unsigned int m_computationOptions;
Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+ RealScalar m_prescribedThreshold;
template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
friend struct internal::svd_precondition_2x2_block_to_be_real;
@@ -690,6 +767,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
+ MatrixType m_scaledMatrix;
};
template<typename MatrixType, int QRPreconditioner>
@@ -736,14 +814,17 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
: 0);
m_workMatrix.resize(m_diagSize, m_diagSize);
- if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
- if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
+ if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
+ if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
+  if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
}
template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
+ check_template_parameters();
+
using std::abs;
allocate(matrix.rows(), matrix.cols(), computationOptions);
@@ -754,11 +835,21 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
+ // Scaling factor to reduce over/under-flows
+ RealScalar scale = matrix.cwiseAbs().maxCoeff();
+ if(scale==RealScalar(0)) scale = RealScalar(1);
+
/*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
- if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
+ if(m_rows!=m_cols)
{
- m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize);
+ m_scaledMatrix = matrix / scale;
+ m_qr_precond_morecols.run(*this, m_scaledMatrix);
+ m_qr_precond_morerows.run(*this, m_scaledMatrix);
+ }
+ else
+ {
+ m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;
if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
@@ -784,7 +875,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
using std::max;
RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
abs(m_workMatrix.coeff(q,q))));
- if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
+ // We compare both values to threshold instead of calling max to be robust to NaN (See bug 791)
+ if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
{
finished = false;
@@ -833,6 +925,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
}
}
+
+ m_singularValues *= scale;
m_isInitialized = true;
return *this;
@@ -854,11 +948,11 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
// So A^{-1} = V S^{-1} U^*
Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp;
- Index nonzeroSingVals = dec().nonzeroSingularValues();
+ Index rank = dec().rank();
- tmp.noalias() = dec().matrixU().leftCols(nonzeroSingVals).adjoint() * rhs();
- tmp = dec().singularValues().head(nonzeroSingVals).asDiagonal().inverse() * tmp;
- dst = dec().matrixV().leftCols(nonzeroSingVals) * tmp;
+ tmp.noalias() = dec().matrixU().leftCols(rank).adjoint() * rhs();
+ tmp = dec().singularValues().head(rank).asDiagonal().inverse() * tmp;
+ dst = dec().matrixV().leftCols(rank) * tmp;
}
};
} // end namespace internal
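
With rank() and setThreshold() in place, solve() truncates at rank() rather than nonzeroSingularValues(), so rank-deficient systems are handled consistently. A usage sketch:

    #include <Eigen/SVD>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
      A.col(2) = A.col(0) + A.col(1);            // force rank deficiency
      Eigen::VectorXd b = Eigen::VectorXd::Random(5);

      Eigen::JacobiSVD<Eigen::MatrixXd> svd(A,
          Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "rank (default threshold): " << svd.rank() << "\n";

      svd.setThreshold(1e-3);                    // coarser cutoff
      std::cout << "rank (1e-3 threshold):    " << svd.rank() << "\n";

      Eigen::VectorXd x = svd.solve(b);          // uses rank(), not nnz pivots
      svd.setThreshold(Eigen::Default);          // restore the default formula
      (void)x;
    }
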
diff --git a/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h b/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
index f41d7e010f7..e1f96ba5a14 100644
--- a/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
+++ b/extern/Eigen3/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -37,6 +37,7 @@ class SimplicialCholeskyBase : internal::noncopyable
{
public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
+ typedef typename internal::traits<Derived>::OrderingType OrderingType;
enum { UpLo = internal::traits<Derived>::UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
@@ -240,15 +241,16 @@ class SimplicialCholeskyBase : internal::noncopyable
RealScalar m_shiftScale;
};
-template<typename _MatrixType, int _UpLo = Lower> class SimplicialLLT;
-template<typename _MatrixType, int _UpLo = Lower> class SimplicialLDLT;
-template<typename _MatrixType, int _UpLo = Lower> class SimplicialCholesky;
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLLT;
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLDLT;
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialCholesky;
namespace internal {
-template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
+ typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -259,9 +261,10 @@ template<typename _MatrixType, int _UpLo> struct traits<SimplicialLLT<_MatrixTyp
static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
};
-template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixType,_UpLo> >
+template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
+ typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
@@ -272,9 +275,10 @@ template<typename _MatrixType,int _UpLo> struct traits<SimplicialLDLT<_MatrixTyp
static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
};
-template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
{
typedef _MatrixType MatrixType;
+ typedef _Ordering OrderingType;
enum { UpLo = _UpLo };
};
@@ -294,11 +298,12 @@ template<typename _MatrixType, int _UpLo> struct traits<SimplicialCholesky<_Matr
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower.
+ * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
*
- * \sa class SimplicialLDLT
+ * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering
*/
-template<typename _MatrixType, int _UpLo>
- class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+ class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
@@ -382,11 +387,12 @@ public:
* \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower.
+ * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
*
- * \sa class SimplicialLLT
+ * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering
*/
-template<typename _MatrixType, int _UpLo>
- class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+ class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
@@ -467,8 +473,8 @@ public:
*
* \sa class SimplicialLDLT, class SimplicialLLT
*/
-template<typename _MatrixType, int _UpLo>
- class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo> >
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+ class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
{
public:
typedef _MatrixType MatrixType;
@@ -612,15 +618,13 @@ void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, CholMatrixTy
{
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
- // TODO allows to configure the permutation
// Note that amd compute the inverse permutation
{
CholMatrixType C;
C = a.template selfadjointView<UpLo>();
- // remove diagonal entries:
- // seems not to be needed
- // C.prune(keep_diag());
- internal::minimum_degree_ordering(C, m_Pinv);
+
+ OrderingType ordering;
+ ordering(C,m_Pinv);
}
if(m_Pinv.size()>0)
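
Since the ordering is now a template parameter with AMDOrdering as the default, existing code compiles unchanged, while the fill-reducing permutation can be swapped out per the new \tparam docs. For example (a sketch):

    #include <Eigen/SparseCholesky>
    #include <Eigen/OrderingMethods>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(3, 3);
      A.insert(0, 0) = 4.0;
      A.insert(1, 1) = 5.0;
      A.insert(2, 2) = 6.0;
      A.insert(1, 0) = 1.0;  // lower triangle of an SPD matrix
      A.makeCompressed();

      // Default: AMD fill-reducing ordering, as before this patch.
      Eigen::SimplicialLDLT<SpMat> ldltAmd(A);

      // New: skip the fill-reducing permutation entirely.
      Eigen::SimplicialLDLT<SpMat, Eigen::Lower,
                            Eigen::NaturalOrdering<int> > ldltNat(A);

      return (ldltAmd.info() == Eigen::Success &&
              ldltNat.info() == Eigen::Success) ? 0 : 1;
    }
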
diff --git a/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
index 17fff96a78b..220c6451cb9 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
@@ -69,7 +69,7 @@ class AmbiVector
delete[] m_buffer;
if (size<1000)
{
- Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+ Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
m_buffer = new Scalar[allocSize];
}
@@ -88,7 +88,7 @@ class AmbiVector
Index copyElements = m_allocatedElements;
m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
Index allocSize = m_allocatedElements * sizeof(ListEl);
- allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
+ allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
Scalar* newBuffer = new Scalar[allocSize];
memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
delete[] m_buffer;
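
Both AmbiVector hunks replace a truncating division by a round-up when converting a byte count into a number of Scalar-sized slots, so the buffer can never come up short. The idiom in isolation (hypothetical helper):

    #include <cassert>
    #include <cstddef>

    // Smallest number of scalar-sized slots that can hold `bytes` bytes.
    static std::size_t slotsFor(std::size_t bytes, std::size_t scalarSize)
    {
      return (bytes + scalarSize - 1) / scalarSize;
    }

    int main()
    {
      assert(slotsFor(9, 8) == 2);   // old code: 9/8 == 1 slot, one byte short
      assert(slotsFor(16, 8) == 2);  // exact multiples are unchanged
      return 0;
    }
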
diff --git a/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
index 3321fab4a8a..a667cb56ebe 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
@@ -51,8 +51,8 @@ class CompressedStorage
CompressedStorage& operator=(const CompressedStorage& other)
{
resize(other.size());
- memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
- memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
+ internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
+ internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
return *this;
}
@@ -83,10 +83,10 @@ class CompressedStorage
reallocate(m_size);
}
- void resize(size_t size, float reserveSizeFactor = 0)
+ void resize(size_t size, double reserveSizeFactor = 0)
{
if (m_allocatedSize<size)
- reallocate(size + size_t(reserveSizeFactor*size));
+ reallocate(size + size_t(reserveSizeFactor*double(size)));
m_size = size;
}
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
index 16a20a5744e..0c90bafbeab 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
@@ -57,6 +57,16 @@ public:
inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
: m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
{}
+
+ inline const Scalar coeff(int row, int col) const
+ {
+    return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(int index) const
+ {
+ return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+ }
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
@@ -68,6 +78,8 @@ public:
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+ private:
+ Index nonZeros() const;
};
@@ -82,6 +94,7 @@ class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true
typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+ typedef Block<const SparseMatrixType, BlockRows, BlockCols, true> ConstBlockType;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
@@ -223,6 +236,118 @@ public:
else
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
}
+
+ inline Scalar& coeffRef(int row, int col)
+ {
+ return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(int row, int col) const
+ {
+ return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(int index) const
+ {
+ return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
+ eigen_assert(nonZeros()>0);
+ if(m_matrix.isCompressed())
+ return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
+ else
+ return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ typename SparseMatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
+
+};
+
+
+template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
+class BlockImpl<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
+ : public SparseMatrixBase<Block<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
+{
+ typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
+ typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
+ typedef Block<const SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+public:
+ enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+ EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+protected:
+ enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+
+ class InnerIterator: public SparseMatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const BlockType& xpr, Index outer)
+ : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+ class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
+ {
+ public:
+ inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+ : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline BlockImpl(const SparseMatrixType& xpr, int i)
+ : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+ {}
+
+ inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+ : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+ {}
+
+ inline const Scalar* valuePtr() const
+ { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* innerIndexPtr() const
+ { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* outerIndexPtr() const
+ { return m_matrix.outerIndexPtr() + m_outerStart; }
+
+ Index nonZeros() const
+ {
+ if(m_matrix.isCompressed())
+ return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
+ - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
+ else if(m_outerSize.value()==0)
+ return 0;
+ else
+ return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+ }
+
+ inline const Scalar coeff(int row, int col) const
+ {
+ return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(int index) const
+ {
+ return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+ }
const Scalar& lastCoeff() const
{
@@ -265,7 +390,8 @@ const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatri
* is col-major (resp. row-major).
*/
template<typename Derived>
-Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
+typename SparseMatrixBase<Derived>::InnerVectorsReturnType
+SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{
return Block<Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
@@ -277,7 +403,8 @@ Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Inde
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
-const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
+const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
+SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{
return Block<const Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
@@ -304,8 +431,8 @@ public:
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
- m_blockRows(xpr.rows()),
- m_blockCols(xpr.cols())
+ m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+ m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Dynamic-size constructor
@@ -407,3 +534,4 @@ public:
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCK_H
+
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index ec86ca933c2..4ca9128337f 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -73,7 +73,8 @@ class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
BinaryOp,Lhs,Rhs, InnerIterator> Base;
- EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
+ // NOTE: we have to prefix Index by "typename Lhs::" to avoid an ICE with VC11
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename Lhs::Index outer)
: Base(binOp.derived(),outer)
{}
};
@@ -313,10 +314,10 @@ SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& othe
template<typename Derived>
template<typename OtherDerived>
-EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type
SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
{
- return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
+ return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
}
} // end namespace Eigen
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
index 54fd633a10c..ccb6ae7b788 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -19,7 +19,10 @@ template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductRet
template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
{
- typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
+ typedef typename internal::conditional<
+ Lhs::IsRowMajor,
+ SparseDenseOuterProduct<Rhs,Lhs,true>,
+ SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type;
};
template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
@@ -29,7 +32,10 @@ template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductRet
template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
{
- typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type;
+ typedef typename internal::conditional<
+ Rhs::IsRowMajor,
+ SparseDenseOuterProduct<Rhs,Lhs,true>,
+ SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type;
};
namespace internal {
@@ -114,17 +120,30 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
typedef typename SparseDenseOuterProduct::Index Index;
public:
EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
- : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
- {
- }
+ : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits<Rhs>::StorageKind() ))
+ { }
inline Index outer() const { return m_outer; }
- inline Index row() const { return Transpose ? Base::row() : m_outer; }
- inline Index col() const { return Transpose ? m_outer : Base::row(); }
+ inline Index row() const { return Transpose ? m_outer : Base::index(); }
+ inline Index col() const { return Transpose ? Base::index() : m_outer; }
inline Scalar value() const { return Base::value() * m_factor; }
protected:
+ static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense())
+ {
+ return rhs.coeff(outer);
+ }
+
+ static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse())
+ {
+ typename Traits::_RhsNested::InnerIterator it(rhs, outer);
+ if (it && it.index()==0)
+ return it.value();
+
+ return Scalar(0);
+ }
+
Index m_outer;
Scalar m_factor;
};
@@ -161,7 +180,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
typename Res::Scalar tmp(0);
for(LhsInnerIterator it(lhs,j); it ;++it)
tmp += it.value() * rhs.coeff(it.index(),c);
- res.coeffRef(j,c) = alpha * tmp;
+ res.coeffRef(j,c) += alpha * tmp;
}
}
}
@@ -287,15 +306,6 @@ class DenseTimeSparseProduct
DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
};
-// sparse * dense
-template<typename Derived>
-template<typename OtherDerived>
-inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
-SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
-{
- return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
-}
-
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
index 01ce0dcfee3..2ff2015512f 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
@@ -691,7 +691,8 @@ class SparseMatrix
m_data.swap(other.m_data);
}
- /** Sets *this to the identity matrix */
+ /** Sets *this to the identity matrix.
+   * This function also turns the matrix into compressed mode, and drops any reserved memory. */
inline void setIdentity()
{
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
@@ -699,6 +700,8 @@ class SparseMatrix
Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
+ std::free(m_innerNonZeros);
+ m_innerNonZeros = 0;
}
inline SparseMatrix& operator=(const SparseMatrix& other)
{
@@ -940,7 +943,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
enum { IsRowMajor = SparseMatrixType::IsRowMajor };
typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::Index Index;
- SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
+ SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
if(begin!=end)
{
@@ -1178,7 +1181,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
size_t p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
- float reallocRatio = 1;
+ double reallocRatio = 1;
if (m_data.allocatedSize()<=m_data.size())
{
// if there is no preallocated memory, let's reserve a minimum of 32 elements
@@ -1190,13 +1193,13 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
{
// we need to reallocate the data, to reduce multiple reallocations
// we use a smart resize algorithm based on the current filling ratio
- // in addition, we use float to avoid integers overflows
- float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
- reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+      // in addition, we use double to avoid integer overflows
+ double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
+ reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
// furthermore we bound the realloc ratio to:
// 1) reduce multiple minor realloc when the matrix is almost filled
// 2) avoid to allocate too much memory when the matrix is almost empty
- reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
}
}
m_data.resize(m_data.size()+1,reallocRatio);
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
index bbcf7fb1c62..9341d9ad2c0 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -23,7 +23,14 @@ namespace Eigen {
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
*/
-template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
+template<typename Derived> class SparseMatrixBase
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+ typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
+ EigenBase<Derived> >
+#else
+ : public EigenBase<Derived>
+#endif // not EIGEN_PARSED_BY_DOXYGEN
{
public:
@@ -36,7 +43,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
>::type PacketReturnType;
typedef SparseMatrixBase StorageBaseType;
- typedef EigenBase<Derived> Base;
template<typename OtherDerived>
Derived& operator=(const EigenBase<OtherDerived> &other)
@@ -132,6 +138,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
inline Derived& derived() { return *static_cast<Derived*>(this); }
inline Derived& const_cast_derived() const
{ return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
+
+ typedef internal::special_scalar_op_base<Derived, Scalar, RealScalar, EigenBase<Derived> > Base;
+ using Base::operator*;
#endif // not EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
@@ -317,20 +326,18 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
Derived& operator*=(const Scalar& other);
Derived& operator/=(const Scalar& other);
- #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
- CwiseBinaryOp< \
- internal::scalar_product_op< \
- typename internal::scalar_product_traits< \
- typename internal::traits<Derived>::Scalar, \
- typename internal::traits<OtherDerived>::Scalar \
- >::ReturnType \
- >, \
- const Derived, \
- const OtherDerived \
- >
+ template<typename OtherDerived> struct CwiseProductDenseReturnType {
+ typedef CwiseBinaryOp<internal::scalar_product_op<typename internal::scalar_product_traits<
+ typename internal::traits<Derived>::Scalar,
+ typename internal::traits<OtherDerived>::Scalar
+ >::ReturnType>,
+ const Derived,
+ const OtherDerived
+ > Type;
+ };
template<typename OtherDerived>
- EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+ EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType<OtherDerived>::Type
cwiseProduct(const MatrixBase<OtherDerived> &other) const;
// sparse * sparse
@@ -358,7 +365,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
/** sparse * dense (returns a dense object unless it is an outer product) */
template<typename OtherDerived>
const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
- operator*(const MatrixBase<OtherDerived> &other) const;
+ operator*(const MatrixBase<OtherDerived> &other) const
+ { return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived()); }
/** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
@@ -403,8 +411,10 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
const ConstInnerVectorReturnType innerVector(Index outer) const;
// set of inner-vectors
- Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize);
- const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const;
+ typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
+ typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
+ InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
+ const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
/** \internal use operator= */
template<typename DenseDerived>
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
index b85be93f6f9..75e21000959 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
@@ -61,7 +61,7 @@ struct permut_sparsematrix_product_retval
for(Index j=0; j<m_matrix.outerSize(); ++j)
{
Index jp = m_permutation.indices().coeff(j);
- sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).size();
+ sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros();
}
tmp.reserve(sizes);
for(Index j=0; j<m_matrix.outerSize(); ++j)
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
index 7c300ee8dbc..76d031d52c8 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
@@ -26,7 +26,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
};
-// NOTE: VC10 trigger an ICE if don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index,
+// NOTE: VC10 and VC11 trigger an ICE if we don't put typename TransposeImpl<MatrixType,Sparse>:: in front of Index,
// a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
// does not fix the issue.
// An alternative is to define the nested class in the parent class itself.
@@ -40,8 +40,8 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerItera
EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
: Base(trans.derived().nestedExpression(), outer)
{}
- Index row() const { return Base::col(); }
- Index col() const { return Base::row(); }
+ typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+ typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
};
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
@@ -54,8 +54,8 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInn
EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
: Base(xpr.derived().nestedExpression(), outer)
{}
- Index row() const { return Base::col(); }
- Index col() const { return Base::row(); }
+ typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+ typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
};
} // end namespace Eigen
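The fix spells out the fully qualified dependent type instead of relying on the inherited Index typedef; that is a generic way to route around compiler bugs with dependent names. A minimal sketch of the pattern (toy types, not the Eigen classes):

    template<typename T> struct Base { typedef int Index; };

    template<typename T> struct Derived : Base<T>
    {
      // A bare 'Index' here is what ICEs VC10/VC11 in the Eigen case; fully
      // qualifying the dependent name sidesteps the compiler bug, at the cost
      // of verbosity.
      typename Base<T>::Index row() const { return 0; }
    };

    int main() { return Derived<int>().row(); }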
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
index 05023858b16..d627546def0 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
@@ -67,7 +67,6 @@ const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
-template<typename Derived> class SparseMatrixBase;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
@@ -84,8 +83,10 @@ template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
-template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType;
-template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType;
+template<typename Lhs, typename Rhs,
+ int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
+template<typename Lhs, typename Rhs,
+ int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
namespace internal {
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
index 7e15c814b6f..49865d0e72f 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
@@ -158,6 +158,7 @@ class SparseVector
Index inner = IsColVector ? row : col;
Index outer = IsColVector ? col : row;
+ EIGEN_ONLY_USED_FOR_DEBUG(outer);
eigen_assert(outer==0);
return insert(inner);
}
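EIGEN_ONLY_USED_FOR_DEBUG marks a variable as used only by eigen_assert, silencing unused-variable warnings in release builds where the assertion compiles away. A hypothetical equivalent of the idiom (the macro name below is illustrative, not Eigen's):

    #include <cassert>
    #define ONLY_USED_FOR_DEBUG(x) (void)(x)      // "use" x without generating code

    int insertAt(int outer, int inner)
    {
      ONLY_USED_FOR_DEBUG(outer);                 // no warning when NDEBUG disables assert
      assert(outer == 0);
      return inner;
    }

    int main() { return insertAt(0, 42); }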
diff --git a/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h b/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h
index cb8ad82b4f6..ccc12af7962 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h
@@ -69,7 +69,7 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
for(int i=lhs.rows()-1 ; i>=0 ; --i)
{
Scalar tmp = other.coeff(i,col);
- Scalar l_ii = 0;
+ Scalar l_ii(0);
typename Lhs::InnerIterator it(lhs, i);
while(it && it.index()<i)
++it;
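Replacing copy-initialization with direct-initialization here is not cosmetic: for a custom scalar type whose constructor from an integer is explicit, Scalar l_ii = 0; does not compile, while Scalar l_ii(0); does. A toy scalar shows the difference (illustrative type):

    struct MyScalar
    {
      explicit MyScalar(int v) : value(v) {}
      double value;
    };

    int main()
    {
      MyScalar a(0);      // OK: direct-initialization may call an explicit constructor
      // MyScalar b = 0;  // error: copy-initialization may not
      return static_cast<int>(a.value);
    }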
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU.h
index 1d592f2c8c7..bdc4f193ddb 100644
--- a/extern/Eigen3/Eigen/src/SparseLU/SparseLU.h
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU.h
@@ -260,16 +260,16 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
// Initialize with the determinant of the row matrix
Scalar det = Scalar(1.);
- //Note that the diagonal blocks of U are stored in supernodes,
+ // Note that the diagonal blocks of U are stored in supernodes,
// which are available in the L part :)
for (Index j = 0; j < this->cols(); ++j)
{
for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
{
- if(it.row() < j) continue;
- if(it.row() == j)
+ if(it.index() == j)
{
- det *= (std::abs)(it.value());
+ using std::abs;
+ det *= abs(it.value());
break;
}
}
@@ -296,7 +296,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
if(it.row() < j) continue;
if(it.row() == j)
{
- det += (std::log)((std::abs)(it.value()));
+ using std::log; using std::abs;
+ det += log(abs(it.value()));
break;
}
}
@@ -304,21 +305,64 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
return det;
}
- /** \returns A number representing the sign of the determinant
- *
- * \sa absDeterminant(), logAbsDeterminant()
- */
- Scalar signDeterminant()
- {
- eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
- return Scalar(m_detPermR);
- }
+ /** \returns A number representing the sign of the determinant
+ *
+ * \sa absDeterminant(), logAbsDeterminant()
+ */
+ Scalar signDeterminant()
+ {
+ eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+ // Initialize with the determinant of the row matrix
+ Index det = 1;
+ // Note that the diagonal blocks of U are stored in supernodes,
+ // which are available in the L part :)
+ for (Index j = 0; j < this->cols(); ++j)
+ {
+ for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+ {
+ if(it.index() == j)
+ {
+ if(it.value()<0)
+ det = -det;
+ else if(it.value()==0)
+ return 0;
+ break;
+ }
+ }
+ }
+ return det * m_detPermR * m_detPermC;
+ }
+
+ /** \returns The determinant of the matrix.
+ *
+ * \sa absDeterminant(), logAbsDeterminant()
+ */
+ Scalar determinant()
+ {
+ eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+ // Initialize with the determinant of the row matrix
+ Scalar det = Scalar(1.);
+ // Note that the diagonal blocks of U are stored in supernodes,
+ // which are available in the L part :)
+ for (Index j = 0; j < this->cols(); ++j)
+ {
+ for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+ {
+ if(it.index() == j)
+ {
+ det *= it.value();
+ break;
+ }
+ }
+ }
+ return det * Scalar(m_detPermR * m_detPermC);
+ }
protected:
// Functions
void initperfvalues()
{
- m_perfv.panel_size = 1;
+ m_perfv.panel_size = 16;
m_perfv.relax = 1;
m_perfv.maxsuper = 128;
m_perfv.rowblk = 16;
@@ -346,8 +390,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
// values for performance
internal::perfvalues<Index> m_perfv;
RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
- Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
- Index m_detPermR; // Determinant of the coefficient matrix
+ Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
+ Index m_detPermR, m_detPermC; // Determinants of the permutation matrices
private:
// Disable copy constructor
SparseLU (const SparseLU& );
@@ -623,7 +667,8 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
}
// Update the determinant of the row permutation matrix
- if (pivrow != jj) m_detPermR *= -1;
+ // FIXME: the following test is not correct; we should probably take iperm_c into account, and pivrow is not directly the row pivot.
+ if (pivrow != jj) m_detPermR = -m_detPermR;
// Prune columns (0:jj-1) using column jj
Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
@@ -638,10 +683,13 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
jcol += panel_size; // Move to the next panel
} // end for -- end elimination
+ m_detPermR = m_perm_r.determinant();
+ m_detPermC = m_perm_c.determinant();
+
// Count the number of nonzeros in factors
Base::countnz(n, m_nnzL, m_nnzU, m_glu);
// Apply permutation to the L subscripts
- Base::fixupL(n, m_perm_r.indices(), m_glu);
+ Base::fixupL(n, m_perm_r.indices(), m_glu);
// Create supernode matrix L
m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
@@ -701,8 +749,8 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator
}
else
{
- Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
- Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ Map< Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
U = A.template triangularView<Upper>().solve(U);
}
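With this hunk SparseLU gains a plain determinant() next to the existing absDeterminant()/logAbsDeterminant(), and signDeterminant() now accounts for both permutation matrices. A minimal usage sketch (matrix contents illustrative):

    #include <Eigen/SparseLU>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(3, 3);
      A.insert(0, 0) = 4.0;  A.insert(1, 1) = 2.0;  A.insert(2, 2) = 0.5;
      A.makeCompressed();

      Eigen::SparseLU<Eigen::SparseMatrix<double> > lu(A);  // analyze + factorize
      std::cout << lu.determinant()       << "\n"           // 4
                << lu.signDeterminant()   << "\n"           // +1
                << lu.logAbsDeterminant() << "\n";          // log(4)
      return 0;
    }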
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h
index 14d70897df7..99d651e40d3 100644
--- a/extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLUImpl.h
@@ -21,6 +21,8 @@ class SparseLUImpl
{
public:
typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+ typedef Matrix<Scalar,Dynamic,Dynamic,ColMajor> ScalarMatrix;
+ typedef Map<ScalarMatrix, 0, OuterStride<> > MappedMatrixBlock;
typedef Matrix<Index,Dynamic,1> IndexVector;
typedef typename ScalarVector::RealScalar RealScalar;
typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h
index 1ffa7d54e96..45f96d16a8e 100644
--- a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -153,8 +153,8 @@ Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lw
{
Index& num_expansions = glu.num_expansions; //No memory expansions so far
num_expansions = 0;
- glu.nzumax = glu.nzlumax = (std::min)(fillratio * annz / n, m) * n; // estimated number of nonzeros in U
- glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated nnz in L factor
+ glu.nzumax = glu.nzlumax = (std::min)(fillratio * (annz+1) / n, m) * n; // estimated number of nonzeros in U
+ glu.nzlmax = (std::max)(Index(4), fillratio) * (annz+1) / 4; // estimated nnz in L factor
// Return the estimated size to the user if necessary
Index tempSpace;
tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
index ad6f2183fed..54a56940861 100644
--- a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
@@ -189,8 +189,8 @@ class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator
m_idval(mat.colIndexPtr()[outer]),
m_startidval(m_idval),
m_endidval(mat.colIndexPtr()[outer+1]),
- m_idrow(mat.rowIndexPtr()[outer]),
- m_endidrow(mat.rowIndexPtr()[outer+1])
+ m_idrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]]),
+ m_endidrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]+1])
{}
inline InnerIterator& operator++()
{
@@ -236,7 +236,7 @@ void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) con
Index n = X.rows();
Index nrhs = X.cols();
const Scalar * Lval = valuePtr(); // Nonzero values
- Matrix<Scalar,Dynamic,Dynamic> work(n, nrhs); // working vector
+ Matrix<Scalar,Dynamic,Dynamic, ColMajor> work(n, nrhs); // working vector
work.setZero();
for (Index k = 0; k <= nsuper(); k ++)
{
@@ -267,12 +267,12 @@ void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) con
Index lda = colIndexPtr()[fsupc+1] - luptr;
// Triangular solve
- Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
- Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+ Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ Map< Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
U = A.template triangularView<UnitLower>().solve(U);
// Matrix-vector product
- new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
+ new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
work.block(0, 0, nrow, nrhs) = A * U;
//Begin Scatter
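The recurring change in this file, spelling out ColMajor in every Map, protects against EIGEN_DEFAULT_TO_ROW_MAJOR silently flipping the storage order of Matrix<Scalar,Dynamic,Dynamic>. The strided-Map pattern itself, for reference (buffer and sizes illustrative):

    #include <Eigen/Dense>

    int main()
    {
      typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> MatCol;
      double buf[12] = {1,2,3,0, 4,5,6,0, 7,8,9,0};
      int lda = 4;   // leading dimension: consecutive columns are lda doubles apart
      Eigen::Map<const MatCol, 0, Eigen::OuterStride<> > A(buf, 3, 3, Eigen::OuterStride<>(lda));
      // A(i,j) == buf[j*lda + i]; the explicit ColMajor keeps this invariant even
      // when EIGEN_DEFAULT_TO_ROW_MAJOR is defined by the user.
      return static_cast<int>(A(2, 2));   // 9
    }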
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h
index f24bd87d3e9..cacc7e98712 100644
--- a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_column_bmod.h
@@ -162,11 +162,11 @@ Index SparseLUImpl<Scalar,Index>::column_bmod(const Index jcol, const Index nseg
// points to the beginning of jcol in snode L\U(jsupno)
ufirst = glu.xlusup(jcol) + d_fsupc;
Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
- Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+ MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc);
u = A.template triangularView<UnitLower>().solve(u);
- new (&A) Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
+ new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow);
l.noalias() -= A * u;
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
index 0d0283b132b..6af02675429 100644
--- a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
@@ -56,7 +56,7 @@ EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsi
// Dense triangular solve -- start effective triangle
luptr += lda * no_zeros + no_zeros;
// Form Eigen matrix and vector
- Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
+ Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
Map<Matrix<Scalar,SegSizeAtCompileTime,1> > u(tempv.data(), segsize);
u = A.template triangularView<UnitLower>().solve(u);
@@ -65,7 +65,7 @@ EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsi
luptr += segsize;
const Index PacketSize = internal::packet_traits<Scalar>::size;
Index ldl = internal::first_multiple(nrow, PacketSize);
- Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
+ Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
Index aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize);
Index aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize;
Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h
index da0e0fc3c60..9d2ff290635 100644
--- a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_panel_bmod.h
@@ -102,7 +102,7 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const
if(nsupc >= 2)
{
Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);
- Map<Matrix<Scalar,Dynamic,Dynamic>, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
+ Map<ScalarMatrix, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
// gather U
Index u_col = 0;
@@ -136,17 +136,17 @@ void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const
Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
no_zeros = (krep - u_rows + 1) - fsupc;
luptr += lda * no_zeros + no_zeros;
- Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
+ MappedMatrixBlock A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
U = A.template triangularView<UnitLower>().solve(U);
// update
luptr += u_rows;
- Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
+ MappedMatrixBlock B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
eigen_assert(tempv.size()>w*ldu + nrow*w + 1);
Index ldl = internal::first_multiple<Index>(nrow, PacketSize);
Index offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize;
- Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
+ MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
L.setZero();
internal::sparselu_gemm<Scalar>(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride());
diff --git a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h
index ddcd4ec98f8..2e49ef667f4 100644
--- a/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h
+++ b/extern/Eigen3/Eigen/src/SparseLU/SparseLU_pivotL.h
@@ -71,13 +71,14 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia
// Determine the largest abs numerical value for partial pivoting
Index diagind = iperm_c(jcol); // diagonal index
- RealScalar pivmax = 0.0;
+ RealScalar pivmax(-1.0);
Index pivptr = nsupc;
Index diag = emptyIdxLU;
RealScalar rtemp;
Index isub, icol, itemp, k;
for (isub = nsupc; isub < nsupr; ++isub) {
- rtemp = std::abs(lu_col_ptr[isub]);
+ using std::abs;
+ rtemp = abs(lu_col_ptr[isub]);
if (rtemp > pivmax) {
pivmax = rtemp;
pivptr = isub;
@@ -86,8 +87,9 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia
}
// Test for singularity
- if ( pivmax == 0.0 ) {
- pivrow = lsub_ptr[pivptr];
+ if ( pivmax <= RealScalar(0.0) ) {
+ // if pivmax == -1, the column is structurally empty; otherwise it is only numerically zero
+ pivrow = pivmax < RealScalar(0.0) ? diagind : lsub_ptr[pivptr];
perm_r(pivrow) = jcol;
return (jcol+1);
}
@@ -101,7 +103,8 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia
if (diag >= 0 )
{
// Diagonal element exists
- rtemp = std::abs(lu_col_ptr[diag]);
+ using std::abs;
+ rtemp = abs(lu_col_ptr[diag]);
if (rtemp != 0.0 && rtemp >= thresh) pivptr = diag;
}
pivrow = lsub_ptr[pivptr];
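Seeding pivmax with -1 instead of 0 lets pivotL distinguish a structurally empty column (no stored entries, so pivmax is never updated) from a column whose stored entries are all numerically zero. A schematic of that sentinel logic, outside the SparseLU code:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    double scanColumn(const std::vector<double>& col)
    {
      double pivmax = -1.0;                       // sentinel: no entry seen yet
      for (std::size_t k = 0; k < col.size(); ++k)
        pivmax = std::max(pivmax, std::abs(col[k]));
      // pivmax <  0 : structurally empty -> fall back to the diagonal index
      // pivmax == 0 : entries exist but are all exactly zero
      return pivmax;
    }

    int main() { return scanColumn(std::vector<double>()) < 0.0 ? 0 : 1; }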
diff --git a/extern/Eigen3/Eigen/src/SparseQR/SparseQR.h b/extern/Eigen3/Eigen/src/SparseQR/SparseQR.h
index afda43bfc67..a00bd5db124 100644
--- a/extern/Eigen3/Eigen/src/SparseQR/SparseQR.h
+++ b/extern/Eigen3/Eigen/src/SparseQR/SparseQR.h
@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
-// Copyright (C) 2012-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -58,6 +58,7 @@ namespace internal {
* \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module
* OrderingMethods \endlink module for the list of built-in and external ordering methods.
*
+ * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
*
*/
template<typename _MatrixType, typename _OrderingType>
@@ -74,13 +75,26 @@ class SparseQR
typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
public:
- SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+ SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{ }
- SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false)
+ /** Construct a QR factorization of the matrix \a mat.
+ *
+ * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+ *
+ * \sa compute()
+ */
+ SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
{
compute(mat);
}
+
+ /** Computes the QR factorization of the sparse matrix \a mat.
+ *
+ * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+ *
+ * \sa analyzePattern(), factorize()
+ */
void compute(const MatrixType& mat)
{
analyzePattern(mat);
@@ -166,7 +180,7 @@ class SparseQR
y.bottomRows(y.rows()-rank).setZero();
// Apply the column permutation
- if (m_perm_c.size()) dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
+ if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols());
else dest = y.topRows(cols());
m_info = Success;
@@ -206,7 +220,7 @@ class SparseQR
/** \brief Reports whether previous computation was successful.
*
- * \returns \c Success if computation was succesful,
+ * \returns \c Success if computation was successful,
* \c NumericalIssue if the QR factorization reports a numerical problem
* \c InvalidInput if the input matrix is invalid
*
@@ -248,6 +262,7 @@ class SparseQR
IndexVector m_etree; // Column elimination tree
IndexVector m_firstRowElt; // First element in each row
bool m_isQSorted; // whether Q is sorted or not
+ bool m_isEtreeOk; // whether the elimination tree matches the initial input matrix
template <typename, typename > friend struct SparseQR_QProduct;
template <typename > friend struct SparseQRMatrixQReturnType;
@@ -256,19 +271,25 @@ class SparseQR
/** \brief Preprocessing step of a QR factorization
*
+ * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+ *
* In this step, the fill-reducing permutation is computed and applied to the columns of A
- * and the column elimination tree is computed as well. Only the sparcity pattern of \a mat is exploited.
+ * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited.
*
* \note In this step it is assumed that there is no empty row in the matrix \a mat.
*/
template <typename MatrixType, typename OrderingType>
void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
{
+ eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR");
+ // Copy to a column-major matrix if the input is row-major
+ typename internal::conditional<MatrixType::IsRowMajor,QRMatrixType,const MatrixType&>::type matCpy(mat);
// Compute the column fill reducing ordering
OrderingType ord;
- ord(mat, m_perm_c);
+ ord(matCpy, m_perm_c);
Index n = mat.cols();
Index m = mat.rows();
+ Index diagSize = (std::min)(m,n);
if (!m_perm_c.size())
{
@@ -278,22 +299,23 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
// Compute the column elimination tree of the permuted matrix
m_outputPerm_c = m_perm_c.inverse();
- internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+ internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+ m_isEtreeOk = true;
- m_R.resize(n, n);
- m_Q.resize(m, n);
+ m_R.resize(m, n);
+ m_Q.resize(m, diagSize);
// Allocate space for nonzero elements : rough estimation
m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
m_Q.reserve(2*mat.nonZeros());
- m_hcoeffs.resize(n);
+ m_hcoeffs.resize(diagSize);
m_analysisIsok = true;
}
/** \brief Performs the numerical QR factorization of the input matrix
*
* The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with
- * a matrix having the same sparcity pattern than \a mat.
+ * a matrix having the same sparsity pattern as \a mat.
*
* \param mat The sparse column-major matrix
*/
@@ -306,23 +328,47 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
Index m = mat.rows();
Index n = mat.cols();
- IndexVector mark(m); mark.setConstant(-1); // Record the visited nodes
- IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q
- Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
- ScalarVector tval(m); // The dense vector used to compute the current column
- bool found_diag;
-
+ Index diagSize = (std::min)(m,n);
+ IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes
+ IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q
+ Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q
+ ScalarVector tval(m); // The dense vector used to compute the current column
+ RealScalar pivotThreshold = m_threshold;
+
+ m_R.setZero();
+ m_Q.setZero();
m_pmat = mat;
+ if(!m_isEtreeOk)
+ {
+ m_outputPerm_c = m_perm_c.inverse();
+ internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+ m_isEtreeOk = true;
+ }
+
m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
+
// Apply the fill-in reducing permutation lazily:
- for (int i = 0; i < n; i++)
{
- Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
- m_pmat.outerIndexPtr()[p] = mat.outerIndexPtr()[i];
- m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i];
+ // If the input is row major, copy the original column indices,
+ // otherwise directly use the input matrix
+ //
+ IndexVector originalOuterIndicesCpy;
+ const Index *originalOuterIndices = mat.outerIndexPtr();
+ if(MatrixType::IsRowMajor)
+ {
+ originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1);
+ originalOuterIndices = originalOuterIndicesCpy.data();
+ }
+
+ for (int i = 0; i < n; i++)
+ {
+ Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
+ m_pmat.outerIndexPtr()[p] = originalOuterIndices[i];
+ m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i];
+ }
}
- /* Compute the default threshold, see :
+ /* Compute the default threshold as in MATLAB, see:
* Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
* Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
*/
@@ -330,33 +376,35 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{
RealScalar max2Norm = 0.0;
for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm());
- m_threshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
+ if(max2Norm==RealScalar(0))
+ max2Norm = RealScalar(1);
+ pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
}
// Initialize the numerical permutation
m_pivotperm.setIdentity(n);
Index nonzeroCol = 0; // Record the number of valid pivots
-
+ m_Q.startVec(0);
+
// Left looking rank-revealing QR factorization: compute a column of R and Q at a time
- for (Index col = 0; col < (std::min)(n,m); ++col)
+ for (Index col = 0; col < n; ++col)
{
mark.setConstant(-1);
m_R.startVec(col);
- m_Q.startVec(col);
mark(nonzeroCol) = col;
Qidx(0) = nonzeroCol;
nzcolR = 0; nzcolQ = 1;
- found_diag = col>=m;
+ bool found_diag = nonzeroCol>=m;
tval.setZero();
// Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e.,
// all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k.
// Note: if the diagonal entry does not exist, then its contribution must be explicitly added,
// thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found.
- for (typename MatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
+ for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
{
- Index curIdx = nonzeroCol ;
+ Index curIdx = nonzeroCol;
if(itp) curIdx = itp.row();
if(curIdx == nonzeroCol) found_diag = true;
@@ -398,7 +446,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
// Browse all the indexes of R(:,col) in reverse order
for (Index i = nzcolR-1; i >= 0; i--)
{
- Index curIdx = m_pivotperm.indices()(Ridx(i));
+ Index curIdx = Ridx(i);
// Apply the curIdx-th householder vector to the current column (temporarily stored into tval)
Scalar tdot(0);
@@ -427,33 +475,36 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
}
}
} // End update current column
-
- // Compute the Householder reflection that eliminate the current column
- // FIXME this step should call the Householder module.
- Scalar tau;
- RealScalar beta;
- Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
- // First, the squared norm of Q((col+1):m, col)
- RealScalar sqrNorm = 0.;
- for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
+ Scalar tau = 0;
+ RealScalar beta = 0;
- if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
- {
- tau = RealScalar(0);
- beta = numext::real(c0);
- tval(Qidx(0)) = 1;
- }
- else
+ if(nonzeroCol < diagSize)
{
- beta = std::sqrt(numext::abs2(c0) + sqrNorm);
- if(numext::real(c0) >= RealScalar(0))
- beta = -beta;
- tval(Qidx(0)) = 1;
- for (Index itq = 1; itq < nzcolQ; ++itq)
- tval(Qidx(itq)) /= (c0 - beta);
- tau = numext::conj((beta-c0) / beta);
-
+ // Compute the Householder reflection that eliminate the current column
+ // FIXME this step should call the Householder module.
+ Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
+
+ // First, the squared norm of Q((col+1):m, col)
+ RealScalar sqrNorm = 0.;
+ for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
+ if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
+ {
+ beta = numext::real(c0);
+ tval(Qidx(0)) = 1;
+ }
+ else
+ {
+ using std::sqrt;
+ beta = sqrt(numext::abs2(c0) + sqrNorm);
+ if(numext::real(c0) >= RealScalar(0))
+ beta = -beta;
+ tval(Qidx(0)) = 1;
+ for (Index itq = 1; itq < nzcolQ; ++itq)
+ tval(Qidx(itq)) /= (c0 - beta);
+ tau = numext::conj((beta-c0) / beta);
+
+ }
}
// Insert values in R
@@ -467,45 +518,49 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
}
}
- if(abs(beta) >= m_threshold)
+ if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold)
{
m_R.insertBackByOuterInner(col, nonzeroCol) = beta;
- nonzeroCol++;
// The householder coefficient
- m_hcoeffs(col) = tau;
+ m_hcoeffs(nonzeroCol) = tau;
// Record the householder reflections
for (Index itq = 0; itq < nzcolQ; ++itq)
{
Index iQ = Qidx(itq);
- m_Q.insertBackByOuterInnerUnordered(col,iQ) = tval(iQ);
+ m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ);
tval(iQ) = Scalar(0.);
- }
+ }
+ nonzeroCol++;
+ if(nonzeroCol<diagSize)
+ m_Q.startVec(nonzeroCol);
}
else
{
// Zero pivot found: move implicitly this column to the end
- m_hcoeffs(col) = Scalar(0);
for (Index j = nonzeroCol; j < n-1; j++)
std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);
// Recompute the column elimination tree
internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
+ m_isEtreeOk = false;
}
}
+ m_hcoeffs.tail(diagSize-nonzeroCol).setZero();
+
// Finalize the column pointers of the sparse matrices R and Q
m_Q.finalize();
m_Q.makeCompressed();
m_R.finalize();
m_R.makeCompressed();
m_isQSorted = false;
-
+
m_nonzeropivots = nonzeroCol;
if(nonzeroCol<n)
{
// Permute the triangular factor to put the 'dead' columns to the end
- MatrixType tempR(m_R);
+ QRMatrixType tempR(m_R);
m_R = tempR * m_pivotperm;
// Update the column permutation
@@ -561,14 +616,16 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
template<typename DesType>
void evalTo(DesType& res) const
{
+ Index m = m_qr.rows();
Index n = m_qr.cols();
+ Index diagSize = (std::min)(m,n);
res = m_other;
if (m_transpose)
{
eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
//Compute res = Q' * other column by column
for(Index j = 0; j < res.cols(); j++){
- for (Index k = 0; k < n; k++)
+ for (Index k = 0; k < diagSize; k++)
{
Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j));
@@ -581,10 +638,10 @@ struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived
else
{
eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
- // Compute res = Q' * other column by column
+ // Compute res = Q * other column by column
for(Index j = 0; j < res.cols(); j++)
{
- for (Index k = n-1; k >=0; k--)
+ for (Index k = diagSize-1; k >=0; k--)
{
Scalar tau = Scalar(0);
tau = m_qr.m_Q.col(k).dot(res.col(j));
@@ -618,7 +675,7 @@ struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<Sp
return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
}
inline Index rows() const { return m_qr.rows(); }
- inline Index cols() const { return m_qr.cols(); }
+ inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
// To use for operations with the transpose of Q
SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
{
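A minimal SparseQR round trip reflecting the behavior after this patch: the input must be compressed, Q is m-by-min(m,n), and rectangular or rank-deficient systems are solved in the least-squares sense (values illustrative):

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(4, 3);
      A.insert(0, 0) = 1.0;  A.insert(1, 1) = 2.0;
      A.insert(2, 2) = 3.0;  A.insert(3, 0) = 4.0;
      A.makeCompressed();    // required: factorization asserts isCompressed()

      Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
      Eigen::VectorXd b = Eigen::VectorXd::Ones(4);
      Eigen::VectorXd x = qr.solve(b);   // least-squares solution of Ax = b
      std::cout << "rank = " << qr.rank() << ", x = " << x.transpose() << "\n";
      return 0;
    }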
diff --git a/extern/Eigen3/Eigen/src/StlSupport/StdDeque.h b/extern/Eigen3/Eigen/src/StlSupport/StdDeque.h
index 4ee8e5c10a5..aaf66330b17 100644
--- a/extern/Eigen3/Eigen/src/StlSupport/StdDeque.h
+++ b/extern/Eigen3/Eigen/src/StlSupport/StdDeque.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_STDDEQUE_H
#define EIGEN_STDDEQUE_H
-#include "Eigen/src/StlSupport/details.h"
+#include "details.h"
// Define the explicit instantiation (e.g. necessary for the Intel compiler)
#if defined(__INTEL_COMPILER) || defined(__GNUC__)
diff --git a/extern/Eigen3/Eigen/src/StlSupport/StdList.h b/extern/Eigen3/Eigen/src/StlSupport/StdList.h
index 627381ecec0..3c742430c12 100644
--- a/extern/Eigen3/Eigen/src/StlSupport/StdList.h
+++ b/extern/Eigen3/Eigen/src/StlSupport/StdList.h
@@ -10,7 +10,7 @@
#ifndef EIGEN_STDLIST_H
#define EIGEN_STDLIST_H
-#include "Eigen/src/StlSupport/details.h"
+#include "details.h"
// Define the explicit instantiation (e.g. necessary for the Intel compiler)
#if defined(__INTEL_COMPILER) || defined(__GNUC__)
diff --git a/extern/Eigen3/Eigen/src/StlSupport/StdVector.h b/extern/Eigen3/Eigen/src/StlSupport/StdVector.h
index 40a9abefa82..611664a2e8a 100644
--- a/extern/Eigen3/Eigen/src/StlSupport/StdVector.h
+++ b/extern/Eigen3/Eigen/src/StlSupport/StdVector.h
@@ -11,7 +11,7 @@
#ifndef EIGEN_STDVECTOR_H
#define EIGEN_STDVECTOR_H
-#include "Eigen/src/StlSupport/details.h"
+#include "details.h"
/**
* This section contains a convenience MACRO which allows an easy specialization of
diff --git a/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h b/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
index 3a48cecf769..29c60c37875 100644
--- a/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
+++ b/extern/Eigen3/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -107,6 +107,16 @@ inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *N
return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
}
+namespace internal {
+ template<typename T> struct umfpack_helper_is_sparse_plain : false_type {};
+ template<typename Scalar, int Options, typename StorageIndex>
+ struct umfpack_helper_is_sparse_plain<SparseMatrix<Scalar,Options,StorageIndex> >
+ : true_type {};
+ template<typename Scalar, int Options, typename StorageIndex>
+ struct umfpack_helper_is_sparse_plain<MappedSparseMatrix<Scalar,Options,StorageIndex> >
+ : true_type {};
+}
+
/** \ingroup UmfPackSupport_Module
* \brief A sparse LU factorization and solver based on UmfPack
*
@@ -192,10 +202,14 @@ class UmfPackLU : internal::noncopyable
* Note that the matrix should be column-major, and in compressed format for best performance.
* \sa SparseMatrix::makeCompressed().
*/
- void compute(const MatrixType& matrix)
+ template<typename InputMatrixType>
+ void compute(const InputMatrixType& matrix)
{
- analyzePattern(matrix);
- factorize(matrix);
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
+ grapInput(matrix.derived());
+ analyzePattern_impl();
+ factorize_impl();
}
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
@@ -230,23 +244,15 @@ class UmfPackLU : internal::noncopyable
*
* \sa factorize(), compute()
*/
- void analyzePattern(const MatrixType& matrix)
+ template<typename InputMatrixType>
+ void analyzePattern(const InputMatrixType& matrix)
{
- if(m_symbolic)
- umfpack_free_symbolic(&m_symbolic,Scalar());
- if(m_numeric)
- umfpack_free_numeric(&m_numeric,Scalar());
+ if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
+ if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar());
- grapInput(matrix);
-
- int errorCode = 0;
- errorCode = umfpack_symbolic(matrix.rows(), matrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
- &m_symbolic, 0, 0);
+ grapInput(matrix.derived());
- m_isInitialized = true;
- m_info = errorCode ? InvalidInput : Success;
- m_analysisIsOk = true;
- m_factorizationIsOk = false;
+ analyzePattern_impl();
}
/** Performs a numeric decomposition of \a matrix
@@ -255,20 +261,16 @@ class UmfPackLU : internal::noncopyable
*
* \sa analyzePattern(), compute()
*/
- void factorize(const MatrixType& matrix)
+ template<typename InputMatrixType>
+ void factorize(const InputMatrixType& matrix)
{
eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
if(m_numeric)
umfpack_free_numeric(&m_numeric,Scalar());
- grapInput(matrix);
-
- int errorCode;
- errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
- m_symbolic, &m_numeric, 0, 0);
-
- m_info = errorCode ? NumericalIssue : Success;
- m_factorizationIsOk = true;
+ grapInput(matrix.derived());
+
+ factorize_impl();
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -283,19 +285,20 @@ class UmfPackLU : internal::noncopyable
protected:
-
void init()
{
- m_info = InvalidInput;
- m_isInitialized = false;
- m_numeric = 0;
- m_symbolic = 0;
- m_outerIndexPtr = 0;
- m_innerIndexPtr = 0;
- m_valuePtr = 0;
+ m_info = InvalidInput;
+ m_isInitialized = false;
+ m_numeric = 0;
+ m_symbolic = 0;
+ m_outerIndexPtr = 0;
+ m_innerIndexPtr = 0;
+ m_valuePtr = 0;
+ m_extractedDataAreDirty = true;
}
- void grapInput(const MatrixType& mat)
+ template<typename InputMatrixType>
+ void grapInput_impl(const InputMatrixType& mat, internal::true_type)
{
m_copyMatrix.resize(mat.rows(), mat.cols());
if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() )
@@ -313,6 +316,45 @@ class UmfPackLU : internal::noncopyable
m_valuePtr = mat.valuePtr();
}
}
+
+ template<typename InputMatrixType>
+ void grapInput_impl(const InputMatrixType& mat, internal::false_type)
+ {
+ m_copyMatrix = mat;
+ m_outerIndexPtr = m_copyMatrix.outerIndexPtr();
+ m_innerIndexPtr = m_copyMatrix.innerIndexPtr();
+ m_valuePtr = m_copyMatrix.valuePtr();
+ }
+
+ template<typename InputMatrixType>
+ void grapInput(const InputMatrixType& mat)
+ {
+ grapInput_impl(mat, internal::umfpack_helper_is_sparse_plain<InputMatrixType>());
+ }
+
+ void analyzePattern_impl()
+ {
+ int errorCode = 0;
+ errorCode = umfpack_symbolic(m_copyMatrix.rows(), m_copyMatrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
+ &m_symbolic, 0, 0);
+
+ m_isInitialized = true;
+ m_info = errorCode ? InvalidInput : Success;
+ m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ m_extractedDataAreDirty = true;
+ }
+
+ void factorize_impl()
+ {
+ int errorCode;
+ errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
+ m_symbolic, &m_numeric, 0, 0);
+
+ m_info = errorCode ? NumericalIssue : Success;
+ m_factorizationIsOk = true;
+ m_extractedDataAreDirty = true;
+ }
// cached data to reduce reallocation, etc.
mutable LUMatrixType m_l;
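grapInput now dispatches on whether the input is a plain sparse type whose internal arrays can be borrowed directly, or an arbitrary expression that must first be copied. Eigen 3.2 targets C++03 and therefore uses its own internal::true_type/false_type; a sketch of the same tag-dispatch idiom using the standard library types instead:

    #include <type_traits>

    template<typename T> struct is_plain_sparse : std::false_type {};
    // ... specializations for directly-usable types would go here ...

    template<typename M> void grab(const M& m, std::true_type)  { (void)m; /* borrow pointers */ }
    template<typename M> void grab(const M& m, std::false_type) { (void)m; /* copy, then borrow */ }

    template<typename M> void grab(const M& m)
    {
      grab(m, is_plain_sparse<M>());   // overload chosen at compile time
    }

    int main() { int dummy = 0; grab(dummy); return 0; }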
diff --git a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
index 5c8c476eecf..1951286f3ae 100644
--- a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
+++ b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseBinaryOps.h
@@ -70,6 +70,43 @@ max
return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
}
+
+#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+ return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \
+}\
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \
+EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+ return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \
+} \
+friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+ return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \
+}
+
+#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+ return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \
+} \
+\
+inline const RCmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+ return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \
+} \
+friend inline const Cmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+ return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \
+}
+
+
/** \returns an expression of the coefficient-wise \< operator of *this and \a other
*
* Example: \include Cwise_less.cpp
@@ -77,7 +114,7 @@ max
*
* \sa all(), any(), operator>(), operator<=()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)
+EIGEN_MAKE_CWISE_COMP_OP(operator<, LT)
/** \returns an expression of the coefficient-wise \<= operator of *this and \a other
*
@@ -86,7 +123,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)
*
* \sa all(), any(), operator>=(), operator<()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)
+EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE)
/** \returns an expression of the coefficient-wise \> operator of *this and \a other
*
@@ -95,7 +132,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)
*
* \sa all(), any(), operator>=(), operator<()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT)
/** \returns an expression of the coefficient-wise \>= operator of *this and \a other
*
@@ -104,7 +141,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)
*
* \sa all(), any(), operator>(), operator<=()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE)
/** \returns an expression of the coefficient-wise == operator of *this and \a other
*
@@ -118,7 +155,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)
*
* \sa all(), any(), isApprox(), isMuchSmallerThan()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)
+EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ)
/** \returns an expression of the coefficient-wise != operator of *this and \a other
*
@@ -132,7 +169,10 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)
*
* \sa all(), any(), isApprox(), isMuchSmallerThan()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator!=,std::not_equal_to)
+EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ)
+
+#undef EIGEN_MAKE_CWISE_COMP_OP
+#undef EIGEN_MAKE_CWISE_COMP_R_OP
// scalar addition
@@ -209,3 +249,5 @@ operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
}
+
+
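The new comparison macros cover array-vs-array and array-vs-scalar, with the scalar permitted on either side of the operator; typical use (values illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXd a(4);
      a << 0.5, 1.5, 2.5, 3.5;
      Eigen::ArrayXd b = Eigen::ArrayXd::Constant(4, 1.5);
      std::cout << (a < 2.0).count() << "\n";   // 2 : array vs scalar
      std::cout << (2.0 < a).count() << "\n";   // 2 : scalar on the left, via the friend overload
      std::cout << (a >= b).count() << "\n";    // 3 : array vs array
      return 0;
    }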
diff --git a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
index a596367906f..1c3ed3fcd70 100644
--- a/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
+++ b/extern/Eigen3/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -185,19 +185,3 @@ cube() const
{
return derived();
}
-
-#define EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(METHOD_NAME,FUNCTOR) \
- inline const CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
- METHOD_NAME(const Scalar& s) const { \
- return CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
- (derived(), std::bind2nd(FUNCTOR<Scalar>(), s)); \
- }
-
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator==, std::equal_to)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator!=, std::not_equal_to)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<, std::less)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<=, std::less_equal)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>, std::greater)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>=, std::greater_equal)
-
-
diff --git a/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h
index 7f62149e04b..c4a042b7027 100644
--- a/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h
+++ b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseBinaryOps.h
@@ -124,3 +124,20 @@ cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
+
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType;
+
+/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
+ */
+inline const CwiseScalarEqualReturnType
+cwiseEqual(const Scalar& s) const
+{
+ return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,internal::cmp_EQ>());
+}
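cwiseEqual(scalar) moves here and is rebuilt on internal::scalar_cmp_op instead of the C++98 binders (std::bind1st and friends are deprecated in C++11); usage is unchanged (values illustrative):

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXi m(2, 2);
      m << 1, 0,
           0, 1;
      std::cout << m.cwiseEqual(0).count() << "\n";   // 2 coefficients equal to 0
      return 0;
    }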
diff --git a/extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h
index 0cf0640bae6..8de10935d55 100644
--- a/extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h
+++ b/extern/Eigen3/Eigen/src/plugins/MatrixCwiseUnaryOps.h
@@ -50,18 +50,3 @@ cwiseSqrt() const { return derived(); }
inline const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
cwiseInverse() const { return derived(); }
-/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
- *
- * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
- * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
- * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
- * isMuchSmallerThan().
- *
- * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
- */
-inline const CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >, const Derived>
-cwiseEqual(const Scalar& s) const
-{
- return CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >,const Derived>
- (derived(), std::bind1st(std::equal_to<Scalar>(), s));
-}
diff --git a/extern/Eigen3/eigen-update.sh b/extern/Eigen3/eigen-update.sh
index 1cf0337adf6..9d22098c4fd 100755
--- a/extern/Eigen3/eigen-update.sh
+++ b/extern/Eigen3/eigen-update.sh
@@ -17,7 +17,7 @@ if [ -d eigen ]
then
cd eigen
# put here the version you want to use
- hg up 3.2.1
+ hg up 3.2.7
rm -f `find Eigen/ -type f -name "CMakeLists.txt"`
cp -r Eigen ..
cd ..