tomoto 0.1.0
- checksums.yaml +7 -0
- data/CHANGELOG.md +3 -0
- data/LICENSE.txt +22 -0
- data/README.md +123 -0
- data/ext/tomoto/ext.cpp +245 -0
- data/ext/tomoto/extconf.rb +28 -0
- data/lib/tomoto.rb +12 -0
- data/lib/tomoto/ct.rb +11 -0
- data/lib/tomoto/hdp.rb +11 -0
- data/lib/tomoto/lda.rb +67 -0
- data/lib/tomoto/version.rb +3 -0
- data/vendor/EigenRand/EigenRand/Core.h +1139 -0
- data/vendor/EigenRand/EigenRand/Dists/Basic.h +111 -0
- data/vendor/EigenRand/EigenRand/Dists/Discrete.h +877 -0
- data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +108 -0
- data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +626 -0
- data/vendor/EigenRand/EigenRand/EigenRand +19 -0
- data/vendor/EigenRand/EigenRand/Macro.h +24 -0
- data/vendor/EigenRand/EigenRand/MorePacketMath.h +978 -0
- data/vendor/EigenRand/EigenRand/PacketFilter.h +286 -0
- data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +624 -0
- data/vendor/EigenRand/EigenRand/RandUtils.h +413 -0
- data/vendor/EigenRand/EigenRand/doc.h +220 -0
- data/vendor/EigenRand/LICENSE +21 -0
- data/vendor/EigenRand/README.md +288 -0
- data/vendor/eigen/COPYING.BSD +26 -0
- data/vendor/eigen/COPYING.GPL +674 -0
- data/vendor/eigen/COPYING.LGPL +502 -0
- data/vendor/eigen/COPYING.MINPACK +52 -0
- data/vendor/eigen/COPYING.MPL2 +373 -0
- data/vendor/eigen/COPYING.README +18 -0
- data/vendor/eigen/Eigen/CMakeLists.txt +19 -0
- data/vendor/eigen/Eigen/Cholesky +46 -0
- data/vendor/eigen/Eigen/CholmodSupport +48 -0
- data/vendor/eigen/Eigen/Core +537 -0
- data/vendor/eigen/Eigen/Dense +7 -0
- data/vendor/eigen/Eigen/Eigen +2 -0
- data/vendor/eigen/Eigen/Eigenvalues +61 -0
- data/vendor/eigen/Eigen/Geometry +62 -0
- data/vendor/eigen/Eigen/Householder +30 -0
- data/vendor/eigen/Eigen/IterativeLinearSolvers +48 -0
- data/vendor/eigen/Eigen/Jacobi +33 -0
- data/vendor/eigen/Eigen/LU +50 -0
- data/vendor/eigen/Eigen/MetisSupport +35 -0
- data/vendor/eigen/Eigen/OrderingMethods +73 -0
- data/vendor/eigen/Eigen/PaStiXSupport +48 -0
- data/vendor/eigen/Eigen/PardisoSupport +35 -0
- data/vendor/eigen/Eigen/QR +51 -0
- data/vendor/eigen/Eigen/QtAlignedMalloc +40 -0
- data/vendor/eigen/Eigen/SPQRSupport +34 -0
- data/vendor/eigen/Eigen/SVD +51 -0
- data/vendor/eigen/Eigen/Sparse +36 -0
- data/vendor/eigen/Eigen/SparseCholesky +45 -0
- data/vendor/eigen/Eigen/SparseCore +69 -0
- data/vendor/eigen/Eigen/SparseLU +46 -0
- data/vendor/eigen/Eigen/SparseQR +37 -0
- data/vendor/eigen/Eigen/StdDeque +27 -0
- data/vendor/eigen/Eigen/StdList +26 -0
- data/vendor/eigen/Eigen/StdVector +27 -0
- data/vendor/eigen/Eigen/SuperLUSupport +64 -0
- data/vendor/eigen/Eigen/UmfPackSupport +40 -0
- data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +673 -0
- data/vendor/eigen/Eigen/src/Cholesky/LLT.h +542 -0
- data/vendor/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
- data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +639 -0
- data/vendor/eigen/Eigen/src/Core/Array.h +329 -0
- data/vendor/eigen/Eigen/src/Core/ArrayBase.h +226 -0
- data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +209 -0
- data/vendor/eigen/Eigen/src/Core/Assign.h +90 -0
- data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +935 -0
- data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +178 -0
- data/vendor/eigen/Eigen/src/Core/BandMatrix.h +353 -0
- data/vendor/eigen/Eigen/src/Core/Block.h +452 -0
- data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +164 -0
- data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +160 -0
- data/vendor/eigen/Eigen/src/Core/ConditionEstimator.h +175 -0
- data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +1688 -0
- data/vendor/eigen/Eigen/src/Core/CoreIterators.h +127 -0
- data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +184 -0
- data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +866 -0
- data/vendor/eigen/Eigen/src/Core/CwiseTernaryOp.h +197 -0
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +103 -0
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +128 -0
- data/vendor/eigen/Eigen/src/Core/DenseBase.h +611 -0
- data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +681 -0
- data/vendor/eigen/Eigen/src/Core/DenseStorage.h +570 -0
- data/vendor/eigen/Eigen/src/Core/Diagonal.h +260 -0
- data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +343 -0
- data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +28 -0
- data/vendor/eigen/Eigen/src/Core/Dot.h +318 -0
- data/vendor/eigen/Eigen/src/Core/EigenBase.h +159 -0
- data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +146 -0
- data/vendor/eigen/Eigen/src/Core/Fuzzy.h +155 -0
- data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +455 -0
- data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +593 -0
- data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +187 -0
- data/vendor/eigen/Eigen/src/Core/IO.h +225 -0
- data/vendor/eigen/Eigen/src/Core/Inverse.h +118 -0
- data/vendor/eigen/Eigen/src/Core/Map.h +171 -0
- data/vendor/eigen/Eigen/src/Core/MapBase.h +303 -0
- data/vendor/eigen/Eigen/src/Core/MathFunctions.h +1415 -0
- data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +101 -0
- data/vendor/eigen/Eigen/src/Core/Matrix.h +459 -0
- data/vendor/eigen/Eigen/src/Core/MatrixBase.h +529 -0
- data/vendor/eigen/Eigen/src/Core/NestByValue.h +110 -0
- data/vendor/eigen/Eigen/src/Core/NoAlias.h +108 -0
- data/vendor/eigen/Eigen/src/Core/NumTraits.h +248 -0
- data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +633 -0
- data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +1035 -0
- data/vendor/eigen/Eigen/src/Core/Product.h +186 -0
- data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +1112 -0
- data/vendor/eigen/Eigen/src/Core/Random.h +182 -0
- data/vendor/eigen/Eigen/src/Core/Redux.h +505 -0
- data/vendor/eigen/Eigen/src/Core/Ref.h +283 -0
- data/vendor/eigen/Eigen/src/Core/Replicate.h +142 -0
- data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +117 -0
- data/vendor/eigen/Eigen/src/Core/Reverse.h +211 -0
- data/vendor/eigen/Eigen/src/Core/Select.h +162 -0
- data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +352 -0
- data/vendor/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
- data/vendor/eigen/Eigen/src/Core/Solve.h +188 -0
- data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +235 -0
- data/vendor/eigen/Eigen/src/Core/SolverBase.h +130 -0
- data/vendor/eigen/Eigen/src/Core/StableNorm.h +221 -0
- data/vendor/eigen/Eigen/src/Core/Stride.h +111 -0
- data/vendor/eigen/Eigen/src/Core/Swap.h +67 -0
- data/vendor/eigen/Eigen/src/Core/Transpose.h +403 -0
- data/vendor/eigen/Eigen/src/Core/Transpositions.h +407 -0
- data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +983 -0
- data/vendor/eigen/Eigen/src/Core/VectorBlock.h +96 -0
- data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +695 -0
- data/vendor/eigen/Eigen/src/Core/Visitor.h +273 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +451 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +439 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +637 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +51 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +391 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1316 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +430 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +322 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +1061 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +103 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +674 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/MathFunctions.h +91 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +333 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +1124 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +212 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +29 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +49 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +490 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +91 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +760 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +471 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +562 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +895 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +77 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +397 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +137 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +945 -0
- data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +168 -0
- data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +475 -0
- data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +188 -0
- data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +136 -0
- data/vendor/eigen/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
- data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +792 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2156 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +492 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +311 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +122 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +619 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
- data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +163 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +521 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +287 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +260 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +93 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +466 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +315 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +335 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +163 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +145 -0
- data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +398 -0
- data/vendor/eigen/Eigen/src/Core/util/Constants.h +547 -0
- data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +83 -0
- data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +302 -0
- data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +130 -0
- data/vendor/eigen/Eigen/src/Core/util/Macros.h +1001 -0
- data/vendor/eigen/Eigen/src/Core/util/Memory.h +993 -0
- data/vendor/eigen/Eigen/src/Core/util/Meta.h +534 -0
- data/vendor/eigen/Eigen/src/Core/util/NonMPL2.h +3 -0
- data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +27 -0
- data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +218 -0
- data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +821 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +459 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +654 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +546 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +870 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +556 -0
- data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +392 -0
- data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +247 -0
- data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +114 -0
- data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +497 -0
- data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +282 -0
- data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +234 -0
- data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +195 -0
- data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +814 -0
- data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +199 -0
- data/vendor/eigen/Eigen/src/Geometry/RotationBase.h +206 -0
- data/vendor/eigen/Eigen/src/Geometry/Scaling.h +170 -0
- data/vendor/eigen/Eigen/src/Geometry/Transform.h +1542 -0
- data/vendor/eigen/Eigen/src/Geometry/Translation.h +208 -0
- data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +166 -0
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +161 -0
- data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +103 -0
- data/vendor/eigen/Eigen/src/Householder/Householder.h +172 -0
- data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +470 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +228 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +246 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +400 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +462 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +394 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +216 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +115 -0
- data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +462 -0
- data/vendor/eigen/Eigen/src/LU/Determinant.h +101 -0
- data/vendor/eigen/Eigen/src/LU/FullPivLU.h +891 -0
- data/vendor/eigen/Eigen/src/LU/InverseImpl.h +415 -0
- data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +611 -0
- data/vendor/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
- data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +338 -0
- data/vendor/eigen/Eigen/src/MetisSupport/MetisSupport.h +137 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +445 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +1843 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +157 -0
- data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
- data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +543 -0
- data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +653 -0
- data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
- data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +562 -0
- data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +676 -0
- data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +409 -0
- data/vendor/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
- data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +313 -0
- data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +1246 -0
- data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +804 -0
- data/vendor/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
- data/vendor/eigen/Eigen/src/SVD/SVDBase.h +315 -0
- data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +689 -0
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +199 -0
- data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +377 -0
- data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +258 -0
- data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
- data/vendor/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +216 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +603 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseColEtree.h +206 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +341 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +726 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +148 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +320 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseDot.h +98 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseMap.h +305 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +1403 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +405 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparsePermutation.h +178 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +169 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseRedux.h +49 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +397 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +656 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseTranspose.h +92 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +178 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +478 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +253 -0
- data/vendor/eigen/Eigen/src/SparseCore/TriangularSolver.h +315 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +773 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +301 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
- data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +745 -0
- data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +126 -0
- data/vendor/eigen/Eigen/src/StlSupport/StdList.h +106 -0
- data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +131 -0
- data/vendor/eigen/Eigen/src/StlSupport/details.h +84 -0
- data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +1027 -0
- data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +506 -0
- data/vendor/eigen/Eigen/src/misc/Image.h +82 -0
- data/vendor/eigen/Eigen/src/misc/Kernel.h +79 -0
- data/vendor/eigen/Eigen/src/misc/RealSvd2x2.h +55 -0
- data/vendor/eigen/Eigen/src/misc/blas.h +440 -0
- data/vendor/eigen/Eigen/src/misc/lapack.h +152 -0
- data/vendor/eigen/Eigen/src/misc/lapacke.h +16291 -0
- data/vendor/eigen/Eigen/src/misc/lapacke_mangling.h +17 -0
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +332 -0
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +552 -0
- data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +1058 -0
- data/vendor/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
- data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +163 -0
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +85 -0
- data/vendor/eigen/README.md +3 -0
- data/vendor/eigen/bench/README.txt +55 -0
- data/vendor/eigen/bench/btl/COPYING +340 -0
- data/vendor/eigen/bench/btl/README +154 -0
- data/vendor/eigen/bench/tensors/README +21 -0
- data/vendor/eigen/blas/README.txt +6 -0
- data/vendor/eigen/demos/mandelbrot/README +10 -0
- data/vendor/eigen/demos/mix_eigen_and_c/README +9 -0
- data/vendor/eigen/demos/opengl/README +13 -0
- data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +1760 -0
- data/vendor/eigen/unsupported/README.txt +50 -0
- data/vendor/tomotopy/LICENSE +21 -0
- data/vendor/tomotopy/README.kr.rst +375 -0
- data/vendor/tomotopy/README.rst +382 -0
- data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +362 -0
- data/vendor/tomotopy/src/Labeling/FoRelevance.h +88 -0
- data/vendor/tomotopy/src/Labeling/Labeler.h +50 -0
- data/vendor/tomotopy/src/TopicModel/CT.h +37 -0
- data/vendor/tomotopy/src/TopicModel/CTModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/CTModel.hpp +293 -0
- data/vendor/tomotopy/src/TopicModel/DMR.h +51 -0
- data/vendor/tomotopy/src/TopicModel/DMRModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +374 -0
- data/vendor/tomotopy/src/TopicModel/DT.h +65 -0
- data/vendor/tomotopy/src/TopicModel/DTM.h +22 -0
- data/vendor/tomotopy/src/TopicModel/DTModel.cpp +15 -0
- data/vendor/tomotopy/src/TopicModel/DTModel.hpp +572 -0
- data/vendor/tomotopy/src/TopicModel/GDMR.h +37 -0
- data/vendor/tomotopy/src/TopicModel/GDMRModel.cpp +14 -0
- data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +485 -0
- data/vendor/tomotopy/src/TopicModel/HDP.h +74 -0
- data/vendor/tomotopy/src/TopicModel/HDPModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +592 -0
- data/vendor/tomotopy/src/TopicModel/HLDA.h +40 -0
- data/vendor/tomotopy/src/TopicModel/HLDAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +681 -0
- data/vendor/tomotopy/src/TopicModel/HPA.h +27 -0
- data/vendor/tomotopy/src/TopicModel/HPAModel.cpp +21 -0
- data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +588 -0
- data/vendor/tomotopy/src/TopicModel/LDA.h +144 -0
- data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +442 -0
- data/vendor/tomotopy/src/TopicModel/LDAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +1058 -0
- data/vendor/tomotopy/src/TopicModel/LLDA.h +45 -0
- data/vendor/tomotopy/src/TopicModel/LLDAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +203 -0
- data/vendor/tomotopy/src/TopicModel/MGLDA.h +63 -0
- data/vendor/tomotopy/src/TopicModel/MGLDAModel.cpp +17 -0
- data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +558 -0
- data/vendor/tomotopy/src/TopicModel/PA.h +43 -0
- data/vendor/tomotopy/src/TopicModel/PAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/PAModel.hpp +467 -0
- data/vendor/tomotopy/src/TopicModel/PLDA.h +17 -0
- data/vendor/tomotopy/src/TopicModel/PLDAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +214 -0
- data/vendor/tomotopy/src/TopicModel/SLDA.h +54 -0
- data/vendor/tomotopy/src/TopicModel/SLDAModel.cpp +17 -0
- data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +456 -0
- data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +692 -0
- data/vendor/tomotopy/src/Utils/AliasMethod.hpp +169 -0
- data/vendor/tomotopy/src/Utils/Dictionary.h +80 -0
- data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +181 -0
- data/vendor/tomotopy/src/Utils/LBFGS.h +202 -0
- data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBacktracking.h +120 -0
- data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBracketing.h +122 -0
- data/vendor/tomotopy/src/Utils/LBFGS/Param.h +213 -0
- data/vendor/tomotopy/src/Utils/LUT.hpp +82 -0
- data/vendor/tomotopy/src/Utils/MultiNormalDistribution.hpp +69 -0
- data/vendor/tomotopy/src/Utils/PolyaGamma.hpp +200 -0
- data/vendor/tomotopy/src/Utils/PolyaGammaHybrid.hpp +672 -0
- data/vendor/tomotopy/src/Utils/ThreadPool.hpp +150 -0
- data/vendor/tomotopy/src/Utils/Trie.hpp +220 -0
- data/vendor/tomotopy/src/Utils/TruncMultiNormal.hpp +94 -0
- data/vendor/tomotopy/src/Utils/Utils.hpp +337 -0
- data/vendor/tomotopy/src/Utils/avx_gamma.h +46 -0
- data/vendor/tomotopy/src/Utils/avx_mathfun.h +736 -0
- data/vendor/tomotopy/src/Utils/exception.h +28 -0
- data/vendor/tomotopy/src/Utils/math.h +281 -0
- data/vendor/tomotopy/src/Utils/rtnorm.hpp +2690 -0
- data/vendor/tomotopy/src/Utils/sample.hpp +192 -0
- data/vendor/tomotopy/src/Utils/serializer.hpp +695 -0
- data/vendor/tomotopy/src/Utils/slp.hpp +131 -0
- data/vendor/tomotopy/src/Utils/sse_gamma.h +48 -0
- data/vendor/tomotopy/src/Utils/sse_mathfun.h +710 -0
- data/vendor/tomotopy/src/Utils/text.hpp +49 -0
- data/vendor/tomotopy/src/Utils/tvector.hpp +543 -0
- metadata +531 -0
data/vendor/eigen/Eigen/src/Core/util/Memory.h
@@ -0,0 +1,993 @@
|
|
1
|
+
// This file is part of Eigen, a lightweight C++ template library
|
2
|
+
// for linear algebra.
|
3
|
+
//
|
4
|
+
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
|
5
|
+
// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
|
6
|
+
// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>
|
7
|
+
// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
|
8
|
+
// Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>
|
9
|
+
// Copyright (C) 2013 Pavel Holoborodko <pavel@holoborodko.com>
|
10
|
+
//
|
11
|
+
// This Source Code Form is subject to the terms of the Mozilla
|
12
|
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
13
|
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
14
|
+
|
15
|
+
|
16
|
+
/*****************************************************************************
|
17
|
+
*** Platform checks for aligned malloc functions ***
|
18
|
+
*****************************************************************************/
|
19
|
+
|
20
|
+
#ifndef EIGEN_MEMORY_H
|
21
|
+
#define EIGEN_MEMORY_H
|
22
|
+
|
23
|
+
#ifndef EIGEN_MALLOC_ALREADY_ALIGNED
|
24
|
+
|
25
|
+
// Try to determine automatically if malloc is already aligned.
|
26
|
+
|
27
|
+
// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
|
28
|
+
// http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
|
29
|
+
// This is true at least since glibc 2.8.
|
30
|
+
// This leaves the question how to detect 64-bit. According to this document,
|
31
|
+
// http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
|
32
|
+
// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
|
33
|
+
// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
|
34
|
+
#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
|
35
|
+
&& defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ ) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)
|
36
|
+
#define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
|
37
|
+
#else
|
38
|
+
#define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
|
39
|
+
#endif
|
40
|
+
|
41
|
+
// FreeBSD 6 seems to have 16-byte aligned malloc
|
42
|
+
// See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
|
43
|
+
// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
|
44
|
+
// See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
|
45
|
+
#if defined(__FreeBSD__) && !(EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)
|
46
|
+
#define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
|
47
|
+
#else
|
48
|
+
#define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
|
49
|
+
#endif
|
50
|
+
|
51
|
+
#if (EIGEN_OS_MAC && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) \
|
52
|
+
|| (EIGEN_OS_WIN64 && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) \
|
53
|
+
|| EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \
|
54
|
+
|| EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
|
55
|
+
#define EIGEN_MALLOC_ALREADY_ALIGNED 1
|
56
|
+
#else
|
57
|
+
#define EIGEN_MALLOC_ALREADY_ALIGNED 0
|
58
|
+
#endif
|
59
|
+
|
60
|
+
#endif
|
61
|
+
|
62
|
+
namespace Eigen {
|
63
|
+
|
64
|
+
namespace internal {
|
65
|
+
|
66
|
+
EIGEN_DEVICE_FUNC
|
67
|
+
inline void throw_std_bad_alloc()
|
68
|
+
{
|
69
|
+
#ifdef EIGEN_EXCEPTIONS
|
70
|
+
throw std::bad_alloc();
|
71
|
+
#else
|
72
|
+
std::size_t huge = static_cast<std::size_t>(-1);
|
73
|
+
::operator new(huge);
|
74
|
+
#endif
|
75
|
+
}
|
76
|
+
|
77
|
+
/*****************************************************************************
|
78
|
+
*** Implementation of handmade aligned functions ***
|
79
|
+
*****************************************************************************/
|
80
|
+
|
81
|
+
/* ----- Hand made implementations of aligned malloc/free and realloc ----- */
|
82
|
+
|
83
|
+
/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
|
84
|
+
* Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
|
85
|
+
*/
|
86
|
+
inline void* handmade_aligned_malloc(std::size_t size)
|
87
|
+
{
|
88
|
+
void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES);
|
89
|
+
if (original == 0) return 0;
|
90
|
+
void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
|
91
|
+
*(reinterpret_cast<void**>(aligned) - 1) = original;
|
92
|
+
return aligned;
|
93
|
+
}
|
94
|
+
|
95
|
+
/** \internal Frees memory allocated with handmade_aligned_malloc */
|
96
|
+
inline void handmade_aligned_free(void *ptr)
|
97
|
+
{
|
98
|
+
if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
|
99
|
+
}
|
100
|
+
|
101
|
+
/** \internal
|
102
|
+
* \brief Reallocates aligned memory.
|
103
|
+
* Since we know that our handmade version is based on std::malloc
|
104
|
+
* we can use std::realloc to implement efficient reallocation.
|
105
|
+
*/
|
106
|
+
inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)
|
107
|
+
{
|
108
|
+
if (ptr == 0) return handmade_aligned_malloc(size);
|
109
|
+
void *original = *(reinterpret_cast<void**>(ptr) - 1);
|
110
|
+
std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
|
111
|
+
original = std::realloc(original,size+EIGEN_DEFAULT_ALIGN_BYTES);
|
112
|
+
if (original == 0) return 0;
|
113
|
+
void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
|
114
|
+
void *previous_aligned = static_cast<char *>(original)+previous_offset;
|
115
|
+
if(aligned!=previous_aligned)
|
116
|
+
std::memmove(aligned, previous_aligned, size);
|
117
|
+
|
118
|
+
*(reinterpret_cast<void**>(aligned) - 1) = original;
|
119
|
+
return aligned;
|
120
|
+
}
|
121
|
+
|
122
|
+
/*****************************************************************************
|
123
|
+
*** Implementation of portable aligned versions of malloc/free/realloc ***
|
124
|
+
*****************************************************************************/
|
125
|
+
|
126
|
+
#ifdef EIGEN_NO_MALLOC
|
127
|
+
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
|
128
|
+
{
|
129
|
+
eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
|
130
|
+
}
|
131
|
+
#elif defined EIGEN_RUNTIME_NO_MALLOC
|
132
|
+
EIGEN_DEVICE_FUNC inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
|
133
|
+
{
|
134
|
+
static bool value = true;
|
135
|
+
if (update == 1)
|
136
|
+
value = new_value;
|
137
|
+
return value;
|
138
|
+
}
|
139
|
+
EIGEN_DEVICE_FUNC inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
|
140
|
+
EIGEN_DEVICE_FUNC inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
|
141
|
+
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
|
142
|
+
{
|
143
|
+
eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
|
144
|
+
}
|
145
|
+
#else
|
146
|
+
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
|
147
|
+
{}
|
148
|
+
#endif
|
149
|
+
|
150
|
+
/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 or 32 bytes alignment depending on the requirements.
|
151
|
+
* On allocation error, the returned pointer is null, and std::bad_alloc is thrown.
|
152
|
+
*/
|
153
|
+
EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size)
|
154
|
+
{
|
155
|
+
check_that_malloc_is_allowed();
|
156
|
+
|
157
|
+
void *result;
|
158
|
+
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
|
159
|
+
result = std::malloc(size);
|
160
|
+
#if EIGEN_DEFAULT_ALIGN_BYTES==16
|
161
|
+
eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fallback to handmade alignd memory allocator.");
|
162
|
+
#endif
|
163
|
+
#else
|
164
|
+
result = handmade_aligned_malloc(size);
|
165
|
+
#endif
|
166
|
+
|
167
|
+
if(!result && size)
|
168
|
+
throw_std_bad_alloc();
|
169
|
+
|
170
|
+
return result;
|
171
|
+
}
|
172
|
+
|
173
|
+
/** \internal Frees memory allocated with aligned_malloc. */
|
174
|
+
EIGEN_DEVICE_FUNC inline void aligned_free(void *ptr)
|
175
|
+
{
|
176
|
+
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
|
177
|
+
std::free(ptr);
|
178
|
+
#else
|
179
|
+
handmade_aligned_free(ptr);
|
180
|
+
#endif
|
181
|
+
}
|
182
|
+
|
183
|
+
/**
|
184
|
+
* \internal
|
185
|
+
* \brief Reallocates an aligned block of memory.
|
186
|
+
* \throws std::bad_alloc on allocation failure
|
187
|
+
*/
|
188
|
+
inline void* aligned_realloc(void *ptr, std::size_t new_size, std::size_t old_size)
|
189
|
+
{
|
190
|
+
EIGEN_UNUSED_VARIABLE(old_size);
|
191
|
+
|
192
|
+
void *result;
|
193
|
+
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
|
194
|
+
result = std::realloc(ptr,new_size);
|
195
|
+
#else
|
196
|
+
result = handmade_aligned_realloc(ptr,new_size,old_size);
|
197
|
+
#endif
|
198
|
+
|
199
|
+
if (!result && new_size)
|
200
|
+
throw_std_bad_alloc();
|
201
|
+
|
202
|
+
return result;
|
203
|
+
}
|
204
|
+
|
205
|
+
/*****************************************************************************
|
206
|
+
*** Implementation of conditionally aligned functions ***
|
207
|
+
*****************************************************************************/
|
208
|
+
|
209
|
+
/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
|
210
|
+
* On allocation error, the returned pointer is null, and a std::bad_alloc is thrown.
|
211
|
+
*/
|
212
|
+
template<bool Align> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(std::size_t size)
|
213
|
+
{
|
214
|
+
return aligned_malloc(size);
|
215
|
+
}
|
216
|
+
|
217
|
+
template<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc<false>(std::size_t size)
|
218
|
+
{
|
219
|
+
check_that_malloc_is_allowed();
|
220
|
+
|
221
|
+
void *result = std::malloc(size);
|
222
|
+
if(!result && size)
|
223
|
+
throw_std_bad_alloc();
|
224
|
+
return result;
|
225
|
+
}
|
226
|
+
|
227
|
+
/** \internal Frees memory allocated with conditional_aligned_malloc */
|
228
|
+
template<bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void *ptr)
|
229
|
+
{
|
230
|
+
aligned_free(ptr);
|
231
|
+
}
|
232
|
+
|
233
|
+
template<> EIGEN_DEVICE_FUNC inline void conditional_aligned_free<false>(void *ptr)
|
234
|
+
{
|
235
|
+
std::free(ptr);
|
236
|
+
}
|
237
|
+
|
238
|
+
template<bool Align> inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size)
|
239
|
+
{
|
240
|
+
return aligned_realloc(ptr, new_size, old_size);
|
241
|
+
}
|
242
|
+
|
243
|
+
template<> inline void* conditional_aligned_realloc<false>(void* ptr, std::size_t new_size, std::size_t)
|
244
|
+
{
|
245
|
+
return std::realloc(ptr, new_size);
|
246
|
+
}
|
247
|
+
|
248
|
+
/*****************************************************************************
|
249
|
+
*** Construction/destruction of array elements ***
|
250
|
+
*****************************************************************************/
|
251
|
+
|
252
|
+
/** \internal Destructs the elements of an array.
|
253
|
+
* The \a size parameters tells on how many objects to call the destructor of T.
|
254
|
+
*/
|
255
|
+
template<typename T> EIGEN_DEVICE_FUNC inline void destruct_elements_of_array(T *ptr, std::size_t size)
|
256
|
+
{
|
257
|
+
// always destruct an array starting from the end.
|
258
|
+
if(ptr)
|
259
|
+
while(size) ptr[--size].~T();
|
260
|
+
}
|
261
|
+
|
262
|
+
/** \internal Constructs the elements of an array.
|
263
|
+
* The \a size parameter tells on how many objects to call the constructor of T.
|
264
|
+
*/
|
265
|
+
template<typename T> EIGEN_DEVICE_FUNC inline T* construct_elements_of_array(T *ptr, std::size_t size)
|
266
|
+
{
|
267
|
+
std::size_t i;
|
268
|
+
EIGEN_TRY
|
269
|
+
{
|
270
|
+
for (i = 0; i < size; ++i) ::new (ptr + i) T;
|
271
|
+
return ptr;
|
272
|
+
}
|
273
|
+
EIGEN_CATCH(...)
|
274
|
+
{
|
275
|
+
destruct_elements_of_array(ptr, i);
|
276
|
+
EIGEN_THROW;
|
277
|
+
}
|
278
|
+
return NULL;
|
279
|
+
}
|
280
|
+
|
281
|
+
/*****************************************************************************
|
282
|
+
*** Implementation of aligned new/delete-like functions ***
|
283
|
+
*****************************************************************************/
|
284
|
+
|
285
|
+
template<typename T>
|
286
|
+
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void check_size_for_overflow(std::size_t size)
|
287
|
+
{
|
288
|
+
if(size > std::size_t(-1) / sizeof(T))
|
289
|
+
throw_std_bad_alloc();
|
290
|
+
}
|
291
|
+
|
292
|
+
/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment.
|
293
|
+
* On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown.
|
294
|
+
* The default constructor of T is called.
|
295
|
+
*/
|
296
|
+
template<typename T> EIGEN_DEVICE_FUNC inline T* aligned_new(std::size_t size)
|
297
|
+
{
|
298
|
+
check_size_for_overflow<T>(size);
|
299
|
+
T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
|
300
|
+
EIGEN_TRY
|
301
|
+
{
|
302
|
+
return construct_elements_of_array(result, size);
|
303
|
+
}
|
304
|
+
EIGEN_CATCH(...)
|
305
|
+
{
|
306
|
+
aligned_free(result);
|
307
|
+
EIGEN_THROW;
|
308
|
+
}
|
309
|
+
return result;
|
310
|
+
}
|
311
|
+
|
312
|
+
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new(std::size_t size)
|
313
|
+
{
|
314
|
+
check_size_for_overflow<T>(size);
|
315
|
+
T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
|
316
|
+
EIGEN_TRY
|
317
|
+
{
|
318
|
+
return construct_elements_of_array(result, size);
|
319
|
+
}
|
320
|
+
EIGEN_CATCH(...)
|
321
|
+
{
|
322
|
+
conditional_aligned_free<Align>(result);
|
323
|
+
EIGEN_THROW;
|
324
|
+
}
|
325
|
+
return result;
|
326
|
+
}
|
327
|
+
|
328
|
+
/** \internal Deletes objects constructed with aligned_new
|
329
|
+
* The \a size parameters tells on how many objects to call the destructor of T.
|
330
|
+
*/
|
331
|
+
template<typename T> EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, std::size_t size)
|
332
|
+
{
|
333
|
+
destruct_elements_of_array<T>(ptr, size);
|
334
|
+
aligned_free(ptr);
|
335
|
+
}
|
336
|
+
|
337
|
+
/** \internal Deletes objects constructed with conditional_aligned_new
|
338
|
+
* The \a size parameters tells on how many objects to call the destructor of T.
|
339
|
+
*/
|
340
|
+
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete(T *ptr, std::size_t size)
|
341
|
+
{
|
342
|
+
destruct_elements_of_array<T>(ptr, size);
|
343
|
+
conditional_aligned_free<Align>(ptr);
|
344
|
+
}
|
345
|
+
|
346
|
+
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new(T* pts, std::size_t new_size, std::size_t old_size)
|
347
|
+
{
|
348
|
+
check_size_for_overflow<T>(new_size);
|
349
|
+
check_size_for_overflow<T>(old_size);
|
350
|
+
if(new_size < old_size)
|
351
|
+
destruct_elements_of_array(pts+new_size, old_size-new_size);
|
352
|
+
T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
|
353
|
+
if(new_size > old_size)
|
354
|
+
{
|
355
|
+
EIGEN_TRY
|
356
|
+
{
|
357
|
+
construct_elements_of_array(result+old_size, new_size-old_size);
|
358
|
+
}
|
359
|
+
EIGEN_CATCH(...)
|
360
|
+
{
|
361
|
+
conditional_aligned_free<Align>(result);
|
362
|
+
EIGEN_THROW;
|
363
|
+
}
|
364
|
+
}
|
365
|
+
return result;
|
366
|
+
}
|
367
|
+
|
368
|
+
|
369
|
+
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new_auto(std::size_t size)
|
370
|
+
{
|
371
|
+
if(size==0)
|
372
|
+
return 0; // short-cut. Also fixes Bug 884
|
373
|
+
check_size_for_overflow<T>(size);
|
374
|
+
T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
|
375
|
+
if(NumTraits<T>::RequireInitialization)
|
376
|
+
{
|
377
|
+
EIGEN_TRY
|
378
|
+
{
|
379
|
+
construct_elements_of_array(result, size);
|
380
|
+
}
|
381
|
+
EIGEN_CATCH(...)
|
382
|
+
{
|
383
|
+
conditional_aligned_free<Align>(result);
|
384
|
+
EIGEN_THROW;
|
385
|
+
}
|
386
|
+
}
|
387
|
+
return result;
|
388
|
+
}
|
389
|
+
|
390
|
+
template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, std::size_t new_size, std::size_t old_size)
|
391
|
+
{
|
392
|
+
check_size_for_overflow<T>(new_size);
|
393
|
+
check_size_for_overflow<T>(old_size);
|
394
|
+
if(NumTraits<T>::RequireInitialization && (new_size < old_size))
|
395
|
+
destruct_elements_of_array(pts+new_size, old_size-new_size);
|
396
|
+
T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
|
397
|
+
if(NumTraits<T>::RequireInitialization && (new_size > old_size))
|
398
|
+
{
|
399
|
+
EIGEN_TRY
|
400
|
+
{
|
401
|
+
construct_elements_of_array(result+old_size, new_size-old_size);
|
402
|
+
}
|
403
|
+
EIGEN_CATCH(...)
|
404
|
+
{
|
405
|
+
conditional_aligned_free<Align>(result);
|
406
|
+
EIGEN_THROW;
|
407
|
+
}
|
408
|
+
}
|
409
|
+
return result;
|
410
|
+
}
|
411
|
+
|
412
|
+
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete_auto(T *ptr, std::size_t size)
|
413
|
+
{
|
414
|
+
if(NumTraits<T>::RequireInitialization)
|
415
|
+
destruct_elements_of_array<T>(ptr, size);
|
416
|
+
conditional_aligned_free<Align>(ptr);
|
417
|
+
}
|
418
|
+
|
419
|
+
/****************************************************************************/
|
420
|
+
|
421
|
+
/** \internal Returns the index of the first element of the array that is well aligned with respect to the requested \a Alignment.
|
422
|
+
*
|
423
|
+
* \tparam Alignment requested alignment in Bytes.
|
424
|
+
* \param array the address of the start of the array
|
425
|
+
* \param size the size of the array
|
426
|
+
*
|
427
|
+
* \note If no element of the array is well aligned or the requested alignment is not a multiple of a scalar,
|
428
|
+
* the size of the array is returned. For example with SSE, the requested alignment is typically 16-bytes. If
|
429
|
+
* packet size for the given scalar type is 1, then everything is considered well-aligned.
|
430
|
+
*
|
431
|
+
* \note Otherwise, if the Alignment is larger that the scalar size, we rely on the assumptions that sizeof(Scalar) is a
|
432
|
+
* power of 2. On the other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
|
433
|
+
* example with Scalar=double on certain 32-bit platforms, see bug #79.
|
434
|
+
*
|
435
|
+
* There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
|
436
|
+
* \sa first_default_aligned()
|
437
|
+
*/
|
438
|
+
template<int Alignment, typename Scalar, typename Index>
|
439
|
+
EIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size)
|
440
|
+
{
|
441
|
+
const Index ScalarSize = sizeof(Scalar);
|
442
|
+
const Index AlignmentSize = Alignment / ScalarSize;
|
443
|
+
const Index AlignmentMask = AlignmentSize-1;
|
444
|
+
|
445
|
+
if(AlignmentSize<=1)
|
446
|
+
{
|
447
|
+
// Either the requested alignment if smaller than a scalar, or it exactly match a 1 scalar
|
448
|
+
// so that all elements of the array have the same alignment.
|
449
|
+
return 0;
|
450
|
+
}
|
451
|
+
else if( (UIntPtr(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)
|
452
|
+
{
|
453
|
+
// The array is not aligned to the size of a single scalar, or the requested alignment is not a multiple of the scalar size.
|
454
|
+
// Consequently, no element of the array is well aligned.
|
455
|
+
return size;
|
456
|
+
}
|
457
|
+
else
|
458
|
+
{
|
459
|
+
Index first = (AlignmentSize - (Index((UIntPtr(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask;
|
460
|
+
return (first < size) ? first : size;
|
461
|
+
}
|
462
|
+
}
|
463
|
+
|
464
|
+
/** \internal Returns the index of the first element of the array that is well aligned with respect the largest packet requirement.
|
465
|
+
* \sa first_aligned(Scalar*,Index) and first_default_aligned(DenseBase<Derived>) */
|
466
|
+
template<typename Scalar, typename Index>
|
467
|
+
EIGEN_DEVICE_FUNC inline Index first_default_aligned(const Scalar* array, Index size)
|
468
|
+
{
|
469
|
+
typedef typename packet_traits<Scalar>::type DefaultPacketType;
|
470
|
+
return first_aligned<unpacket_traits<DefaultPacketType>::alignment>(array, size);
|
471
|
+
}
|
472
|
+
|
473
|
+
/** \internal Returns the smallest integer multiple of \a base and greater or equal to \a size
|
474
|
+
*/
|
475
|
+
template<typename Index>
|
476
|
+
inline Index first_multiple(Index size, Index base)
|
477
|
+
{
|
478
|
+
return ((size+base-1)/base)*base;
|
479
|
+
}
|
480
|
+
|
481
|
+
// std::copy is much slower than memcpy, so let's introduce a smart_copy which
|
482
|
+
// use memcpy on trivial types, i.e., on types that does not require an initialization ctor.
|
483
|
+
template<typename T, bool UseMemcpy> struct smart_copy_helper;
|
484
|
+
|
485
|
+
template<typename T> EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target)
|
486
|
+
{
|
487
|
+
smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
|
488
|
+
}
|
489
|
+
|
490
|
+
template<typename T> struct smart_copy_helper<T,true> {
|
491
|
+
EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
|
492
|
+
{
|
493
|
+
IntPtr size = IntPtr(end)-IntPtr(start);
|
494
|
+
if(size==0) return;
|
495
|
+
eigen_internal_assert(start!=0 && end!=0 && target!=0);
|
496
|
+
std::memcpy(target, start, size);
|
497
|
+
}
|
498
|
+
};
|
499
|
+
|
500
|
+
template<typename T> struct smart_copy_helper<T,false> {
|
501
|
+
EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
|
502
|
+
{ std::copy(start, end, target); }
|
503
|
+
};
|
504
|
+
|
505
|
+
// intelligent memmove. falls back to std::memmove for POD types, uses std::copy otherwise.
|
506
|
+
template<typename T, bool UseMemmove> struct smart_memmove_helper;
|
507
|
+
|
508
|
+
template<typename T> void smart_memmove(const T* start, const T* end, T* target)
|
509
|
+
{
|
510
|
+
smart_memmove_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
|
511
|
+
}
|
512
|
+
|
513
|
+
template<typename T> struct smart_memmove_helper<T,true> {
|
514
|
+
static inline void run(const T* start, const T* end, T* target)
|
515
|
+
{
|
516
|
+
IntPtr size = IntPtr(end)-IntPtr(start);
|
517
|
+
if(size==0) return;
|
518
|
+
eigen_internal_assert(start!=0 && end!=0 && target!=0);
|
519
|
+
std::memmove(target, start, size);
|
520
|
+
}
|
521
|
+
};
|
522
|
+
|
523
|
+
template<typename T> struct smart_memmove_helper<T,false> {
|
524
|
+
static inline void run(const T* start, const T* end, T* target)
|
525
|
+
{
|
526
|
+
if (UIntPtr(target) < UIntPtr(start))
|
527
|
+
{
|
528
|
+
std::copy(start, end, target);
|
529
|
+
}
|
530
|
+
else
|
531
|
+
{
|
532
|
+
std::ptrdiff_t count = (std::ptrdiff_t(end)-std::ptrdiff_t(start)) / sizeof(T);
|
533
|
+
std::copy_backward(start, end, target + count);
|
534
|
+
}
|
535
|
+
}
|
536
|
+
};
|
537
|
+
|
538
|
+
|
539
|
+
/*****************************************************************************
|
540
|
+
*** Implementation of runtime stack allocation (falling back to malloc) ***
|
541
|
+
*****************************************************************************/
|
542
|
+
|
543
|
+
// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
|
544
|
+
// to the appropriate stack allocation function
|
545
|
+
#ifndef EIGEN_ALLOCA
|
546
|
+
#if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca)
|
547
|
+
#define EIGEN_ALLOCA alloca
|
548
|
+
#elif EIGEN_COMP_MSVC
|
549
|
+
#define EIGEN_ALLOCA _alloca
|
550
|
+
#endif
|
551
|
+
#endif
|
552
|
+
|
553
|
+
// This helper class construct the allocated memory, and takes care of destructing and freeing the handled data
|
554
|
+
// at destruction time. In practice this helper class is mainly useful to avoid memory leak in case of exceptions.
|
555
|
+
template<typename T> class aligned_stack_memory_handler : noncopyable
|
556
|
+
{
|
557
|
+
public:
|
558
|
+
/* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
|
559
|
+
* Note that \a ptr can be 0 regardless of the other parameters.
|
560
|
+
* This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
|
561
|
+
* In this case, the buffer elements will also be destructed when this handler will be destructed.
|
562
|
+
* Finally, if \a dealloc is true, then the pointer \a ptr is freed.
|
563
|
+
**/
|
564
|
+
aligned_stack_memory_handler(T* ptr, std::size_t size, bool dealloc)
|
565
|
+
: m_ptr(ptr), m_size(size), m_deallocate(dealloc)
|
566
|
+
{
|
567
|
+
if(NumTraits<T>::RequireInitialization && m_ptr)
|
568
|
+
Eigen::internal::construct_elements_of_array(m_ptr, size);
|
569
|
+
}
|
570
|
+
~aligned_stack_memory_handler()
|
571
|
+
{
|
572
|
+
if(NumTraits<T>::RequireInitialization && m_ptr)
|
573
|
+
Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
|
574
|
+
if(m_deallocate)
|
575
|
+
Eigen::internal::aligned_free(m_ptr);
|
576
|
+
}
|
577
|
+
protected:
|
578
|
+
T* m_ptr;
|
579
|
+
std::size_t m_size;
|
580
|
+
bool m_deallocate;
|
581
|
+
};
|
582
|
+
|
583
|
+
template<typename T> class scoped_array : noncopyable
|
584
|
+
{
|
585
|
+
T* m_ptr;
|
586
|
+
public:
|
587
|
+
explicit scoped_array(std::ptrdiff_t size)
|
588
|
+
{
|
589
|
+
m_ptr = new T[size];
|
590
|
+
}
|
591
|
+
~scoped_array()
|
592
|
+
{
|
593
|
+
delete[] m_ptr;
|
594
|
+
}
|
595
|
+
T& operator[](std::ptrdiff_t i) { return m_ptr[i]; }
|
596
|
+
const T& operator[](std::ptrdiff_t i) const { return m_ptr[i]; }
|
597
|
+
T* &ptr() { return m_ptr; }
|
598
|
+
const T* ptr() const { return m_ptr; }
|
599
|
+
operator const T*() const { return m_ptr; }
|
600
|
+
};
|
601
|
+
|
602
|
+
template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
|
603
|
+
{
|
604
|
+
std::swap(a.ptr(),b.ptr());
|
605
|
+
}
|
606
|
+
|
607
|
+
} // end namespace internal
|
608
|
+
|
609
|
+
/** \internal
|
610
|
+
* Declares, allocates and construct an aligned buffer named NAME of SIZE elements of type TYPE on the stack
|
611
|
+
* if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
|
612
|
+
* (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.
|
613
|
+
* The allocated buffer is automatically deleted when exiting the scope of this declaration.
|
614
|
+
* If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
|
615
|
+
* Here is an example:
|
616
|
+
* \code
|
617
|
+
* {
|
618
|
+
* ei_declare_aligned_stack_constructed_variable(float,data,size,0);
|
619
|
+
* // use data[0] to data[size-1]
|
620
|
+
* }
|
621
|
+
* \endcode
|
622
|
+
* The underlying stack allocation function can controlled with the EIGEN_ALLOCA preprocessor token.
|
623
|
+
*/
|
624
|
+
#ifdef EIGEN_ALLOCA
|
625
|
+
|
626
|
+
#if EIGEN_DEFAULT_ALIGN_BYTES>0
|
627
|
+
// We always manually re-align the result of EIGEN_ALLOCA.
|
628
|
+
// If alloca is already aligned, the compiler should be smart enough to optimize away the re-alignment.
|
629
|
+
#define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((internal::UIntPtr(EIGEN_ALLOCA(SIZE+EIGEN_DEFAULT_ALIGN_BYTES-1)) + EIGEN_DEFAULT_ALIGN_BYTES-1) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)))
|
630
|
+
#else
|
631
|
+
#define EIGEN_ALIGNED_ALLOCA(SIZE) EIGEN_ALLOCA(SIZE)
|
632
|
+
#endif
|
633
|
+
|
634
|
+
#define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
|
635
|
+
Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
|
636
|
+
TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
|
637
|
+
: reinterpret_cast<TYPE*>( \
|
638
|
+
(sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
|
639
|
+
: Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \
|
640
|
+
Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
|
641
|
+
|
642
|
+
#else
|
643
|
+
|
644
|
+
#define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
|
645
|
+
Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
|
646
|
+
TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
|
647
|
+
Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
|
648
|
+
|
649
|
+
#endif
|
650
|
+
|
651
|
+
|
652
|
+
/*****************************************************************************
*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF]                ***
*****************************************************************************/

#if EIGEN_MAX_ALIGN_BYTES!=0
  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void* operator new(std::size_t size, const std::nothrow_t&) EIGEN_NO_THROW { \
        EIGEN_TRY { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
        EIGEN_CATCH (...) { return 0; } \
      }
  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
      void *operator new(std::size_t size) { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      } \
      void *operator new[](std::size_t size) { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      } \
      void operator delete(void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete[](void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete(void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete[](void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      /* in-place new and delete. since (at least afaik) there is no actual   */ \
      /* memory allocated we can safely let the default implementation handle */ \
      /* this particular case. */ \
      static void *operator new(std::size_t size, void *ptr) { return ::operator new(size,ptr); } \
      static void *operator new[](std::size_t size, void* ptr) { return ::operator new[](size,ptr); } \
      void operator delete(void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete(memory,ptr); } \
      void operator delete[](void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete[](memory,ptr); } \
      /* nothrow-new (returns zero instead of std::bad_alloc) */ \
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void operator delete(void *ptr, const std::nothrow_t&) EIGEN_NO_THROW { \
        Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
      } \
      typedef void eigen_aligned_operator_new_marker_type;
#else
  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
#endif

#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%EIGEN_MAX_ALIGN_BYTES==0)))

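As a brief sketch of how these macros are meant to be consumed (the type name MyBody is hypothetical): placing EIGEN_MAKE_ALIGNED_OPERATOR_NEW in a class that holds a fixed-size vectorizable Eigen member overloads its operator new/new[]/delete so heap-allocated instances respect EIGEN_MAX_ALIGN_BYTES.

#include <Eigen/Core>

// Hypothetical user type with a fixed-size, vectorizable Eigen member.
struct MyBody
{
  Eigen::Matrix4f pose;   // requires 16/32/64-byte alignment depending on SIMD
  double mass;
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};

// MyBody* b = new MyBody;   // now guaranteed to be suitably aligned
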
/****************************************************************************/

/** \class aligned_allocator
* \ingroup Core_Module
*
* \brief STL compatible allocator to use with types requiring a non-standard alignment.
*
* The memory is aligned as for dynamically aligned matrix/array types such as MatrixXd.
* By default, it will thus provide at least 16 bytes alignment and more in the following cases:
*  - 32 bytes alignment if AVX is enabled.
*  - 64 bytes alignment if AVX512 is enabled.
*
* This can be controlled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
* \link TopicPreprocessorDirectivesPerformance there \endlink.
*
* Example:
* \code
* // Matrix4f requires 16 bytes alignment:
* std::map< int, Matrix4f, std::less<int>,
*           aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:
* std::map< int, Vector3f > my_map_vec3;
* \endcode
*
* \sa \blank \ref TopicStlContainers.
*/
template<class T>
class aligned_allocator : public std::allocator<T>
{
public:
  typedef std::size_t     size_type;
  typedef std::ptrdiff_t  difference_type;
  typedef T*              pointer;
  typedef const T*        const_pointer;
  typedef T&              reference;
  typedef const T&        const_reference;
  typedef T               value_type;

  template<class U>
  struct rebind
  {
    typedef aligned_allocator<U> other;
  };

  aligned_allocator() : std::allocator<T>() {}

  aligned_allocator(const aligned_allocator& other) : std::allocator<T>(other) {}

  template<class U>
  aligned_allocator(const aligned_allocator<U>& other) : std::allocator<T>(other) {}

  ~aligned_allocator() {}

  pointer allocate(size_type num, const void* /*hint*/ = 0)
  {
    internal::check_size_for_overflow<T>(num);
    size_type size = num * sizeof(T);
#if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(7,0)
    // workaround gcc bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87544
    // It triggered eigen/Eigen/src/Core/util/Memory.h:189:12: warning: argument 1 value '18446744073709551612' exceeds maximum object size 9223372036854775807
    if(size>=std::size_t((std::numeric_limits<std::ptrdiff_t>::max)()))
      return 0;
    else
#endif
      return static_cast<pointer>( internal::aligned_malloc(size) );
  }

  void deallocate(pointer p, size_type /*num*/)
  {
    internal::aligned_free(p);
  }
};

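For context, a minimal sketch of the usual way this allocator is plugged into an STL container holding fixed-size vectorizable Eigen types, mirroring the documentation example above:

#include <vector>
#include <Eigen/Dense>

// A std::vector of fixed-size vectorizable matrices: the default allocator only
// guarantees alignof(std::max_align_t), so Eigen's aligned_allocator is used.
std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > poses(8);
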
//---------- Cache sizes ----------

#if !defined(EIGEN_NO_CPUID)
#  if EIGEN_COMP_GNUC && EIGEN_ARCH_i386_OR_x86_64
#    if defined(__PIC__) && EIGEN_ARCH_i386
       // Case for x86 with PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
#    elif defined(__PIC__) && EIGEN_ARCH_x86_64
       // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model.
       // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
#    else
       // Case for x86_64 or x86 w/o PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
#    endif
#  elif EIGEN_COMP_MSVC
#    if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64
#      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
#    endif
#  endif
#endif

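As a rough sketch of the macro's contract (the helper example_read_vendor is hypothetical, and it assumes <cstring> is included): abcd receives EAX, EBX, ECX, and EDX for the requested leaf func and subleaf id, and leaf 0 yields the highest supported standard leaf together with the 12-byte vendor string.

#ifdef EIGEN_CPUID
// Hypothetical helper, not part of this header: read the CPU vendor string.
// Leaf 0 returns it spread across EBX, EDX, ECX in that order.
inline void example_read_vendor(char vendor[13])
{
  int abcd[4] = {0, 0, 0, 0};
  EIGEN_CPUID(abcd, 0x0, 0);             // leaf 0, subleaf 0
  std::memcpy(vendor + 0, &abcd[1], 4);  // EBX: e.g. "Genu"
  std::memcpy(vendor + 4, &abcd[3], 4);  // EDX: e.g. "ineI"
  std::memcpy(vendor + 8, &abcd[2], 4);  // ECX: e.g. "ntel"
  vendor[12] = '\0';
}
#endif
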
namespace internal {

#ifdef EIGEN_CPUID

inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
{
  return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
}

inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
{
  int abcd[4];
  l1 = l2 = l3 = 0;
  int cache_id = 0;
  int cache_type = 0;
  do {
    abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
    EIGEN_CPUID(abcd,0x4,cache_id);
    cache_type = (abcd[0] & 0x0F) >> 0;
    if(cache_type==1||cache_type==3) // data or unified cache
    {
      int cache_level = (abcd[0] & 0xE0) >> 5;        // A[7:5]
      int ways        = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
      int partitions  = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
      int line_size   = (abcd[1] & 0x00000FFF) >>  0; // B[11:0]
      int sets        = (abcd[2]);                    // C[31:0]

      int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);

      switch(cache_level)
      {
        case 1: l1 = cache_size; break;
        case 2: l2 = cache_size; break;
        case 3: l3 = cache_size; break;
        default: break;
      }
    }
    cache_id++;
  } while(cache_type>0 && cache_id<16);
}

inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
{
  int abcd[4];
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  l1 = l2 = l3 = 0;
  EIGEN_CPUID(abcd,0x00000002,0);
  unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
  bool check_for_p2_core2 = false;
  for(int i=0; i<14; ++i)
  {
    switch(bytes[i])
    {
      case 0x0A: l1 = 8; break;     // 0Ah   data L1 cache, 8 KB, 2 ways, 32 byte lines
      case 0x0C: l1 = 16; break;    // 0Ch   data L1 cache, 16 KB, 4 ways, 32 byte lines
      case 0x0E: l1 = 24; break;    // 0Eh   data L1 cache, 24 KB, 6 ways, 64 byte lines
      case 0x10: l1 = 16; break;    // 10h   data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
      case 0x15: l1 = 16; break;    // 15h   code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
      case 0x2C: l1 = 32; break;    // 2Ch   data L1 cache, 32 KB, 8 ways, 64 byte lines
      case 0x30: l1 = 32; break;    // 30h   code L1 cache, 32 KB, 8 ways, 64 byte lines
      case 0x60: l1 = 16; break;    // 60h   data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
      case 0x66: l1 = 8; break;     // 66h   data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
      case 0x67: l1 = 16; break;    // 67h   data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
      case 0x68: l1 = 32; break;    // 68h   data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
      case 0x1A: l2 = 96; break;    // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
      case 0x22: l3 = 512; break;   // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
      case 0x23: l3 = 1024; break;  // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x25: l3 = 2048; break;  // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x29: l3 = 4096; break;  // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x39: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
      case 0x3A: l2 = 192; break;   // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
      case 0x3B: l2 = 128; break;   // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
      case 0x3C: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
      case 0x3D: l2 = 384; break;   // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
      case 0x3E: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
      case 0x40: l2 = 0; break;     // no integrated L2 cache (P6 core) or L3 cache (P4 core)
      case 0x41: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
      case 0x42: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
      case 0x43: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
      case 0x44: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
      case 0x45: l2 = 2048; break;  // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
      case 0x46: l3 = 4096; break;  // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
      case 0x47: l3 = 8192; break;  // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
      case 0x48: l2 = 3072; break;  // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
      case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break; // code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
      case 0x4A: l3 = 6144; break;  // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
      case 0x4B: l3 = 8192; break;  // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
      case 0x4C: l3 = 12288; break; // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
      case 0x4D: l3 = 16384; break; // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
      case 0x4E: l2 = 6144; break;  // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
      case 0x78: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
      case 0x79: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7A: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7B: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7C: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7D: l2 = 2048; break;  // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
      case 0x7E: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
      case 0x7F: l2 = 512; break;   // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
      case 0x80: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
      case 0x81: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
      case 0x82: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
      case 0x83: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
      case 0x84: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
      case 0x85: l2 = 2048; break;  // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
      case 0x86: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
      case 0x87: l2 = 1024; break;  // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
      case 0x88: l3 = 2048; break;  // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
      case 0x89: l3 = 4096; break;  // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
      case 0x8A: l3 = 8192; break;  // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
      case 0x8D: l3 = 3072; break;  // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)

      default: break;
    }
  }
  if(check_for_p2_core2 && l2 == l3)
    l3 = 0;
  l1 *= 1024;
  l2 *= 1024;
  l3 *= 1024;
}

inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
{
  if(max_std_funcs>=4)
    queryCacheSizes_intel_direct(l1,l2,l3);
  else
    queryCacheSizes_intel_codes(l1,l2,l3);
}

inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
{
  int abcd[4];
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  EIGEN_CPUID(abcd,0x80000005,0);
  l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  EIGEN_CPUID(abcd,0x80000006,0);
  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = l2 cache size in KB
  l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31:18] = l3 cache size in units of 512 KB
}
#endif

/** \internal
 * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */
inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
  #ifdef EIGEN_CPUID
  int abcd[4];
  const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
  const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
  const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"

  // identify the CPU vendor
  EIGEN_CPUID(abcd,0x0,0);
  int max_std_funcs = abcd[1];
  if(cpuid_is_vendor(abcd,GenuineIntel))
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
  else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
    queryCacheSizes_amd(l1,l2,l3);
  else
    // by default let's use Intel's API
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);

  // here is the list of other vendors:
  //   ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
  //   ||cpuid_is_vendor(abcd,"CyrixInstead")
  //   ||cpuid_is_vendor(abcd,"CentaurHauls")
  //   ||cpuid_is_vendor(abcd,"GenuineTMx86")
  //   ||cpuid_is_vendor(abcd,"TransmetaCPU")
  //   ||cpuid_is_vendor(abcd,"RiseRiseRise")
  //   ||cpuid_is_vendor(abcd,"Geode by NSC")
  //   ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
  //   ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
  //   ||cpuid_is_vendor(abcd,"NexGenDriven")
  #else
  l1 = l2 = l3 = -1;
  #endif
}

/** \internal
 * \returns the size in Bytes of the L1 data cache */
inline int queryL1CacheSize()
{
  int l1(-1), l2, l3;
  queryCacheSizes(l1,l2,l3);
  return l1;
}

/** \internal
 * \returns the size in Bytes of the L2 or L3 cache if the latter is present */
inline int queryTopLevelCacheSize()
{
  int l1, l2(-1), l3(-1);
  queryCacheSizes(l1,l2,l3);
  return (std::max)(l2,l3);
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_MEMORY_H
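A hypothetical usage snippet for the two query helpers above; note they are marked \internal, so calling them from application code is shown purely for illustration. Both report sizes in bytes and fall back to -1 when no EIGEN_CPUID implementation is available for the current compiler and architecture.

#include <iostream>
#include <Eigen/Core>

int main()
{
  // -1 means the size could not be determined (no EIGEN_CPUID on this build).
  std::cout << "L1 data cache:     " << Eigen::internal::queryL1CacheSize()       << " bytes\n";
  std::cout << "top-level (L2/L3): " << Eigen::internal::queryTopLevelCacheSize() << " bytes\n";
  return 0;
}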