tomoto 0.2.3 → 0.3.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/README.md +8 -10
- data/ext/tomoto/extconf.rb +6 -2
- data/ext/tomoto/{ext.cpp → tomoto.cpp} +1 -1
- data/lib/tomoto/version.rb +1 -1
- data/lib/tomoto.rb +5 -1
- data/vendor/EigenRand/EigenRand/Core.h +10 -10
- data/vendor/EigenRand/EigenRand/Dists/Basic.h +208 -9
- data/vendor/EigenRand/EigenRand/Dists/Discrete.h +52 -31
- data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +9 -8
- data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +28 -21
- data/vendor/EigenRand/EigenRand/EigenRand +11 -6
- data/vendor/EigenRand/EigenRand/Macro.h +13 -7
- data/vendor/EigenRand/EigenRand/MorePacketMath.h +348 -740
- data/vendor/EigenRand/EigenRand/MvDists/Multinomial.h +5 -3
- data/vendor/EigenRand/EigenRand/MvDists/MvNormal.h +9 -3
- data/vendor/EigenRand/EigenRand/PacketFilter.h +11 -253
- data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +21 -47
- data/vendor/EigenRand/EigenRand/RandUtils.h +50 -344
- data/vendor/EigenRand/EigenRand/arch/AVX/MorePacketMath.h +619 -0
- data/vendor/EigenRand/EigenRand/arch/AVX/PacketFilter.h +149 -0
- data/vendor/EigenRand/EigenRand/arch/AVX/RandUtils.h +228 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/MorePacketMath.h +473 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/PacketFilter.h +142 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/RandUtils.h +126 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/MorePacketMath.h +501 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/PacketFilter.h +133 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/RandUtils.h +120 -0
- data/vendor/EigenRand/EigenRand/doc.h +24 -12
- data/vendor/EigenRand/README.md +57 -4
- data/vendor/eigen/COPYING.APACHE +203 -0
- data/vendor/eigen/COPYING.BSD +1 -1
- data/vendor/eigen/COPYING.MINPACK +51 -52
- data/vendor/eigen/Eigen/Cholesky +0 -1
- data/vendor/eigen/Eigen/Core +112 -265
- data/vendor/eigen/Eigen/Eigenvalues +2 -3
- data/vendor/eigen/Eigen/Geometry +5 -8
- data/vendor/eigen/Eigen/Householder +0 -1
- data/vendor/eigen/Eigen/Jacobi +0 -1
- data/vendor/eigen/Eigen/KLUSupport +41 -0
- data/vendor/eigen/Eigen/LU +2 -5
- data/vendor/eigen/Eigen/OrderingMethods +0 -3
- data/vendor/eigen/Eigen/PaStiXSupport +1 -0
- data/vendor/eigen/Eigen/PardisoSupport +0 -0
- data/vendor/eigen/Eigen/QR +2 -3
- data/vendor/eigen/Eigen/QtAlignedMalloc +0 -1
- data/vendor/eigen/Eigen/SVD +0 -1
- data/vendor/eigen/Eigen/Sparse +0 -2
- data/vendor/eigen/Eigen/SparseCholesky +0 -8
- data/vendor/eigen/Eigen/SparseLU +4 -0
- data/vendor/eigen/Eigen/SparseQR +0 -1
- data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +42 -27
- data/vendor/eigen/Eigen/src/Cholesky/LLT.h +39 -23
- data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +90 -47
- data/vendor/eigen/Eigen/src/Core/ArithmeticSequence.h +413 -0
- data/vendor/eigen/Eigen/src/Core/Array.h +99 -11
- data/vendor/eigen/Eigen/src/Core/ArrayBase.h +3 -3
- data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +21 -21
- data/vendor/eigen/Eigen/src/Core/Assign.h +1 -1
- data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +125 -50
- data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +10 -10
- data/vendor/eigen/Eigen/src/Core/BandMatrix.h +16 -16
- data/vendor/eigen/Eigen/src/Core/Block.h +56 -60
- data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +29 -31
- data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +7 -3
- data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +325 -272
- data/vendor/eigen/Eigen/src/Core/CoreIterators.h +5 -0
- data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +21 -22
- data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +153 -18
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +6 -6
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +14 -10
- data/vendor/eigen/Eigen/src/Core/DenseBase.h +132 -42
- data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +25 -21
- data/vendor/eigen/Eigen/src/Core/DenseStorage.h +153 -71
- data/vendor/eigen/Eigen/src/Core/Diagonal.h +21 -23
- data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +50 -2
- data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +1 -1
- data/vendor/eigen/Eigen/src/Core/Dot.h +10 -10
- data/vendor/eigen/Eigen/src/Core/EigenBase.h +10 -9
- data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +8 -4
- data/vendor/eigen/Eigen/src/Core/Fuzzy.h +3 -3
- data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +20 -10
- data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +599 -152
- data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +40 -33
- data/vendor/eigen/Eigen/src/Core/IO.h +40 -7
- data/vendor/eigen/Eigen/src/Core/IndexedView.h +237 -0
- data/vendor/eigen/Eigen/src/Core/Inverse.h +9 -10
- data/vendor/eigen/Eigen/src/Core/Map.h +7 -7
- data/vendor/eigen/Eigen/src/Core/MapBase.h +10 -3
- data/vendor/eigen/Eigen/src/Core/MathFunctions.h +767 -125
- data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +118 -19
- data/vendor/eigen/Eigen/src/Core/Matrix.h +131 -25
- data/vendor/eigen/Eigen/src/Core/MatrixBase.h +21 -3
- data/vendor/eigen/Eigen/src/Core/NestByValue.h +25 -50
- data/vendor/eigen/Eigen/src/Core/NoAlias.h +4 -3
- data/vendor/eigen/Eigen/src/Core/NumTraits.h +107 -20
- data/vendor/eigen/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
- data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +3 -31
- data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +152 -59
- data/vendor/eigen/Eigen/src/Core/Product.h +30 -25
- data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +192 -125
- data/vendor/eigen/Eigen/src/Core/Random.h +37 -1
- data/vendor/eigen/Eigen/src/Core/Redux.h +180 -170
- data/vendor/eigen/Eigen/src/Core/Ref.h +121 -23
- data/vendor/eigen/Eigen/src/Core/Replicate.h +8 -8
- data/vendor/eigen/Eigen/src/Core/Reshaped.h +454 -0
- data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +7 -5
- data/vendor/eigen/Eigen/src/Core/Reverse.h +18 -12
- data/vendor/eigen/Eigen/src/Core/Select.h +8 -6
- data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +33 -20
- data/vendor/eigen/Eigen/src/Core/Solve.h +14 -14
- data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +16 -16
- data/vendor/eigen/Eigen/src/Core/SolverBase.h +41 -3
- data/vendor/eigen/Eigen/src/Core/StableNorm.h +100 -70
- data/vendor/eigen/Eigen/src/Core/StlIterators.h +463 -0
- data/vendor/eigen/Eigen/src/Core/Stride.h +9 -4
- data/vendor/eigen/Eigen/src/Core/Swap.h +5 -4
- data/vendor/eigen/Eigen/src/Core/Transpose.h +88 -27
- data/vendor/eigen/Eigen/src/Core/Transpositions.h +26 -47
- data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +93 -75
- data/vendor/eigen/Eigen/src/Core/VectorBlock.h +5 -5
- data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +159 -70
- data/vendor/eigen/Eigen/src/Core/Visitor.h +137 -29
- data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +50 -129
- data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +126 -337
- data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1092 -155
- data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +65 -1
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +207 -236
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1482 -495
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +152 -165
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +19 -251
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2042 -392
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +235 -80
- data/vendor/eigen/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +102 -14
- data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Half.h +942 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +1 -1
- data/vendor/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
- data/vendor/eigen/Eigen/src/Core/arch/{CUDA → GPU}/MathFunctions.h +16 -4
- data/vendor/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
- data/vendor/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
- data/vendor/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/Complex.h +648 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +313 -219
- data/vendor/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +54 -70
- data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4376 -549
- data/vendor/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +59 -179
- data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +65 -428
- data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +893 -283
- data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +65 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +212 -183
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +101 -5
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +510 -395
- data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +11 -2
- data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +112 -46
- data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +31 -30
- data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +32 -2
- data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +355 -16
- data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1075 -586
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +49 -24
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +41 -35
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +6 -6
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +4 -2
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +382 -483
- data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +22 -5
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +53 -30
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +16 -8
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +8 -6
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +4 -4
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +5 -4
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +33 -27
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +14 -12
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +36 -34
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +8 -4
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +13 -10
- data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +304 -119
- data/vendor/eigen/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
- data/vendor/eigen/Eigen/src/Core/util/Constants.h +25 -9
- data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +26 -3
- data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +29 -9
- data/vendor/eigen/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
- data/vendor/eigen/Eigen/src/Core/util/IntegralConstant.h +272 -0
- data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +8 -1
- data/vendor/eigen/Eigen/src/Core/util/Macros.h +709 -246
- data/vendor/eigen/Eigen/src/Core/util/Memory.h +222 -52
- data/vendor/eigen/Eigen/src/Core/util/Meta.h +355 -77
- data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +5 -1
- data/vendor/eigen/Eigen/src/Core/util/ReshapedHelper.h +51 -0
- data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +8 -5
- data/vendor/eigen/Eigen/src/Core/util/SymbolicIndex.h +293 -0
- data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +65 -30
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +1 -1
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +7 -4
- data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +1 -1
- data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +9 -6
- data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +21 -9
- data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +77 -43
- data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +20 -15
- data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +99 -5
- data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +4 -4
- data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +3 -3
- data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +15 -11
- data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +1 -1
- data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +3 -2
- data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +39 -2
- data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +70 -14
- data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +3 -3
- data/vendor/eigen/Eigen/src/Geometry/Scaling.h +23 -5
- data/vendor/eigen/Eigen/src/Geometry/Transform.h +88 -67
- data/vendor/eigen/Eigen/src/Geometry/Translation.h +6 -12
- data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +1 -1
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
- data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +9 -2
- data/vendor/eigen/Eigen/src/Householder/Householder.h +8 -4
- data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +123 -48
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +15 -15
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +7 -23
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +5 -22
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +41 -47
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +51 -60
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +70 -20
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +2 -20
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +11 -9
- data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +31 -10
- data/vendor/eigen/Eigen/src/KLUSupport/KLUSupport.h +358 -0
- data/vendor/eigen/Eigen/src/LU/Determinant.h +35 -19
- data/vendor/eigen/Eigen/src/LU/FullPivLU.h +29 -43
- data/vendor/eigen/Eigen/src/LU/InverseImpl.h +25 -8
- data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +71 -58
- data/vendor/eigen/Eigen/src/LU/arch/InverseSize4.h +351 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +7 -17
- data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +297 -277
- data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +6 -10
- data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +1 -1
- data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +9 -7
- data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +41 -20
- data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +100 -27
- data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +59 -22
- data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +48 -23
- data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +25 -3
- data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +183 -63
- data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +22 -14
- data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22
- data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37
- data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2
- data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0
- data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6
- data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27
- data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +25 -57
- data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +40 -11
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +11 -15
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +4 -2
- data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +30 -8
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +126 -11
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +5 -12
- data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +13 -1
- data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +7 -7
- data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +5 -2
- data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +8 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +1 -1
- data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +1 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +162 -12
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +1 -1
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +76 -2
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +2 -2
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +1 -1
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +1 -1
- data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +19 -6
- data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +2 -12
- data/vendor/eigen/Eigen/src/StlSupport/StdList.h +2 -2
- data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +2 -2
- data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +6 -8
- data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +175 -39
- data/vendor/eigen/Eigen/src/misc/lapacke.h +5 -4
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +28 -2
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +155 -11
- data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +626 -242
- data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +14 -0
- data/vendor/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +4 -4
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +10 -0
- data/vendor/eigen/Eigen/src/plugins/ReshapedMethods.h +149 -0
- data/vendor/eigen/README.md +2 -0
- data/vendor/eigen/bench/btl/README +1 -1
- data/vendor/eigen/bench/tensors/README +6 -7
- data/vendor/eigen/ci/README.md +56 -0
- data/vendor/eigen/demos/mix_eigen_and_c/README +1 -1
- data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +213 -158
- data/vendor/eigen/unsupported/README.txt +1 -1
- data/vendor/tomotopy/README.kr.rst +21 -0
- data/vendor/tomotopy/README.rst +20 -0
- data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +2 -2
- data/vendor/tomotopy/src/Labeling/Phraser.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/CTModel.hpp +2 -1
- data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +2 -1
- data/vendor/tomotopy/src/TopicModel/DTModel.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/HDP.h +1 -0
- data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +53 -2
- data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +16 -5
- data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/PT.h +3 -1
- data/vendor/tomotopy/src/TopicModel/PTModel.hpp +31 -1
- data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +7 -5
- data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +36 -1
- data/vendor/tomotopy/src/Utils/exception.h +6 -0
- data/vendor/tomotopy/src/Utils/sample.hpp +14 -12
- data/vendor/tomotopy/src/Utils/sse_gamma.h +0 -3
- metadata +60 -14
- data/vendor/eigen/Eigen/CMakeLists.txt +0 -19
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +0 -674
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +0 -333
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +0 -1124
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +0 -212
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +0 -161
- data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +0 -338
data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h (new file)
@@ -0,0 +1,752 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2020, Arm Limited and Contributors
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SVE_H
#define EIGEN_PACKET_MATH_SVE_H

namespace Eigen
{
namespace internal
{
#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif

#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32

template <typename Scalar, int SVEVectorLength>
struct sve_packet_size_selector {
  enum { size = SVEVectorLength / (sizeof(Scalar) * CHAR_BIT) };
};

/********************************* int32 **************************************/
typedef svint32_t PacketXi __attribute__((arm_sve_vector_bits(EIGEN_ARM64_SVE_VL)));

template <>
struct packet_traits<numext::int32_t> : default_packet_traits {
  typedef PacketXi type;
  typedef PacketXi half;  // Half not implemented yet
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = sve_packet_size_selector<numext::int32_t, EIGEN_ARM64_SVE_VL>::size,
    HasHalfPacket = 0,

    HasAdd = 1,
    HasSub = 1,
    HasShift = 1,
    HasMul = 1,
    HasNegate = 1,
    HasAbs = 1,
    HasArg = 0,
    HasAbs2 = 1,
    HasMin = 1,
    HasMax = 1,
    HasConj = 1,
    HasSetLinear = 0,
    HasBlend = 0,
    HasReduxp = 0  // Not implemented in SVE
  };
};

template <>
struct unpacket_traits<PacketXi> {
  typedef numext::int32_t type;
  typedef PacketXi half;  // Half not yet implemented
  enum {
    size = sve_packet_size_selector<numext::int32_t, EIGEN_ARM64_SVE_VL>::size,
    alignment = Aligned64,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};

template <>
EIGEN_STRONG_INLINE void prefetch<numext::int32_t>(const numext::int32_t* addr)
{
  svprfw(svptrue_b32(), addr, SV_PLDL1KEEP);
}

template <>
EIGEN_STRONG_INLINE PacketXi pset1<PacketXi>(const numext::int32_t& from)
{
  return svdup_n_s32(from);
}

template <>
EIGEN_STRONG_INLINE PacketXi plset<PacketXi>(const numext::int32_t& a)
{
  numext::int32_t c[packet_traits<numext::int32_t>::size];
  for (int i = 0; i < packet_traits<numext::int32_t>::size; i++) c[i] = i;
  return svadd_s32_z(svptrue_b32(), pset1<PacketXi>(a), svld1_s32(svptrue_b32(), c));
}

template <>
EIGEN_STRONG_INLINE PacketXi padd<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svadd_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi psub<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svsub_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi pnegate(const PacketXi& a)
{
  return svneg_s32_z(svptrue_b32(), a);
}

template <>
EIGEN_STRONG_INLINE PacketXi pconj(const PacketXi& a)
{
  return a;
}

template <>
EIGEN_STRONG_INLINE PacketXi pmul<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svmul_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi pdiv<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svdiv_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi pmadd(const PacketXi& a, const PacketXi& b, const PacketXi& c)
{
  return svmla_s32_z(svptrue_b32(), c, a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi pmin<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svmin_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi pmax<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svmax_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi pcmp_le<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svdup_n_s32_z(svcmplt_s32(svptrue_b32(), a, b), 0xffffffffu);
}

template <>
EIGEN_STRONG_INLINE PacketXi pcmp_lt<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svdup_n_s32_z(svcmplt_s32(svptrue_b32(), a, b), 0xffffffffu);
}

template <>
EIGEN_STRONG_INLINE PacketXi pcmp_eq<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svdup_n_s32_z(svcmpeq_s32(svptrue_b32(), a, b), 0xffffffffu);
}

template <>
EIGEN_STRONG_INLINE PacketXi ptrue<PacketXi>(const PacketXi& /*a*/)
{
  return svdup_n_s32_z(svptrue_b32(), 0xffffffffu);
}

template <>
EIGEN_STRONG_INLINE PacketXi pzero<PacketXi>(const PacketXi& /*a*/)
{
  return svdup_n_s32_z(svptrue_b32(), 0);
}

template <>
EIGEN_STRONG_INLINE PacketXi pand<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svand_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi por<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svorr_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi pxor<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return sveor_s32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXi pandnot<PacketXi>(const PacketXi& a, const PacketXi& b)
{
  return svbic_s32_z(svptrue_b32(), a, b);
}

template <int N>
EIGEN_STRONG_INLINE PacketXi parithmetic_shift_right(PacketXi a)
{
  return svasrd_n_s32_z(svptrue_b32(), a, N);
}

template <int N>
EIGEN_STRONG_INLINE PacketXi plogical_shift_right(PacketXi a)
{
  return svreinterpret_s32_u32(svlsr_u32_z(svptrue_b32(), svreinterpret_u32_s32(a), svdup_n_u32_z(svptrue_b32(), N)));
}

template <int N>
EIGEN_STRONG_INLINE PacketXi plogical_shift_left(PacketXi a)
{
  return svlsl_s32_z(svptrue_b32(), a, svdup_n_u32_z(svptrue_b32(), N));
}

template <>
EIGEN_STRONG_INLINE PacketXi pload<PacketXi>(const numext::int32_t* from)
{
  EIGEN_DEBUG_ALIGNED_LOAD return svld1_s32(svptrue_b32(), from);
}

template <>
EIGEN_STRONG_INLINE PacketXi ploadu<PacketXi>(const numext::int32_t* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD return svld1_s32(svptrue_b32(), from);
}

template <>
EIGEN_STRONG_INLINE PacketXi ploaddup<PacketXi>(const numext::int32_t* from)
{
  svuint32_t indices = svindex_u32(0, 1);  // index {base=0, base+step=1, base+step*2, ...}
  indices = svzip1_u32(indices, indices);  // index in the format {a0, a0, a1, a1, a2, a2, ...}
  return svld1_gather_u32index_s32(svptrue_b32(), from, indices);
}

template <>
EIGEN_STRONG_INLINE PacketXi ploadquad<PacketXi>(const numext::int32_t* from)
{
  svuint32_t indices = svindex_u32(0, 1);  // index {base=0, base+step=1, base+step*2, ...}
  indices = svzip1_u32(indices, indices);  // index in the format {a0, a0, a1, a1, a2, a2, ...}
  indices = svzip1_u32(indices, indices);  // index in the format {a0, a0, a0, a0, a1, a1, a1, a1, ...}
  return svld1_gather_u32index_s32(svptrue_b32(), from, indices);
}

template <>
EIGEN_STRONG_INLINE void pstore<numext::int32_t>(numext::int32_t* to, const PacketXi& from)
{
  EIGEN_DEBUG_ALIGNED_STORE svst1_s32(svptrue_b32(), to, from);
}

template <>
EIGEN_STRONG_INLINE void pstoreu<numext::int32_t>(numext::int32_t* to, const PacketXi& from)
{
  EIGEN_DEBUG_UNALIGNED_STORE svst1_s32(svptrue_b32(), to, from);
}

template <>
EIGEN_DEVICE_FUNC inline PacketXi pgather<numext::int32_t, PacketXi>(const numext::int32_t* from, Index stride)
{
  // Indice format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
  svint32_t indices = svindex_s32(0, stride);
  return svld1_gather_s32index_s32(svptrue_b32(), from, indices);
}

template <>
EIGEN_DEVICE_FUNC inline void pscatter<numext::int32_t, PacketXi>(numext::int32_t* to, const PacketXi& from, Index stride)
{
  // Indice format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
  svint32_t indices = svindex_s32(0, stride);
  svst1_scatter_s32index_s32(svptrue_b32(), to, indices, from);
}

template <>
EIGEN_STRONG_INLINE numext::int32_t pfirst<PacketXi>(const PacketXi& a)
{
  // svlasta returns the first element if all predicate bits are 0
  return svlasta_s32(svpfalse_b(), a);
}

template <>
EIGEN_STRONG_INLINE PacketXi preverse(const PacketXi& a)
{
  return svrev_s32(a);
}

template <>
EIGEN_STRONG_INLINE PacketXi pabs(const PacketXi& a)
{
  return svabs_s32_z(svptrue_b32(), a);
}

template <>
EIGEN_STRONG_INLINE numext::int32_t predux<PacketXi>(const PacketXi& a)
{
  return static_cast<numext::int32_t>(svaddv_s32(svptrue_b32(), a));
}

template <>
EIGEN_STRONG_INLINE numext::int32_t predux_mul<PacketXi>(const PacketXi& a)
{
  EIGEN_STATIC_ASSERT((EIGEN_ARM64_SVE_VL % 128 == 0),
                      EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);

  // Multiply the vector by its reverse
  svint32_t prod = svmul_s32_z(svptrue_b32(), a, svrev_s32(a));
  svint32_t half_prod;

  // Extract the high half of the vector. Depending on the VL more reductions need to be done
  if (EIGEN_ARM64_SVE_VL >= 2048) {
    half_prod = svtbl_s32(prod, svindex_u32(32, 1));
    prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
  }
  if (EIGEN_ARM64_SVE_VL >= 1024) {
    half_prod = svtbl_s32(prod, svindex_u32(16, 1));
    prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
  }
  if (EIGEN_ARM64_SVE_VL >= 512) {
    half_prod = svtbl_s32(prod, svindex_u32(8, 1));
    prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
  }
  if (EIGEN_ARM64_SVE_VL >= 256) {
    half_prod = svtbl_s32(prod, svindex_u32(4, 1));
    prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
  }
  // Last reduction
  half_prod = svtbl_s32(prod, svindex_u32(2, 1));
  prod = svmul_s32_z(svptrue_b32(), prod, half_prod);

  // The reduction is done to the first element.
  return pfirst<PacketXi>(prod);
}

template <>
EIGEN_STRONG_INLINE numext::int32_t predux_min<PacketXi>(const PacketXi& a)
{
  return svminv_s32(svptrue_b32(), a);
}

template <>
EIGEN_STRONG_INLINE numext::int32_t predux_max<PacketXi>(const PacketXi& a)
{
  return svmaxv_s32(svptrue_b32(), a);
}

template <int N>
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<PacketXi, N>& kernel) {
  int buffer[packet_traits<numext::int32_t>::size * N] = {0};
  int i = 0;

  PacketXi stride_index = svindex_s32(0, N);

  for (i = 0; i < N; i++) {
    svst1_scatter_s32index_s32(svptrue_b32(), buffer + i, stride_index, kernel.packet[i]);
  }
  for (i = 0; i < N; i++) {
    kernel.packet[i] = svld1_s32(svptrue_b32(), buffer + i * packet_traits<numext::int32_t>::size);
  }
}

/********************************* float32 ************************************/

typedef svfloat32_t PacketXf __attribute__((arm_sve_vector_bits(EIGEN_ARM64_SVE_VL)));

template <>
struct packet_traits<float> : default_packet_traits {
  typedef PacketXf type;
  typedef PacketXf half;

  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = sve_packet_size_selector<float, EIGEN_ARM64_SVE_VL>::size,
    HasHalfPacket = 0,

    HasAdd = 1,
    HasSub = 1,
    HasShift = 1,
    HasMul = 1,
    HasNegate = 1,
    HasAbs = 1,
    HasArg = 0,
    HasAbs2 = 1,
    HasMin = 1,
    HasMax = 1,
    HasConj = 1,
    HasSetLinear = 0,
    HasBlend = 0,
    HasReduxp = 0,  // Not implemented in SVE

    HasDiv = 1,
    HasFloor = 1,

    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasLog = 1,
    HasExp = 1,
    HasSqrt = 0,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH
  };
};

template <>
struct unpacket_traits<PacketXf> {
  typedef float type;
  typedef PacketXf half;  // Half not yet implemented
  typedef PacketXi integer_packet;

  enum {
    size = sve_packet_size_selector<float, EIGEN_ARM64_SVE_VL>::size,
    alignment = Aligned64,
    vectorizable = true,
    masked_load_available = false,
    masked_store_available = false
  };
};

template <>
EIGEN_STRONG_INLINE PacketXf pset1<PacketXf>(const float& from)
{
  return svdup_n_f32(from);
}

template <>
EIGEN_STRONG_INLINE PacketXf pset1frombits<PacketXf>(numext::uint32_t from)
{
  return svreinterpret_f32_u32(svdup_n_u32_z(svptrue_b32(), from));
}

template <>
EIGEN_STRONG_INLINE PacketXf plset<PacketXf>(const float& a)
{
  float c[packet_traits<float>::size];
  for (int i = 0; i < packet_traits<float>::size; i++) c[i] = i;
  return svadd_f32_z(svptrue_b32(), pset1<PacketXf>(a), svld1_f32(svptrue_b32(), c));
}

template <>
EIGEN_STRONG_INLINE PacketXf padd<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svadd_f32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf psub<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svsub_f32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pnegate(const PacketXf& a)
{
  return svneg_f32_z(svptrue_b32(), a);
}

template <>
EIGEN_STRONG_INLINE PacketXf pconj(const PacketXf& a)
{
  return a;
}

template <>
EIGEN_STRONG_INLINE PacketXf pmul<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svmul_f32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pdiv<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svdiv_f32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pmadd(const PacketXf& a, const PacketXf& b, const PacketXf& c)
{
  return svmla_f32_z(svptrue_b32(), c, a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pmin<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svmin_f32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pmin<PropagateNaN, PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return pmin<PacketXf>(a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pmin<PropagateNumbers, PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svminnm_f32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pmax<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svmax_f32_z(svptrue_b32(), a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pmax<PropagateNaN, PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return pmax<PacketXf>(a, b);
}

template <>
EIGEN_STRONG_INLINE PacketXf pmax<PropagateNumbers, PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svmaxnm_f32_z(svptrue_b32(), a, b);
}

// Float comparisons in SVE return svbool (predicate). Use svdup to set active
// lanes to 1 (0xffffffffu) and inactive lanes to 0.
template <>
EIGEN_STRONG_INLINE PacketXf pcmp_le<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svreinterpret_f32_u32(svdup_n_u32_z(svcmplt_f32(svptrue_b32(), a, b), 0xffffffffu));
}

template <>
EIGEN_STRONG_INLINE PacketXf pcmp_lt<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svreinterpret_f32_u32(svdup_n_u32_z(svcmplt_f32(svptrue_b32(), a, b), 0xffffffffu));
}

template <>
EIGEN_STRONG_INLINE PacketXf pcmp_eq<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svreinterpret_f32_u32(svdup_n_u32_z(svcmpeq_f32(svptrue_b32(), a, b), 0xffffffffu));
}

// Do a predicate inverse (svnot_b_z) on the predicate resulted from the
// greater/equal comparison (svcmpge_f32). Then fill a float vector with the
// active elements.
template <>
EIGEN_STRONG_INLINE PacketXf pcmp_lt_or_nan<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svreinterpret_f32_u32(svdup_n_u32_z(svnot_b_z(svptrue_b32(), svcmpge_f32(svptrue_b32(), a, b)), 0xffffffffu));
}

template <>
EIGEN_STRONG_INLINE PacketXf pfloor<PacketXf>(const PacketXf& a)
{
  return svrintm_f32_z(svptrue_b32(), a);
}

template <>
EIGEN_STRONG_INLINE PacketXf ptrue<PacketXf>(const PacketXf& /*a*/)
{
  return svreinterpret_f32_u32(svdup_n_u32_z(svptrue_b32(), 0xffffffffu));
}

// Logical Operations are not supported for float, so reinterpret casts
template <>
EIGEN_STRONG_INLINE PacketXf pand<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svreinterpret_f32_u32(svand_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
}

template <>
EIGEN_STRONG_INLINE PacketXf por<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svreinterpret_f32_u32(svorr_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
}

template <>
EIGEN_STRONG_INLINE PacketXf pxor<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svreinterpret_f32_u32(sveor_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
}

template <>
EIGEN_STRONG_INLINE PacketXf pandnot<PacketXf>(const PacketXf& a, const PacketXf& b)
{
  return svreinterpret_f32_u32(svbic_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
}

template <>
EIGEN_STRONG_INLINE PacketXf pload<PacketXf>(const float* from)
{
  EIGEN_DEBUG_ALIGNED_LOAD return svld1_f32(svptrue_b32(), from);
}

template <>
EIGEN_STRONG_INLINE PacketXf ploadu<PacketXf>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD return svld1_f32(svptrue_b32(), from);
}

template <>
EIGEN_STRONG_INLINE PacketXf ploaddup<PacketXf>(const float* from)
{
  svuint32_t indices = svindex_u32(0, 1);  // index {base=0, base+step=1, base+step*2, ...}
  indices = svzip1_u32(indices, indices);  // index in the format {a0, a0, a1, a1, a2, a2, ...}
  return svld1_gather_u32index_f32(svptrue_b32(), from, indices);
}

template <>
EIGEN_STRONG_INLINE PacketXf ploadquad<PacketXf>(const float* from)
{
  svuint32_t indices = svindex_u32(0, 1);  // index {base=0, base+step=1, base+step*2, ...}
  indices = svzip1_u32(indices, indices);  // index in the format {a0, a0, a1, a1, a2, a2, ...}
  indices = svzip1_u32(indices, indices);  // index in the format {a0, a0, a0, a0, a1, a1, a1, a1, ...}
  return svld1_gather_u32index_f32(svptrue_b32(), from, indices);
}

template <>
EIGEN_STRONG_INLINE void pstore<float>(float* to, const PacketXf& from)
{
  EIGEN_DEBUG_ALIGNED_STORE svst1_f32(svptrue_b32(), to, from);
}

template <>
EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const PacketXf& from)
{
  EIGEN_DEBUG_UNALIGNED_STORE svst1_f32(svptrue_b32(), to, from);
}

template <>
EIGEN_DEVICE_FUNC inline PacketXf pgather<float, PacketXf>(const float* from, Index stride)
{
  // Indice format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
  svint32_t indices = svindex_s32(0, stride);
  return svld1_gather_s32index_f32(svptrue_b32(), from, indices);
}

template <>
EIGEN_DEVICE_FUNC inline void pscatter<float, PacketXf>(float* to, const PacketXf& from, Index stride)
{
  // Indice format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
  svint32_t indices = svindex_s32(0, stride);
  svst1_scatter_s32index_f32(svptrue_b32(), to, indices, from);
}

template <>
EIGEN_STRONG_INLINE float pfirst<PacketXf>(const PacketXf& a)
{
  // svlasta returns the first element if all predicate bits are 0
  return svlasta_f32(svpfalse_b(), a);
}

template <>
EIGEN_STRONG_INLINE PacketXf preverse(const PacketXf& a)
{
  return svrev_f32(a);
}

template <>
EIGEN_STRONG_INLINE PacketXf pabs(const PacketXf& a)
{
  return svabs_f32_z(svptrue_b32(), a);
}

// TODO(tellenbach): Should this go into MathFunctions.h? If so, change for
// all vector extensions and the generic version.
template <>
EIGEN_STRONG_INLINE PacketXf pfrexp<PacketXf>(const PacketXf& a, PacketXf& exponent)
{
  return pfrexp_generic(a, exponent);
}

template <>
EIGEN_STRONG_INLINE float predux<PacketXf>(const PacketXf& a)
{
  return svaddv_f32(svptrue_b32(), a);
}

// Other reduction functions:
// mul
// Only works for SVE Vls multiple of 128
template <>
EIGEN_STRONG_INLINE float predux_mul<PacketXf>(const PacketXf& a)
{
  EIGEN_STATIC_ASSERT((EIGEN_ARM64_SVE_VL % 128 == 0),
                      EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
  // Multiply the vector by its reverse
  svfloat32_t prod = svmul_f32_z(svptrue_b32(), a, svrev_f32(a));
  svfloat32_t half_prod;

  // Extract the high half of the vector. Depending on the VL more reductions need to be done
  if (EIGEN_ARM64_SVE_VL >= 2048) {
    half_prod = svtbl_f32(prod, svindex_u32(32, 1));
    prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
  }
  if (EIGEN_ARM64_SVE_VL >= 1024) {
    half_prod = svtbl_f32(prod, svindex_u32(16, 1));
    prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
  }
  if (EIGEN_ARM64_SVE_VL >= 512) {
    half_prod = svtbl_f32(prod, svindex_u32(8, 1));
    prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
  }
  if (EIGEN_ARM64_SVE_VL >= 256) {
    half_prod = svtbl_f32(prod, svindex_u32(4, 1));
    prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
  }
  // Last reduction
  half_prod = svtbl_f32(prod, svindex_u32(2, 1));
  prod = svmul_f32_z(svptrue_b32(), prod, half_prod);

  // The reduction is done to the first element.
  return pfirst<PacketXf>(prod);
}

template <>
EIGEN_STRONG_INLINE float predux_min<PacketXf>(const PacketXf& a)
{
  return svminv_f32(svptrue_b32(), a);
}

template <>
EIGEN_STRONG_INLINE float predux_max<PacketXf>(const PacketXf& a)
{
  return svmaxv_f32(svptrue_b32(), a);
}

template<int N>
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<PacketXf, N>& kernel)
{
  float buffer[packet_traits<float>::size * N] = {0};
  int i = 0;

  PacketXi stride_index = svindex_s32(0, N);

  for (i = 0; i < N; i++) {
    svst1_scatter_s32index_f32(svptrue_b32(), buffer + i, stride_index, kernel.packet[i]);
  }

  for (i = 0; i < N; i++) {
    kernel.packet[i] = svld1_f32(svptrue_b32(), buffer + i * packet_traits<float>::size);
  }
}

template<>
EIGEN_STRONG_INLINE PacketXf pldexp<PacketXf>(const PacketXf& a, const PacketXf& exponent)
{
  return pldexp_generic(a, exponent);
}

}  // namespace internal
}  // namespace Eigen

#endif  // EIGEN_PACKET_MATH_SVE_H