tomoto 0.2.3 → 0.3.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/README.md +8 -10
- data/ext/tomoto/extconf.rb +6 -2
- data/ext/tomoto/{ext.cpp → tomoto.cpp} +1 -1
- data/lib/tomoto/version.rb +1 -1
- data/lib/tomoto.rb +5 -1
- data/vendor/EigenRand/EigenRand/Core.h +10 -10
- data/vendor/EigenRand/EigenRand/Dists/Basic.h +208 -9
- data/vendor/EigenRand/EigenRand/Dists/Discrete.h +52 -31
- data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +9 -8
- data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +28 -21
- data/vendor/EigenRand/EigenRand/EigenRand +11 -6
- data/vendor/EigenRand/EigenRand/Macro.h +13 -7
- data/vendor/EigenRand/EigenRand/MorePacketMath.h +348 -740
- data/vendor/EigenRand/EigenRand/MvDists/Multinomial.h +5 -3
- data/vendor/EigenRand/EigenRand/MvDists/MvNormal.h +9 -3
- data/vendor/EigenRand/EigenRand/PacketFilter.h +11 -253
- data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +21 -47
- data/vendor/EigenRand/EigenRand/RandUtils.h +50 -344
- data/vendor/EigenRand/EigenRand/arch/AVX/MorePacketMath.h +619 -0
- data/vendor/EigenRand/EigenRand/arch/AVX/PacketFilter.h +149 -0
- data/vendor/EigenRand/EigenRand/arch/AVX/RandUtils.h +228 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/MorePacketMath.h +473 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/PacketFilter.h +142 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/RandUtils.h +126 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/MorePacketMath.h +501 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/PacketFilter.h +133 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/RandUtils.h +120 -0
- data/vendor/EigenRand/EigenRand/doc.h +24 -12
- data/vendor/EigenRand/README.md +57 -4
- data/vendor/eigen/COPYING.APACHE +203 -0
- data/vendor/eigen/COPYING.BSD +1 -1
- data/vendor/eigen/COPYING.MINPACK +51 -52
- data/vendor/eigen/Eigen/Cholesky +0 -1
- data/vendor/eigen/Eigen/Core +112 -265
- data/vendor/eigen/Eigen/Eigenvalues +2 -3
- data/vendor/eigen/Eigen/Geometry +5 -8
- data/vendor/eigen/Eigen/Householder +0 -1
- data/vendor/eigen/Eigen/Jacobi +0 -1
- data/vendor/eigen/Eigen/KLUSupport +41 -0
- data/vendor/eigen/Eigen/LU +2 -5
- data/vendor/eigen/Eigen/OrderingMethods +0 -3
- data/vendor/eigen/Eigen/PaStiXSupport +1 -0
- data/vendor/eigen/Eigen/PardisoSupport +0 -0
- data/vendor/eigen/Eigen/QR +2 -3
- data/vendor/eigen/Eigen/QtAlignedMalloc +0 -1
- data/vendor/eigen/Eigen/SVD +0 -1
- data/vendor/eigen/Eigen/Sparse +0 -2
- data/vendor/eigen/Eigen/SparseCholesky +0 -8
- data/vendor/eigen/Eigen/SparseLU +4 -0
- data/vendor/eigen/Eigen/SparseQR +0 -1
- data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +42 -27
- data/vendor/eigen/Eigen/src/Cholesky/LLT.h +39 -23
- data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +90 -47
- data/vendor/eigen/Eigen/src/Core/ArithmeticSequence.h +413 -0
- data/vendor/eigen/Eigen/src/Core/Array.h +99 -11
- data/vendor/eigen/Eigen/src/Core/ArrayBase.h +3 -3
- data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +21 -21
- data/vendor/eigen/Eigen/src/Core/Assign.h +1 -1
- data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +125 -50
- data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +10 -10
- data/vendor/eigen/Eigen/src/Core/BandMatrix.h +16 -16
- data/vendor/eigen/Eigen/src/Core/Block.h +56 -60
- data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +29 -31
- data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +7 -3
- data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +325 -272
- data/vendor/eigen/Eigen/src/Core/CoreIterators.h +5 -0
- data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +21 -22
- data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +153 -18
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +6 -6
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +14 -10
- data/vendor/eigen/Eigen/src/Core/DenseBase.h +132 -42
- data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +25 -21
- data/vendor/eigen/Eigen/src/Core/DenseStorage.h +153 -71
- data/vendor/eigen/Eigen/src/Core/Diagonal.h +21 -23
- data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +50 -2
- data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +1 -1
- data/vendor/eigen/Eigen/src/Core/Dot.h +10 -10
- data/vendor/eigen/Eigen/src/Core/EigenBase.h +10 -9
- data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +8 -4
- data/vendor/eigen/Eigen/src/Core/Fuzzy.h +3 -3
- data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +20 -10
- data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +599 -152
- data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +40 -33
- data/vendor/eigen/Eigen/src/Core/IO.h +40 -7
- data/vendor/eigen/Eigen/src/Core/IndexedView.h +237 -0
- data/vendor/eigen/Eigen/src/Core/Inverse.h +9 -10
- data/vendor/eigen/Eigen/src/Core/Map.h +7 -7
- data/vendor/eigen/Eigen/src/Core/MapBase.h +10 -3
- data/vendor/eigen/Eigen/src/Core/MathFunctions.h +767 -125
- data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +118 -19
- data/vendor/eigen/Eigen/src/Core/Matrix.h +131 -25
- data/vendor/eigen/Eigen/src/Core/MatrixBase.h +21 -3
- data/vendor/eigen/Eigen/src/Core/NestByValue.h +25 -50
- data/vendor/eigen/Eigen/src/Core/NoAlias.h +4 -3
- data/vendor/eigen/Eigen/src/Core/NumTraits.h +107 -20
- data/vendor/eigen/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
- data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +3 -31
- data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +152 -59
- data/vendor/eigen/Eigen/src/Core/Product.h +30 -25
- data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +192 -125
- data/vendor/eigen/Eigen/src/Core/Random.h +37 -1
- data/vendor/eigen/Eigen/src/Core/Redux.h +180 -170
- data/vendor/eigen/Eigen/src/Core/Ref.h +121 -23
- data/vendor/eigen/Eigen/src/Core/Replicate.h +8 -8
- data/vendor/eigen/Eigen/src/Core/Reshaped.h +454 -0
- data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +7 -5
- data/vendor/eigen/Eigen/src/Core/Reverse.h +18 -12
- data/vendor/eigen/Eigen/src/Core/Select.h +8 -6
- data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +33 -20
- data/vendor/eigen/Eigen/src/Core/Solve.h +14 -14
- data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +16 -16
- data/vendor/eigen/Eigen/src/Core/SolverBase.h +41 -3
- data/vendor/eigen/Eigen/src/Core/StableNorm.h +100 -70
- data/vendor/eigen/Eigen/src/Core/StlIterators.h +463 -0
- data/vendor/eigen/Eigen/src/Core/Stride.h +9 -4
- data/vendor/eigen/Eigen/src/Core/Swap.h +5 -4
- data/vendor/eigen/Eigen/src/Core/Transpose.h +88 -27
- data/vendor/eigen/Eigen/src/Core/Transpositions.h +26 -47
- data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +93 -75
- data/vendor/eigen/Eigen/src/Core/VectorBlock.h +5 -5
- data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +159 -70
- data/vendor/eigen/Eigen/src/Core/Visitor.h +137 -29
- data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +50 -129
- data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +126 -337
- data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1092 -155
- data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +65 -1
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +207 -236
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1482 -495
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +152 -165
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +19 -251
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2042 -392
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +235 -80
- data/vendor/eigen/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +102 -14
- data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Half.h +942 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +1 -1
- data/vendor/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
- data/vendor/eigen/Eigen/src/Core/arch/{CUDA → GPU}/MathFunctions.h +16 -4
- data/vendor/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
- data/vendor/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
- data/vendor/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/Complex.h +648 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +313 -219
- data/vendor/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +54 -70
- data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4376 -549
- data/vendor/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +59 -179
- data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +65 -428
- data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +893 -283
- data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +65 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +212 -183
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +101 -5
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +510 -395
- data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +11 -2
- data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +112 -46
- data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +31 -30
- data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +32 -2
- data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +355 -16
- data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1075 -586
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +49 -24
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +41 -35
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +6 -6
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +4 -2
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +382 -483
- data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +22 -5
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +53 -30
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +16 -8
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +8 -6
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +4 -4
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +5 -4
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +33 -27
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +14 -12
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +36 -34
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +8 -4
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +13 -10
- data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +304 -119
- data/vendor/eigen/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
- data/vendor/eigen/Eigen/src/Core/util/Constants.h +25 -9
- data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +26 -3
- data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +29 -9
- data/vendor/eigen/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
- data/vendor/eigen/Eigen/src/Core/util/IntegralConstant.h +272 -0
- data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +8 -1
- data/vendor/eigen/Eigen/src/Core/util/Macros.h +709 -246
- data/vendor/eigen/Eigen/src/Core/util/Memory.h +222 -52
- data/vendor/eigen/Eigen/src/Core/util/Meta.h +355 -77
- data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +5 -1
- data/vendor/eigen/Eigen/src/Core/util/ReshapedHelper.h +51 -0
- data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +8 -5
- data/vendor/eigen/Eigen/src/Core/util/SymbolicIndex.h +293 -0
- data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +65 -30
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +1 -1
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +7 -4
- data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +1 -1
- data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +9 -6
- data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +21 -9
- data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +77 -43
- data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +20 -15
- data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +99 -5
- data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +4 -4
- data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +3 -3
- data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +15 -11
- data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +1 -1
- data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +3 -2
- data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +39 -2
- data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +70 -14
- data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +3 -3
- data/vendor/eigen/Eigen/src/Geometry/Scaling.h +23 -5
- data/vendor/eigen/Eigen/src/Geometry/Transform.h +88 -67
- data/vendor/eigen/Eigen/src/Geometry/Translation.h +6 -12
- data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +1 -1
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
- data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +9 -2
- data/vendor/eigen/Eigen/src/Householder/Householder.h +8 -4
- data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +123 -48
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +15 -15
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +7 -23
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +5 -22
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +41 -47
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +51 -60
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +70 -20
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +2 -20
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +11 -9
- data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +31 -10
- data/vendor/eigen/Eigen/src/KLUSupport/KLUSupport.h +358 -0
- data/vendor/eigen/Eigen/src/LU/Determinant.h +35 -19
- data/vendor/eigen/Eigen/src/LU/FullPivLU.h +29 -43
- data/vendor/eigen/Eigen/src/LU/InverseImpl.h +25 -8
- data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +71 -58
- data/vendor/eigen/Eigen/src/LU/arch/InverseSize4.h +351 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +7 -17
- data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +297 -277
- data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +6 -10
- data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +1 -1
- data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +9 -7
- data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +41 -20
- data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +100 -27
- data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +59 -22
- data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +48 -23
- data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +25 -3
- data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +183 -63
- data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +22 -14
- data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22
- data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37
- data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2
- data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0
- data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6
- data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27
- data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +25 -57
- data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +40 -11
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +11 -15
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +4 -2
- data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +30 -8
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +126 -11
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +5 -12
- data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +13 -1
- data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +7 -7
- data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +5 -2
- data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +8 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +1 -1
- data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +1 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +162 -12
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +1 -1
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +76 -2
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +2 -2
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +1 -1
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +1 -1
- data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +19 -6
- data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +2 -12
- data/vendor/eigen/Eigen/src/StlSupport/StdList.h +2 -2
- data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +2 -2
- data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +6 -8
- data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +175 -39
- data/vendor/eigen/Eigen/src/misc/lapacke.h +5 -4
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +28 -2
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +155 -11
- data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +626 -242
- data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +14 -0
- data/vendor/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +4 -4
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +10 -0
- data/vendor/eigen/Eigen/src/plugins/ReshapedMethods.h +149 -0
- data/vendor/eigen/README.md +2 -0
- data/vendor/eigen/bench/btl/README +1 -1
- data/vendor/eigen/bench/tensors/README +6 -7
- data/vendor/eigen/ci/README.md +56 -0
- data/vendor/eigen/demos/mix_eigen_and_c/README +1 -1
- data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +213 -158
- data/vendor/eigen/unsupported/README.txt +1 -1
- data/vendor/tomotopy/README.kr.rst +21 -0
- data/vendor/tomotopy/README.rst +20 -0
- data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +2 -2
- data/vendor/tomotopy/src/Labeling/Phraser.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/CTModel.hpp +2 -1
- data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +2 -1
- data/vendor/tomotopy/src/TopicModel/DTModel.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/HDP.h +1 -0
- data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +53 -2
- data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +16 -5
- data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/PT.h +3 -1
- data/vendor/tomotopy/src/TopicModel/PTModel.hpp +31 -1
- data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +7 -5
- data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +36 -1
- data/vendor/tomotopy/src/Utils/exception.h +6 -0
- data/vendor/tomotopy/src/Utils/sample.hpp +14 -12
- data/vendor/tomotopy/src/Utils/sse_gamma.h +0 -3
- metadata +60 -14
- data/vendor/eigen/Eigen/CMakeLists.txt +0 -19
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +0 -674
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +0 -333
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +0 -1124
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +0 -212
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +0 -161
- data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +0 -338
@@ -0,0 +1,942 @@
|
|
1
|
+
// This file is part of Eigen, a lightweight C++ template library
|
2
|
+
// for linear algebra.
|
3
|
+
//
|
4
|
+
// This Source Code Form is subject to the terms of the Mozilla
|
5
|
+
// Public License v. 2.0. If a copy of the MPL was not distributed
|
6
|
+
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
7
|
+
//
|
8
|
+
// The conversion routines are Copyright (c) Fabian Giesen, 2016.
|
9
|
+
// The original license follows:
|
10
|
+
//
|
11
|
+
// Copyright (c) Fabian Giesen, 2016
|
12
|
+
// All rights reserved.
|
13
|
+
// Redistribution and use in source and binary forms, with or without
|
14
|
+
// modification, are permitted.
|
15
|
+
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
16
|
+
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
17
|
+
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
18
|
+
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
19
|
+
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
20
|
+
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
21
|
+
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
22
|
+
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
23
|
+
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24
|
+
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
25
|
+
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26
|
+
|
27
|
+
|
28
|
+
// Standard 16-bit float type, mostly useful for GPUs. Defines a new
|
29
|
+
// type Eigen::half (inheriting either from CUDA's or HIP's __half struct) with
|
30
|
+
// operator overloads such that it behaves basically as an arithmetic
|
31
|
+
// type. It will be quite slow on CPUs (so it is recommended to stay
|
32
|
+
// in fp32 for CPUs, except for simple parameter conversions, I/O
|
33
|
+
// to disk and the likes), but fast on GPUs.
|
34
|
+
|
35
|
+
|
36
|
+
#ifndef EIGEN_HALF_H
|
37
|
+
#define EIGEN_HALF_H
|
38
|
+
|
39
|
+
#include <sstream>
|
40
|
+
|
41
|
+
#if defined(EIGEN_HAS_GPU_FP16) || defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
|
42
|
+
// When compiling with GPU support, the "__half_raw" base class as well as
|
43
|
+
// some other routines are defined in the GPU compiler header files
|
44
|
+
// (cuda_fp16.h, hip_fp16.h), and they are not tagged constexpr
|
45
|
+
// As a consequence, we get compile failures when compiling Eigen with
|
46
|
+
// GPU support. Hence the need to disable EIGEN_CONSTEXPR when building
|
47
|
+
// Eigen with GPU support
|
48
|
+
#pragma push_macro("EIGEN_CONSTEXPR")
|
49
|
+
#undef EIGEN_CONSTEXPR
|
50
|
+
#define EIGEN_CONSTEXPR
|
51
|
+
#endif
|
52
|
+
|
53
|
+
#define F16_PACKET_FUNCTION(PACKET_F, PACKET_F16, METHOD) \
|
54
|
+
template <> \
|
55
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_UNUSED \
|
56
|
+
PACKET_F16 METHOD<PACKET_F16>(const PACKET_F16& _x) { \
|
57
|
+
return float2half(METHOD<PACKET_F>(half2float(_x))); \
|
58
|
+
}
|
59
|
+
|
60
|
+
namespace Eigen {
|
61
|
+
|
62
|
+
struct half;
|
63
|
+
|
64
|
+
namespace half_impl {
|
65
|
+
|
66
|
+
// We want to use the __half_raw struct from the HIP header file only during the device compile phase.
|
67
|
+
// This is required because of a quirk in the way TensorFlow GPU builds are done.
|
68
|
+
// When compiling TensorFlow source code with GPU support, files that
|
69
|
+
// * contain GPU kernels (i.e. *.cu.cc files) are compiled via hipcc
|
70
|
+
// * do not contain GPU kernels ( i.e. *.cc files) are compiled via gcc (typically)
|
71
|
+
//
|
72
|
+
// Tensorflow uses the Eigen::half type as its FP16 type, and there are functions that
|
73
|
+
// * are defined in a file that gets compiled via hipcc AND
|
74
|
+
// * have Eigen::half as a pass-by-value argument AND
|
75
|
+
// * are called in a file that gets compiled via gcc
|
76
|
+
//
|
77
|
+
// In the scenario described above the caller and callee will see different versions
|
78
|
+
// of the Eigen::half base class __half_raw, and they will be compiled by different compilers
|
79
|
+
//
|
80
|
+
// There appears to be an ABI mismatch between gcc and clang (which is called by hipcc) that results in
|
81
|
+
// the callee getting corrupted values for the Eigen::half argument.
|
82
|
+
//
|
83
|
+
// Making the host side compile phase of hipcc use the same Eigen::half impl, as the gcc compile, resolves
|
84
|
+
// this error, and hence the following convoluted #if condition
|
85
|
+
#if !defined(EIGEN_HAS_GPU_FP16) || !defined(EIGEN_GPU_COMPILE_PHASE)
|
86
|
+
// Make our own __half_raw definition that is similar to CUDA's.
|
87
|
+
struct __half_raw {
|
88
|
+
#if (defined(EIGEN_HAS_GPU_FP16) && !defined(EIGEN_GPU_COMPILE_PHASE))
|
89
|
+
// Eigen::half can be used as the datatype for shared memory declarations (in Eigen and TF)
|
90
|
+
// The element type for shared memory cannot have non-trivial constructors
|
91
|
+
// and hence the following special casing (which skips the zero-initilization).
|
92
|
+
// Note that this check gets done even in the host compilation phase, and
|
93
|
+
// hence the need for this
|
94
|
+
EIGEN_DEVICE_FUNC __half_raw() {}
|
95
|
+
#else
|
96
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw() : x(0) {}
|
97
|
+
#endif
|
98
|
+
#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
|
99
|
+
explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw(numext::uint16_t raw) : x(numext::bit_cast<__fp16>(raw)) {
|
100
|
+
}
|
101
|
+
__fp16 x;
|
102
|
+
#else
|
103
|
+
explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw(numext::uint16_t raw) : x(raw) {}
|
104
|
+
numext::uint16_t x;
|
105
|
+
#endif
|
106
|
+
};
|
107
|
+
|
108
|
+
#elif defined(EIGEN_HAS_HIP_FP16)
|
109
|
+
// Nothing to do here
|
110
|
+
// HIP fp16 header file has a definition for __half_raw
|
111
|
+
#elif defined(EIGEN_HAS_CUDA_FP16)
|
112
|
+
#if EIGEN_CUDA_SDK_VER < 90000
|
113
|
+
// In CUDA < 9.0, __half is the equivalent of CUDA 9's __half_raw
|
114
|
+
typedef __half __half_raw;
|
115
|
+
#endif // defined(EIGEN_HAS_CUDA_FP16)
|
116
|
+
#elif defined(SYCL_DEVICE_ONLY)
|
117
|
+
typedef cl::sycl::half __half_raw;
|
118
|
+
#endif
|
119
|
+
|
120
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x);
|
121
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff);
|
122
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h);
|
123
|
+
|
124
|
+
struct half_base : public __half_raw {
|
125
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base() {}
|
126
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half_raw& h) : __half_raw(h) {}
|
127
|
+
|
128
|
+
#if defined(EIGEN_HAS_GPU_FP16)
|
129
|
+
#if defined(EIGEN_HAS_HIP_FP16)
|
130
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half& h) { x = __half_as_ushort(h); }
|
131
|
+
#elif defined(EIGEN_HAS_CUDA_FP16)
|
132
|
+
#if EIGEN_CUDA_SDK_VER >= 90000
|
133
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half& h) : __half_raw(*(__half_raw*)&h) {}
|
134
|
+
#endif
|
135
|
+
#endif
|
136
|
+
#endif
|
137
|
+
};
|
138
|
+
|
139
|
+
} // namespace half_impl
|
140
|
+
|
141
|
+
// Class definition.
|
142
|
+
struct half : public half_impl::half_base {
|
143
|
+
|
144
|
+
// Writing this out as separate #if-else blocks to make the code easier to follow
|
145
|
+
// The same applies to most #if-else blocks in this file
|
146
|
+
#if !defined(EIGEN_HAS_GPU_FP16) || !defined(EIGEN_GPU_COMPILE_PHASE)
|
147
|
+
// Use the same base class for the following two scenarios
|
148
|
+
// * when compiling without GPU support enabled
|
149
|
+
// * during host compile phase when compiling with GPU support enabled
|
150
|
+
typedef half_impl::__half_raw __half_raw;
|
151
|
+
#elif defined(EIGEN_HAS_HIP_FP16)
|
152
|
+
// Nothing to do here
|
153
|
+
// HIP fp16 header file has a definition for __half_raw
|
154
|
+
#elif defined(EIGEN_HAS_CUDA_FP16)
|
155
|
+
// Note that EIGEN_CUDA_SDK_VER is set to 0 even when compiling with HIP, so
|
156
|
+
// (EIGEN_CUDA_SDK_VER < 90000) is true even for HIP! So keeping this within
|
157
|
+
// #if defined(EIGEN_HAS_CUDA_FP16) is needed
|
158
|
+
#if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000
|
159
|
+
typedef half_impl::__half_raw __half_raw;
|
160
|
+
#endif
|
161
|
+
#endif
|
162
|
+
|
163
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half() {}
|
164
|
+
|
165
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half_raw& h) : half_impl::half_base(h) {}
|
166
|
+
|
167
|
+
#if defined(EIGEN_HAS_GPU_FP16)
|
168
|
+
#if defined(EIGEN_HAS_HIP_FP16)
|
169
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half& h) : half_impl::half_base(h) {}
|
170
|
+
#elif defined(EIGEN_HAS_CUDA_FP16)
|
171
|
+
#if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
|
172
|
+
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half& h) : half_impl::half_base(h) {}
|
173
|
+
#endif
|
174
|
+
#endif
|
175
|
+
#endif
|
176
|
+
|
177
|
+
|
178
|
+
explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(bool b)
|
179
|
+
: half_impl::half_base(half_impl::raw_uint16_to_half(b ? 0x3c00 : 0)) {}
|
180
|
+
template<class T>
|
181
|
+
explicit EIGEN_DEVICE_FUNC half(T val)
|
182
|
+
: half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(val))) {}
|
183
|
+
explicit EIGEN_DEVICE_FUNC half(float f)
|
184
|
+
: half_impl::half_base(half_impl::float_to_half_rtne(f)) {}
|
185
|
+
|
186
|
+
// Following the convention of numpy, converting between complex and
|
187
|
+
// float will lead to loss of imag value.
|
188
|
+
template<typename RealScalar>
|
189
|
+
explicit EIGEN_DEVICE_FUNC half(std::complex<RealScalar> c)
|
190
|
+
: half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(c.real()))) {}
|
191
|
+
|
192
|
+
EIGEN_DEVICE_FUNC operator float() const { // NOLINT: Allow implicit conversion to float, because it is lossless.
|
193
|
+
return half_impl::half_to_float(*this);
|
194
|
+
}
|
195
|
+
|
196
|
+
#if defined(EIGEN_HAS_GPU_FP16) && !defined(EIGEN_GPU_COMPILE_PHASE)
|
197
|
+
EIGEN_DEVICE_FUNC operator __half() const {
|
198
|
+
::__half_raw hr;
|
199
|
+
hr.x = x;
|
200
|
+
return __half(hr);
|
201
|
+
}
|
202
|
+
#endif
|
203
|
+
};
|
204
|
+
|
205
|
+
} // end namespace Eigen
|
206
|
+
|
207
|
+
namespace std {
|
208
|
+
template<>
|
209
|
+
struct numeric_limits<Eigen::half> {
|
210
|
+
static const bool is_specialized = true;
|
211
|
+
static const bool is_signed = true;
|
212
|
+
static const bool is_integer = false;
|
213
|
+
static const bool is_exact = false;
|
214
|
+
static const bool has_infinity = true;
|
215
|
+
static const bool has_quiet_NaN = true;
|
216
|
+
static const bool has_signaling_NaN = true;
|
217
|
+
static const float_denorm_style has_denorm = denorm_present;
|
218
|
+
static const bool has_denorm_loss = false;
|
219
|
+
static const std::float_round_style round_style = std::round_to_nearest;
|
220
|
+
static const bool is_iec559 = false;
|
221
|
+
static const bool is_bounded = false;
|
222
|
+
static const bool is_modulo = false;
|
223
|
+
static const int digits = 11;
|
224
|
+
static const int digits10 = 3; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
|
225
|
+
static const int max_digits10 = 5; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
|
226
|
+
static const int radix = 2;
|
227
|
+
static const int min_exponent = -13;
|
228
|
+
static const int min_exponent10 = -4;
|
229
|
+
static const int max_exponent = 16;
|
230
|
+
static const int max_exponent10 = 4;
|
231
|
+
static const bool traps = true;
|
232
|
+
static const bool tinyness_before = false;
|
233
|
+
|
234
|
+
static Eigen::half (min)() { return Eigen::half_impl::raw_uint16_to_half(0x400); }
|
235
|
+
static Eigen::half lowest() { return Eigen::half_impl::raw_uint16_to_half(0xfbff); }
|
236
|
+
static Eigen::half (max)() { return Eigen::half_impl::raw_uint16_to_half(0x7bff); }
|
237
|
+
static Eigen::half epsilon() { return Eigen::half_impl::raw_uint16_to_half(0x0800); }
|
238
|
+
static Eigen::half round_error() { return Eigen::half(0.5); }
|
239
|
+
static Eigen::half infinity() { return Eigen::half_impl::raw_uint16_to_half(0x7c00); }
|
240
|
+
static Eigen::half quiet_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); }
|
241
|
+
static Eigen::half signaling_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7d00); }
|
242
|
+
static Eigen::half denorm_min() { return Eigen::half_impl::raw_uint16_to_half(0x1); }
|
243
|
+
};
|
244
|
+
|
245
|
+
// If std::numeric_limits<T> is specialized, should also specialize
|
246
|
+
// std::numeric_limits<const T>, std::numeric_limits<volatile T>, and
|
247
|
+
// std::numeric_limits<const volatile T>
|
248
|
+
// https://stackoverflow.com/a/16519653/
|
249
|
+
template<>
|
250
|
+
struct numeric_limits<const Eigen::half> : numeric_limits<Eigen::half> {};
|
251
|
+
template<>
|
252
|
+
struct numeric_limits<volatile Eigen::half> : numeric_limits<Eigen::half> {};
|
253
|
+
template<>
|
254
|
+
struct numeric_limits<const volatile Eigen::half> : numeric_limits<Eigen::half> {};
|
255
|
+
} // end namespace std
|
256
|
+
|
257
|
+
namespace Eigen {
|
258
|
+
|
259
|
+
namespace half_impl {
|
260
|
+
|
261
|
+
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && \
|
262
|
+
EIGEN_CUDA_ARCH >= 530) || \
|
263
|
+
(defined(EIGEN_HAS_HIP_FP16) && defined(HIP_DEVICE_COMPILE))
|
264
|
+
// Note: We deliberatly do *not* define this to 1 even if we have Arm's native
|
265
|
+
// fp16 type since GPU halfs are rather different from native CPU halfs.
|
266
|
+
// TODO: Rename to something like EIGEN_HAS_NATIVE_GPU_FP16
|
267
|
+
#define EIGEN_HAS_NATIVE_FP16
|
268
|
+
#endif
|
269
|
+
|
270
|
+
// Intrinsics for native fp16 support. Note that on current hardware,
|
271
|
+
// these are no faster than fp32 arithmetic (you need to use the half2
|
272
|
+
// versions to get the ALU speed increased), but you do save the
|
273
|
+
// conversion steps back and forth.
|
274
|
+
|
275
|
+
#if defined(EIGEN_HAS_NATIVE_FP16)
|
276
|
+
EIGEN_STRONG_INLINE __device__ half operator + (const half& a, const half& b) {
|
277
|
+
#if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
|
278
|
+
return __hadd(::__half(a), ::__half(b));
|
279
|
+
#else
|
280
|
+
return __hadd(a, b);
|
281
|
+
#endif
|
282
|
+
}
|
283
|
+
EIGEN_STRONG_INLINE __device__ half operator * (const half& a, const half& b) {
|
284
|
+
return __hmul(a, b);
|
285
|
+
}
|
286
|
+
EIGEN_STRONG_INLINE __device__ half operator - (const half& a, const half& b) {
|
287
|
+
return __hsub(a, b);
|
288
|
+
}
|
289
|
+
EIGEN_STRONG_INLINE __device__ half operator / (const half& a, const half& b) {
|
290
|
+
#if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
|
291
|
+
return __hdiv(a, b);
|
292
|
+
#else
|
293
|
+
float num = __half2float(a);
|
294
|
+
float denom = __half2float(b);
|
295
|
+
return __float2half(num / denom);
|
296
|
+
#endif
|
297
|
+
}
|
298
|
+
EIGEN_STRONG_INLINE __device__ half operator - (const half& a) {
|
299
|
+
return __hneg(a);
|
300
|
+
}
|
301
|
+
EIGEN_STRONG_INLINE __device__ half& operator += (half& a, const half& b) {
|
302
|
+
a = a + b;
|
303
|
+
return a;
|
304
|
+
}
|
305
|
+
EIGEN_STRONG_INLINE __device__ half& operator *= (half& a, const half& b) {
|
306
|
+
a = a * b;
|
307
|
+
return a;
|
308
|
+
}
|
309
|
+
EIGEN_STRONG_INLINE __device__ half& operator -= (half& a, const half& b) {
|
310
|
+
a = a - b;
|
311
|
+
return a;
|
312
|
+
}
|
313
|
+
EIGEN_STRONG_INLINE __device__ half& operator /= (half& a, const half& b) {
|
314
|
+
a = a / b;
|
315
|
+
return a;
|
316
|
+
}
|
317
|
+
EIGEN_STRONG_INLINE __device__ bool operator == (const half& a, const half& b) {
|
318
|
+
return __heq(a, b);
|
319
|
+
}
|
320
|
+
EIGEN_STRONG_INLINE __device__ bool operator != (const half& a, const half& b) {
|
321
|
+
return __hne(a, b);
|
322
|
+
}
|
323
|
+
EIGEN_STRONG_INLINE __device__ bool operator < (const half& a, const half& b) {
|
324
|
+
return __hlt(a, b);
|
325
|
+
}
|
326
|
+
EIGEN_STRONG_INLINE __device__ bool operator <= (const half& a, const half& b) {
|
327
|
+
return __hle(a, b);
|
328
|
+
}
|
329
|
+
EIGEN_STRONG_INLINE __device__ bool operator > (const half& a, const half& b) {
|
330
|
+
return __hgt(a, b);
|
331
|
+
}
|
332
|
+
EIGEN_STRONG_INLINE __device__ bool operator >= (const half& a, const half& b) {
|
333
|
+
return __hge(a, b);
|
334
|
+
}
|
335
|
+
#endif
|
336
|
+
|
337
|
+
#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
|
338
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {
|
339
|
+
return half(vaddh_f16(a.x, b.x));
|
340
|
+
}
|
341
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) {
|
342
|
+
return half(vmulh_f16(a.x, b.x));
|
343
|
+
}
|
344
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) {
|
345
|
+
return half(vsubh_f16(a.x, b.x));
|
346
|
+
}
|
347
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) {
|
348
|
+
return half(vdivh_f16(a.x, b.x));
|
349
|
+
}
|
350
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) {
|
351
|
+
return half(vnegh_f16(a.x));
|
352
|
+
}
|
353
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) {
|
354
|
+
a = half(vaddh_f16(a.x, b.x));
|
355
|
+
return a;
|
356
|
+
}
|
357
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) {
|
358
|
+
a = half(vmulh_f16(a.x, b.x));
|
359
|
+
return a;
|
360
|
+
}
|
361
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) {
|
362
|
+
a = half(vsubh_f16(a.x, b.x));
|
363
|
+
return a;
|
364
|
+
}
|
365
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) {
|
366
|
+
a = half(vdivh_f16(a.x, b.x));
|
367
|
+
return a;
|
368
|
+
}
|
369
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {
|
370
|
+
return vceqh_f16(a.x, b.x);
|
371
|
+
}
|
372
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {
|
373
|
+
return !vceqh_f16(a.x, b.x);
|
374
|
+
}
|
375
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {
|
376
|
+
return vclth_f16(a.x, b.x);
|
377
|
+
}
|
378
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) {
|
379
|
+
return vcleh_f16(a.x, b.x);
|
380
|
+
}
|
381
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) {
|
382
|
+
return vcgth_f16(a.x, b.x);
|
383
|
+
}
|
384
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) {
|
385
|
+
return vcgeh_f16(a.x, b.x);
|
386
|
+
}
|
387
|
+
// We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler,
|
388
|
+
// invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation
|
389
|
+
// of the functions, while the latter can only deal with one of them.
|
390
|
+
#elif !defined(EIGEN_HAS_NATIVE_FP16) || (EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) // Emulate support for half floats
|
391
|
+
|
392
|
+
#if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC)
|
393
|
+
// We need to provide emulated *host-side* FP16 operators for clang.
|
394
|
+
#pragma push_macro("EIGEN_DEVICE_FUNC")
|
395
|
+
#undef EIGEN_DEVICE_FUNC
|
396
|
+
#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_HAS_NATIVE_FP16)
|
397
|
+
#define EIGEN_DEVICE_FUNC __host__
|
398
|
+
#else // both host and device need emulated ops.
|
399
|
+
#define EIGEN_DEVICE_FUNC __host__ __device__
|
400
|
+
#endif
|
401
|
+
#endif
|
402
|
+
|
403
|
+
// Definitions for CPUs and older HIP+CUDA, mostly working through conversion
|
404
|
+
// to/from fp32.
|
405
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {
|
406
|
+
return half(float(a) + float(b));
|
407
|
+
}
|
408
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) {
|
409
|
+
return half(float(a) * float(b));
|
410
|
+
}
|
411
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) {
|
412
|
+
return half(float(a) - float(b));
|
413
|
+
}
|
414
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) {
|
415
|
+
return half(float(a) / float(b));
|
416
|
+
}
|
417
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) {
|
418
|
+
half result;
|
419
|
+
result.x = a.x ^ 0x8000;
|
420
|
+
return result;
|
421
|
+
}
|
422
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) {
|
423
|
+
a = half(float(a) + float(b));
|
424
|
+
return a;
|
425
|
+
}
|
426
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) {
|
427
|
+
a = half(float(a) * float(b));
|
428
|
+
return a;
|
429
|
+
}
|
430
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) {
|
431
|
+
a = half(float(a) - float(b));
|
432
|
+
return a;
|
433
|
+
}
|
434
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) {
|
435
|
+
a = half(float(a) / float(b));
|
436
|
+
return a;
|
437
|
+
}
|
438
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {
|
439
|
+
return numext::equal_strict(float(a),float(b));
|
440
|
+
}
|
441
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {
|
442
|
+
return numext::not_equal_strict(float(a), float(b));
|
443
|
+
}
|
444
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {
|
445
|
+
return float(a) < float(b);
|
446
|
+
}
|
447
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) {
|
448
|
+
return float(a) <= float(b);
|
449
|
+
}
|
450
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) {
|
451
|
+
return float(a) > float(b);
|
452
|
+
}
|
453
|
+
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) {
|
454
|
+
return float(a) >= float(b);
|
455
|
+
}
|
456
|
+
|
457
|
+
#if defined(__clang__) && defined(__CUDA__)
|
458
|
+
#pragma pop_macro("EIGEN_DEVICE_FUNC")
|
459
|
+
#endif
|
460
|
+
#endif // Emulate support for half floats
|
461
|
+
|
462
|
+
// Division by an index. Do it in full float precision to avoid accuracy
// issues in converting the denominator to half.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, Index b) {
  return half(static_cast<float>(a) / static_cast<float>(b));
}

// Pre-increment: add one and return the updated value.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator++(half& a) {
  a += half(1);
  return a;
}

// Pre-decrement: subtract one and return the updated value.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator--(half& a) {
  a -= half(1);
  return a;
}

// Post-increment: return the value held before the increment.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator++(half& a, int) {
  half original_value = a;
  ++a;
  return original_value;
}

// Post-decrement: return the value held before the decrement.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator--(half& a, int) {
  half original_value = a;
  --a;
  return original_value;
}
|
489
|
+
|
490
|
+
// Conversion routines, including fallbacks for the host or older CUDA.
// Note that newer Intel CPUs (Haswell or newer) have vectorized versions of
// these in hardware. If we need more performance on older/other CPUs, they are
// also possible to vectorize directly.

// Reinterpret a raw 16-bit pattern as a __half_raw (no value conversion).
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x) {
  // We cannot simply do a "return __half_raw(x)" here, because __half_raw is union type
  // in the hip_fp16 header file, and that will trigger a compile error
  // On the other hand, having anything but a return statement also triggers a compile error
  // because this is constexpr function.
  // Fortunately, since we need to disable EIGEN_CONSTEXPR for GPU anyway, we can get out
  // of this catch22 by having separate bodies for GPU / non GPU
#if defined(EIGEN_HAS_GPU_FP16)
  __half_raw h;
  h.x = x;
  return h;
#else
  return __half_raw(x);
#endif
}

// Extract the raw 16-bit pattern from a __half_raw (no value conversion).
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC numext::uint16_t raw_half_as_uint16(const __half_raw& h) {
  // HIP/CUDA/Default have a member 'x' of type uint16_t.
  // For ARM64 native half, the member 'x' is of type __fp16, so we need to bit-cast.
  // For SYCL, cl::sycl::half is _Float16, so cast directly.
#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
  return numext::bit_cast<numext::uint16_t>(h.x);
#elif defined(SYCL_DEVICE_ONLY)
  return numext::bit_cast<numext::uint16_t>(h);
#else
  return h.x;
#endif
}

// Type-punning helper used by the software fp32<->fp16 conversions below to
// view a float's bits as an unsigned int and vice versa.
union float32_bits {
  unsigned int u;
  float f;
};
|
528
|
+
|
529
|
+
// Convert fp32 -> fp16 with round-to-nearest-even. Uses hardware conversions
// where available (GPU intrinsics, F16C, ARM64 fp16), otherwise a branch-light
// software implementation based on integer bit manipulation.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff) {
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
  (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
  __half tmp_ff = __float2half(ff);
  return *(__half_raw*)&tmp_ff;

#elif defined(EIGEN_HAS_FP16_C)
  // x86 F16C: _cvtss_sh with rounding-control 0 (round to nearest even).
  __half_raw h;
  h.x = _cvtss_sh(ff, 0);
  return h;

#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
  __half_raw h;
  h.x = static_cast<__fp16>(ff);
  return h;

#else
  // Software fallback (Fabian Giesen's float->half algorithm).
  float32_bits f; f.f = ff;

  const float32_bits f32infty = { 255 << 23 };
  const float32_bits f16max = { (127 + 16) << 23 };
  const float32_bits denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
  unsigned int sign_mask = 0x80000000u;
  __half_raw o;
  o.x = static_cast<numext::uint16_t>(0x0u);

  // Strip the sign; it is re-attached at the end.
  unsigned int sign = f.u & sign_mask;
  f.u ^= sign;

  // NOTE all the integer compares in this function can be safely
  // compiled into signed compares since all operands are below
  // 0x80000000. Important if you want fast straight SSE2 code
  // (since there's no unsigned PCMPGTD).

  if (f.u >= f16max.u) { // result is Inf or NaN (all exponent bits set)
    o.x = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf
  } else { // (De)normalized number or zero
    if (f.u < (113 << 23)) { // resulting FP16 is subnormal or zero
      // use a magic value to align our 10 mantissa bits at the bottom of
      // the float. as long as FP addition is round-to-nearest-even this
      // just works.
      f.f += denorm_magic.f;

      // and one integer subtract of the bias later, we have our final float!
      o.x = static_cast<numext::uint16_t>(f.u - denorm_magic.u);
    } else {
      unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd

      // update exponent, rounding bias part 1
      // Equivalent to `f.u += ((unsigned int)(15 - 127) << 23) + 0xfff`, but
      // without arithmetic overflow.
      f.u += 0xc8000fffU;
      // rounding bias part 2
      f.u += mant_odd;
      // take the bits!
      o.x = static_cast<numext::uint16_t>(f.u >> 13);
    }
  }

  o.x |= static_cast<numext::uint16_t>(sign >> 16);
  return o;
#endif
}

// Convert fp16 -> fp32 (exact: every half value is representable as float).
// Hardware paths where available, otherwise a software bit-manipulation path.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h) {
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
  (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
  return __half2float(h);
#elif defined(EIGEN_HAS_FP16_C)
  return _cvtsh_ss(h.x);
#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
  return static_cast<float>(h.x);
#else
  const float32_bits magic = { 113 << 23 };
  const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift
  float32_bits o;

  o.u = (h.x & 0x7fff) << 13;    // exponent/mantissa bits
  unsigned int exp = shifted_exp & o.u; // just the exponent
  o.u += (127 - 15) << 23;       // exponent adjust

  // handle exponent special cases
  if (exp == shifted_exp) {      // Inf/NaN?
    o.u += (128 - 16) << 23;     // extra exp adjust
  } else if (exp == 0) {         // Zero/Denormal?
    o.u += 1 << 23;              // extra exp adjust
    o.f -= magic.f;              // renormalize
  }

  o.u |= (h.x & 0x8000) << 16;   // sign bit
  return o.f;
#endif
}
|
622
|
+
|
623
|
+
// --- standard functions ---
// Classification and elementary math for half. Most functions promote to
// fp32, call the C library, and convert back; a few use GPU half intrinsics
// (hexp, hlog, hsqrt, hfloor, hceil) when compiling for capable devices.

// True iff a is +/-inf (exponent all ones, mantissa zero).
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const half& a) {
#ifdef EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC
  // a.x is a native __fp16 here; inspect its bits via bit_cast.
  return (numext::bit_cast<numext::uint16_t>(a.x) & 0x7fff) == 0x7c00;
#else
  return (a.x & 0x7fff) == 0x7c00;
#endif
}
// True iff a is NaN (exponent all ones, mantissa nonzero).
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const half& a) {
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
  (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
  return __hisnan(a);
#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
  return (numext::bit_cast<numext::uint16_t>(a.x) & 0x7fff) > 0x7c00;
#else
  return (a.x & 0x7fff) > 0x7c00;
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const half& a) {
  // EIGEN_NOT_A_MACRO defeats any min/max-style macro expansion of the names.
  return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a));
}

EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half& a) {
#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
  return half(vabsh_f16(a.x));
#else
  // Clear the sign bit; exact, no fp32 round trip needed.
  half result;
  result.x = a.x & 0x7FFF;
  return result;
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half exp(const half& a) {
#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
  defined(EIGEN_HIP_DEVICE_COMPILE)
  return half(hexp(a));
#else
  return half(::expf(float(a)));
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half expm1(const half& a) {
  return half(numext::expm1(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log(const half& a) {
#if (defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDA_SDK_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
  (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
  return half(::hlog(a));
#else
  return half(::logf(float(a)));
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log1p(const half& a) {
  return half(numext::log1p(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log10(const half& a) {
  return half(::log10f(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log2(const half& a) {
  // log2(x) = log2(e) * ln(x)
  return half(static_cast<float>(EIGEN_LOG2E) * ::logf(float(a)));
}

EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sqrt(const half& a) {
#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
  defined(EIGEN_HIP_DEVICE_COMPILE)
  return half(hsqrt(a));
#else
  return half(::sqrtf(float(a)));
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half pow(const half& a, const half& b) {
  return half(::powf(float(a), float(b)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sin(const half& a) {
  return half(::sinf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half cos(const half& a) {
  return half(::cosf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tan(const half& a) {
  return half(::tanf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tanh(const half& a) {
  return half(::tanhf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half asin(const half& a) {
  return half(::asinf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half acos(const half& a) {
  return half(::acosf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half floor(const half& a) {
#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \
  defined(EIGEN_HIP_DEVICE_COMPILE)
  return half(hfloor(a));
#else
  return half(::floorf(float(a)));
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) {
#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \
  defined(EIGEN_HIP_DEVICE_COMPILE)
  return half(hceil(a));
#else
  return half(::ceilf(float(a)));
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half rint(const half& a) {
  return half(::rintf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half round(const half& a) {
  return half(::roundf(float(a)));
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half fmod(const half& a, const half& b) {
  return half(::fmodf(float(a), float(b)));
}
|
738
|
+
|
739
|
+
// (min)/(max) — parenthesized to survive min/max macros. The comparison is
// written so that when it is false (including NaN operands, where all
// comparisons are false) the FIRST argument is returned.
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) {
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
  (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
  return __hlt(b, a) ? b : a;
#else
  const float f1 = static_cast<float>(a);
  const float f2 = static_cast<float>(b);
  return f2 < f1 ? b : a;
#endif
}
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (max)(const half& a, const half& b) {
#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
  (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
  return __hlt(a, b) ? b : a;
#else
  const float f1 = static_cast<float>(a);
  const float f2 = static_cast<float>(b);
  return f1 < f2 ? b : a;
#endif
}
|
759
|
+
|
760
|
+
#ifndef EIGEN_NO_IO
// Stream output: print the value via its (lossless) fp32 conversion.
EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const half& v) {
  os << static_cast<float>(v);
  return os;
}
#endif
|
766
|
+
|
767
|
+
} // end namespace half_impl
|
768
|
+
|
769
|
+
// import Eigen::half_impl::half into Eigen namespace
|
770
|
+
// using half_impl::half;
|
771
|
+
|
772
|
+
namespace internal {
|
773
|
+
|
774
|
+
// Uniform-ish random generation for half, used by Eigen's Random().
// NOTE(review): relies on std::rand(), so sequences follow the global C PRNG
// state (not thread-local, not reproducible across libcs).
template<>
struct random_default_impl<half, false, false>
{
  // Random value in [x, y] via linear interpolation of rand()/RAND_MAX.
  static inline half run(const half& x, const half& y)
  {
    return x + (y-x) * half(float(std::rand()) / float(RAND_MAX));
  }
  // Default range: [-1, 1].
  static inline half run()
  {
    return run(half(-1.f), half(1.f));
  }
};

// Let Eigen's meta machinery treat half as an arithmetic scalar type.
template<> struct is_arithmetic<half> { enum { value = true }; };
|
788
|
+
|
789
|
+
} // end namespace internal
|
790
|
+
|
791
|
+
// Eigen scalar traits for half. The constants below are raw binary16 bit
// patterns (see numeric_limits<Eigen::half> for the same values).
template<> struct NumTraits<Eigen::half>
  : GenericNumTraits<Eigen::half>
{
  enum {
    IsSigned = true,
    IsInteger = false,
    IsComplex = false,
    RequireInitialization = false
  };

  // Machine epsilon bit pattern (0x0800).
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half epsilon() {
    return half_impl::raw_uint16_to_half(0x0800);
  }
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half dummy_precision() {
    return half_impl::raw_uint16_to_half(0x211f); // Eigen::half(1e-2f);
  }
  // Largest finite value: +65504.
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half highest() {
    return half_impl::raw_uint16_to_half(0x7bff);
  }
  // Most negative finite value: -65504.
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half lowest() {
    return half_impl::raw_uint16_to_half(0xfbff);
  }
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half infinity() {
    return half_impl::raw_uint16_to_half(0x7c00);
  }
  EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half quiet_NaN() {
    return half_impl::raw_uint16_to_half(0x7e00);
  }
};
|
820
|
+
|
821
|
+
} // end namespace Eigen
|
822
|
+
|
823
|
+
#if defined(EIGEN_HAS_GPU_FP16) || defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
|
824
|
+
#pragma pop_macro("EIGEN_CONSTEXPR")
|
825
|
+
#endif
|
826
|
+
|
827
|
+
namespace Eigen {
|
828
|
+
namespace numext {
|
829
|
+
|
830
|
+
#if defined(EIGEN_GPU_COMPILE_PHASE)

// During GPU device compilation, route numext classification functions to the
// half_impl overloads (which may use device intrinsics).
template <>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isnan)(const Eigen::half& h) {
  return (half_impl::isnan)(h);
}

template <>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isinf)(const Eigen::half& h) {
  return (half_impl::isinf)(h);
}

template <>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isfinite)(const Eigen::half& h) {
  return (half_impl::isfinite)(h);
}

#endif

// bit_cast between half and its 16-bit storage, delegating to the raw
// conversion helpers (no value conversion, just reinterpretation).
template <>
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bit_cast<Eigen::half, uint16_t>(const uint16_t& src) {
  return Eigen::half(Eigen::half_impl::raw_uint16_to_half(src));
}

template <>
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC uint16_t bit_cast<uint16_t, Eigen::half>(const Eigen::half& src) {
  return Eigen::half_impl::raw_half_as_uint16(src);
}
|
858
|
+
|
859
|
+
} // namespace numext
|
860
|
+
} // namespace Eigen
|
861
|
+
|
862
|
+
// Add the missing shfl* intrinsics.
|
863
|
+
// The __shfl* functions are only valid on HIP or _CUDA_ARCH_ >= 300.
|
864
|
+
// CUDA defines them for (__CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__))
|
865
|
+
//
|
866
|
+
// HIP and CUDA prior to SDK 9.0 define
|
867
|
+
// __shfl, __shfl_up, __shfl_down, __shfl_xor for int and float
|
868
|
+
// CUDA since 9.0 deprecates those and instead defines
|
869
|
+
// __shfl_sync, __shfl_up_sync, __shfl_down_sync, __shfl_xor_sync,
|
870
|
+
// with native support for __half and __nv_bfloat16
|
871
|
+
//
|
872
|
+
// Note that the following are __device__ - only functions.
|
873
|
+
#if (defined(EIGEN_CUDACC) && (!defined(EIGEN_CUDA_ARCH) || EIGEN_CUDA_ARCH >= 300)) \
|
874
|
+
|| defined(EIGEN_HIPCC)
|
875
|
+
|
876
|
+
#if defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDA_SDK_VER >= 90000

// CUDA 9+: warp shuffles via the *_sync intrinsics, which natively support
// __half; convert Eigen::half <-> __half around the call.
__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_sync(unsigned mask, Eigen::half var, int srcLane, int width=warpSize) {
  const __half h = var;
  return static_cast<Eigen::half>(__shfl_sync(mask, h, srcLane, width));
}

__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_up_sync(unsigned mask, Eigen::half var, unsigned int delta, int width=warpSize) {
  const __half h = var;
  return static_cast<Eigen::half>(__shfl_up_sync(mask, h, delta, width));
}

__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_down_sync(unsigned mask, Eigen::half var, unsigned int delta, int width=warpSize) {
  const __half h = var;
  return static_cast<Eigen::half>(__shfl_down_sync(mask, h, delta, width));
}

__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor_sync(unsigned mask, Eigen::half var, int laneMask, int width=warpSize) {
  const __half h = var;
  return static_cast<Eigen::half>(__shfl_xor_sync(mask, h, laneMask, width));
}

#else // HIP or CUDA SDK < 9.0

// Older shuffles only handle int/float, so round-trip the raw 16-bit pattern
// through an int.
__device__ EIGEN_STRONG_INLINE Eigen::half __shfl(Eigen::half var, int srcLane, int width=warpSize) {
  const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
  return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl(ivar, srcLane, width)));
}

__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_up(Eigen::half var, unsigned int delta, int width=warpSize) {
  const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
  return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_up(ivar, delta, width)));
}

__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_down(Eigen::half var, unsigned int delta, int width=warpSize) {
  const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
  return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_down(ivar, delta, width)));
}

__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor(Eigen::half var, int laneMask, int width=warpSize) {
  const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
  return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_xor(ivar, laneMask, width)));
}

#endif // HIP vs CUDA
|
921
|
+
#endif // __shfl*
|
922
|
+
|
923
|
+
// ldg() has an overload for __half_raw, but we also need one for Eigen::half.
|
924
|
+
#if (defined(EIGEN_CUDACC) && (!defined(EIGEN_CUDA_ARCH) || EIGEN_CUDA_ARCH >= 350)) \
  || defined(EIGEN_HIPCC)
// Read-only-cache load for Eigen::half: load the raw 16-bit pattern with
// __ldg and reassemble a half from it.
EIGEN_STRONG_INLINE __device__ Eigen::half __ldg(const Eigen::half* ptr) {
  return Eigen::half_impl::raw_uint16_to_half(__ldg(reinterpret_cast<const Eigen::numext::uint16_t*>(ptr)));
}
#endif // __ldg
|
930
|
+
|
931
|
+
#if EIGEN_HAS_STD_HASH
|
932
|
+
namespace std {
|
933
|
+
template <>
|
934
|
+
struct hash<Eigen::half> {
|
935
|
+
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::half& a) const {
|
936
|
+
return static_cast<std::size_t>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(a));
|
937
|
+
}
|
938
|
+
};
|
939
|
+
} // end namespace std
|
940
|
+
#endif
|
941
|
+
|
942
|
+
#endif // EIGEN_HALF_H
|