tomoto 0.2.3 → 0.3.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/README.md +8 -10
- data/ext/tomoto/extconf.rb +6 -2
- data/ext/tomoto/{ext.cpp → tomoto.cpp} +1 -1
- data/lib/tomoto/version.rb +1 -1
- data/lib/tomoto.rb +5 -1
- data/vendor/EigenRand/EigenRand/Core.h +10 -10
- data/vendor/EigenRand/EigenRand/Dists/Basic.h +208 -9
- data/vendor/EigenRand/EigenRand/Dists/Discrete.h +52 -31
- data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +9 -8
- data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +28 -21
- data/vendor/EigenRand/EigenRand/EigenRand +11 -6
- data/vendor/EigenRand/EigenRand/Macro.h +13 -7
- data/vendor/EigenRand/EigenRand/MorePacketMath.h +348 -740
- data/vendor/EigenRand/EigenRand/MvDists/Multinomial.h +5 -3
- data/vendor/EigenRand/EigenRand/MvDists/MvNormal.h +9 -3
- data/vendor/EigenRand/EigenRand/PacketFilter.h +11 -253
- data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +21 -47
- data/vendor/EigenRand/EigenRand/RandUtils.h +50 -344
- data/vendor/EigenRand/EigenRand/arch/AVX/MorePacketMath.h +619 -0
- data/vendor/EigenRand/EigenRand/arch/AVX/PacketFilter.h +149 -0
- data/vendor/EigenRand/EigenRand/arch/AVX/RandUtils.h +228 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/MorePacketMath.h +473 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/PacketFilter.h +142 -0
- data/vendor/EigenRand/EigenRand/arch/NEON/RandUtils.h +126 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/MorePacketMath.h +501 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/PacketFilter.h +133 -0
- data/vendor/EigenRand/EigenRand/arch/SSE/RandUtils.h +120 -0
- data/vendor/EigenRand/EigenRand/doc.h +24 -12
- data/vendor/EigenRand/README.md +57 -4
- data/vendor/eigen/COPYING.APACHE +203 -0
- data/vendor/eigen/COPYING.BSD +1 -1
- data/vendor/eigen/COPYING.MINPACK +51 -52
- data/vendor/eigen/Eigen/Cholesky +0 -1
- data/vendor/eigen/Eigen/Core +112 -265
- data/vendor/eigen/Eigen/Eigenvalues +2 -3
- data/vendor/eigen/Eigen/Geometry +5 -8
- data/vendor/eigen/Eigen/Householder +0 -1
- data/vendor/eigen/Eigen/Jacobi +0 -1
- data/vendor/eigen/Eigen/KLUSupport +41 -0
- data/vendor/eigen/Eigen/LU +2 -5
- data/vendor/eigen/Eigen/OrderingMethods +0 -3
- data/vendor/eigen/Eigen/PaStiXSupport +1 -0
- data/vendor/eigen/Eigen/PardisoSupport +0 -0
- data/vendor/eigen/Eigen/QR +2 -3
- data/vendor/eigen/Eigen/QtAlignedMalloc +0 -1
- data/vendor/eigen/Eigen/SVD +0 -1
- data/vendor/eigen/Eigen/Sparse +0 -2
- data/vendor/eigen/Eigen/SparseCholesky +0 -8
- data/vendor/eigen/Eigen/SparseLU +4 -0
- data/vendor/eigen/Eigen/SparseQR +0 -1
- data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +42 -27
- data/vendor/eigen/Eigen/src/Cholesky/LLT.h +39 -23
- data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +90 -47
- data/vendor/eigen/Eigen/src/Core/ArithmeticSequence.h +413 -0
- data/vendor/eigen/Eigen/src/Core/Array.h +99 -11
- data/vendor/eigen/Eigen/src/Core/ArrayBase.h +3 -3
- data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +21 -21
- data/vendor/eigen/Eigen/src/Core/Assign.h +1 -1
- data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +125 -50
- data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +10 -10
- data/vendor/eigen/Eigen/src/Core/BandMatrix.h +16 -16
- data/vendor/eigen/Eigen/src/Core/Block.h +56 -60
- data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +29 -31
- data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +7 -3
- data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +325 -272
- data/vendor/eigen/Eigen/src/Core/CoreIterators.h +5 -0
- data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +21 -22
- data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +153 -18
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +6 -6
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +14 -10
- data/vendor/eigen/Eigen/src/Core/DenseBase.h +132 -42
- data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +25 -21
- data/vendor/eigen/Eigen/src/Core/DenseStorage.h +153 -71
- data/vendor/eigen/Eigen/src/Core/Diagonal.h +21 -23
- data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +50 -2
- data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +1 -1
- data/vendor/eigen/Eigen/src/Core/Dot.h +10 -10
- data/vendor/eigen/Eigen/src/Core/EigenBase.h +10 -9
- data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +8 -4
- data/vendor/eigen/Eigen/src/Core/Fuzzy.h +3 -3
- data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +20 -10
- data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +599 -152
- data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +40 -33
- data/vendor/eigen/Eigen/src/Core/IO.h +40 -7
- data/vendor/eigen/Eigen/src/Core/IndexedView.h +237 -0
- data/vendor/eigen/Eigen/src/Core/Inverse.h +9 -10
- data/vendor/eigen/Eigen/src/Core/Map.h +7 -7
- data/vendor/eigen/Eigen/src/Core/MapBase.h +10 -3
- data/vendor/eigen/Eigen/src/Core/MathFunctions.h +767 -125
- data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +118 -19
- data/vendor/eigen/Eigen/src/Core/Matrix.h +131 -25
- data/vendor/eigen/Eigen/src/Core/MatrixBase.h +21 -3
- data/vendor/eigen/Eigen/src/Core/NestByValue.h +25 -50
- data/vendor/eigen/Eigen/src/Core/NoAlias.h +4 -3
- data/vendor/eigen/Eigen/src/Core/NumTraits.h +107 -20
- data/vendor/eigen/Eigen/src/Core/PartialReduxEvaluator.h +232 -0
- data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +3 -31
- data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +152 -59
- data/vendor/eigen/Eigen/src/Core/Product.h +30 -25
- data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +192 -125
- data/vendor/eigen/Eigen/src/Core/Random.h +37 -1
- data/vendor/eigen/Eigen/src/Core/Redux.h +180 -170
- data/vendor/eigen/Eigen/src/Core/Ref.h +121 -23
- data/vendor/eigen/Eigen/src/Core/Replicate.h +8 -8
- data/vendor/eigen/Eigen/src/Core/Reshaped.h +454 -0
- data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +7 -5
- data/vendor/eigen/Eigen/src/Core/Reverse.h +18 -12
- data/vendor/eigen/Eigen/src/Core/Select.h +8 -6
- data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +33 -20
- data/vendor/eigen/Eigen/src/Core/Solve.h +14 -14
- data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +16 -16
- data/vendor/eigen/Eigen/src/Core/SolverBase.h +41 -3
- data/vendor/eigen/Eigen/src/Core/StableNorm.h +100 -70
- data/vendor/eigen/Eigen/src/Core/StlIterators.h +463 -0
- data/vendor/eigen/Eigen/src/Core/Stride.h +9 -4
- data/vendor/eigen/Eigen/src/Core/Swap.h +5 -4
- data/vendor/eigen/Eigen/src/Core/Transpose.h +88 -27
- data/vendor/eigen/Eigen/src/Core/Transpositions.h +26 -47
- data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +93 -75
- data/vendor/eigen/Eigen/src/Core/VectorBlock.h +5 -5
- data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +159 -70
- data/vendor/eigen/Eigen/src/Core/Visitor.h +137 -29
- data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +50 -129
- data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +126 -337
- data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1092 -155
- data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +65 -1
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/Complex.h +422 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +207 -236
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1482 -495
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +152 -165
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +19 -251
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +2937 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +221 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +629 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +2042 -392
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +235 -80
- data/vendor/eigen/Eigen/src/Core/arch/Default/BFloat16.h +700 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +102 -14
- data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1649 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +110 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Half.h +942 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +1 -1
- data/vendor/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
- data/vendor/eigen/Eigen/src/Core/arch/{CUDA → GPU}/MathFunctions.h +16 -4
- data/vendor/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1685 -0
- data/vendor/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +80 -0
- data/vendor/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/Complex.h +648 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
- data/vendor/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +313 -219
- data/vendor/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +54 -70
- data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +4376 -549
- data/vendor/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1419 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +59 -179
- data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +65 -428
- data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +893 -283
- data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +65 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
- data/vendor/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
- data/vendor/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +212 -183
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +101 -5
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +510 -395
- data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +11 -2
- data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +112 -46
- data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +31 -30
- data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +32 -2
- data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +355 -16
- data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +1075 -586
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +49 -24
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +41 -35
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +6 -6
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +4 -2
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +382 -483
- data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +22 -5
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +53 -30
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +16 -8
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +8 -6
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +4 -4
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +5 -4
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +33 -27
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +14 -12
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +36 -34
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +8 -4
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +13 -10
- data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +304 -119
- data/vendor/eigen/Eigen/src/Core/util/ConfigureVectorization.h +512 -0
- data/vendor/eigen/Eigen/src/Core/util/Constants.h +25 -9
- data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +26 -3
- data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +29 -9
- data/vendor/eigen/Eigen/src/Core/util/IndexedViewHelper.h +186 -0
- data/vendor/eigen/Eigen/src/Core/util/IntegralConstant.h +272 -0
- data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +8 -1
- data/vendor/eigen/Eigen/src/Core/util/Macros.h +709 -246
- data/vendor/eigen/Eigen/src/Core/util/Memory.h +222 -52
- data/vendor/eigen/Eigen/src/Core/util/Meta.h +355 -77
- data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +5 -1
- data/vendor/eigen/Eigen/src/Core/util/ReshapedHelper.h +51 -0
- data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +8 -5
- data/vendor/eigen/Eigen/src/Core/util/SymbolicIndex.h +293 -0
- data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +65 -30
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +1 -1
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +7 -4
- data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +1 -1
- data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +2 -2
- data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +9 -6
- data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +21 -9
- data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +77 -43
- data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +20 -15
- data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +99 -5
- data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +4 -4
- data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +3 -3
- data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +15 -11
- data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +1 -1
- data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +3 -2
- data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +39 -2
- data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +70 -14
- data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +3 -3
- data/vendor/eigen/Eigen/src/Geometry/Scaling.h +23 -5
- data/vendor/eigen/Eigen/src/Geometry/Transform.h +88 -67
- data/vendor/eigen/Eigen/src/Geometry/Translation.h +6 -12
- data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +1 -1
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h +168 -0
- data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +9 -2
- data/vendor/eigen/Eigen/src/Householder/Householder.h +8 -4
- data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +123 -48
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +15 -15
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +7 -23
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +5 -22
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +41 -47
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +51 -60
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +70 -20
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +2 -20
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +11 -9
- data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +31 -10
- data/vendor/eigen/Eigen/src/KLUSupport/KLUSupport.h +358 -0
- data/vendor/eigen/Eigen/src/LU/Determinant.h +35 -19
- data/vendor/eigen/Eigen/src/LU/FullPivLU.h +29 -43
- data/vendor/eigen/Eigen/src/LU/InverseImpl.h +25 -8
- data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +71 -58
- data/vendor/eigen/Eigen/src/LU/arch/InverseSize4.h +351 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +7 -17
- data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +297 -277
- data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +6 -10
- data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +1 -1
- data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +9 -7
- data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +41 -20
- data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +100 -27
- data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +59 -22
- data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +48 -23
- data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +25 -3
- data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +183 -63
- data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +22 -14
- data/vendor/eigen/Eigen/src/SVD/SVDBase.h +83 -22
- data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +3 -3
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +17 -9
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +12 -37
- data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +3 -2
- data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +16 -0
- data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +6 -6
- data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +81 -27
- data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +25 -57
- data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +40 -11
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +11 -15
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +4 -2
- data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +30 -8
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +126 -11
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +5 -12
- data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +13 -1
- data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +7 -7
- data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +5 -2
- data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +8 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +1 -1
- data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +1 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +162 -12
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +1 -1
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +76 -2
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +2 -2
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +1 -1
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +1 -1
- data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +19 -6
- data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +2 -12
- data/vendor/eigen/Eigen/src/StlSupport/StdList.h +2 -2
- data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +2 -2
- data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +6 -8
- data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +175 -39
- data/vendor/eigen/Eigen/src/misc/lapacke.h +5 -4
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +28 -2
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +155 -11
- data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +626 -242
- data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +14 -0
- data/vendor/eigen/Eigen/src/plugins/IndexedViewMethods.h +262 -0
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +4 -4
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +10 -0
- data/vendor/eigen/Eigen/src/plugins/ReshapedMethods.h +149 -0
- data/vendor/eigen/README.md +2 -0
- data/vendor/eigen/bench/btl/README +1 -1
- data/vendor/eigen/bench/tensors/README +6 -7
- data/vendor/eigen/ci/README.md +56 -0
- data/vendor/eigen/demos/mix_eigen_and_c/README +1 -1
- data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +213 -158
- data/vendor/eigen/unsupported/README.txt +1 -1
- data/vendor/tomotopy/README.kr.rst +21 -0
- data/vendor/tomotopy/README.rst +20 -0
- data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +2 -2
- data/vendor/tomotopy/src/Labeling/Phraser.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/CTModel.hpp +2 -1
- data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +2 -1
- data/vendor/tomotopy/src/TopicModel/DTModel.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/HDP.h +1 -0
- data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +53 -2
- data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +1 -1
- data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +16 -5
- data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +1 -0
- data/vendor/tomotopy/src/TopicModel/PT.h +3 -1
- data/vendor/tomotopy/src/TopicModel/PTModel.hpp +31 -1
- data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +2 -2
- data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +7 -5
- data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +36 -1
- data/vendor/tomotopy/src/Utils/exception.h +6 -0
- data/vendor/tomotopy/src/Utils/sample.hpp +14 -12
- data/vendor/tomotopy/src/Utils/sse_gamma.h +0 -3
- metadata +60 -14
- data/vendor/eigen/Eigen/CMakeLists.txt +0 -19
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +0 -674
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +0 -333
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +0 -1124
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +0 -212
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +0 -161
- data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +0 -338
@@ -18,13 +18,15 @@ namespace internal {
|
|
18
18
|
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
|
19
19
|
#endif
|
20
20
|
|
21
|
-
#
|
21
|
+
#if !defined(EIGEN_VECTORIZE_AVX) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
|
22
|
+
// 32 bits => 8 registers
|
23
|
+
// 64 bits => 16 registers
|
22
24
|
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
|
23
25
|
#endif
|
24
26
|
|
25
|
-
#ifdef
|
27
|
+
#ifdef EIGEN_VECTORIZE_FMA
|
26
28
|
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
|
27
|
-
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
|
29
|
+
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
|
28
30
|
#endif
|
29
31
|
#endif
|
30
32
|
|
@@ -34,47 +36,75 @@ namespace internal {
|
|
34
36
|
// One solution is to increase ABI version using -fabi-version=4 (or greater).
|
35
37
|
// Otherwise, we workaround this inconvenience by wrapping 128bit types into the following helper
|
36
38
|
// structure:
|
37
|
-
template<typename T>
|
38
|
-
struct eigen_packet_wrapper
|
39
|
-
{
|
40
|
-
EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
|
41
|
-
EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
|
42
|
-
EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}
|
43
|
-
EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}
|
44
|
-
EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {
|
45
|
-
m_val = v;
|
46
|
-
return *this;
|
47
|
-
}
|
48
|
-
|
49
|
-
T m_val;
|
50
|
-
};
|
51
39
|
typedef eigen_packet_wrapper<__m128> Packet4f;
|
52
|
-
typedef eigen_packet_wrapper<__m128i> Packet4i;
|
53
40
|
typedef eigen_packet_wrapper<__m128d> Packet2d;
|
54
41
|
#else
|
55
42
|
typedef __m128 Packet4f;
|
56
|
-
typedef __m128i Packet4i;
|
57
43
|
typedef __m128d Packet2d;
|
58
44
|
#endif
|
59
45
|
|
46
|
+
typedef eigen_packet_wrapper<__m128i, 0> Packet4i;
|
47
|
+
typedef eigen_packet_wrapper<__m128i, 1> Packet16b;
|
48
|
+
|
60
49
|
template<> struct is_arithmetic<__m128> { enum { value = true }; };
|
61
50
|
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
|
62
51
|
template<> struct is_arithmetic<__m128d> { enum { value = true }; };
|
52
|
+
template<> struct is_arithmetic<Packet4i> { enum { value = true }; };
|
53
|
+
template<> struct is_arithmetic<Packet16b> { enum { value = true }; };
|
54
|
+
|
55
|
+
template<int p, int q, int r, int s>
|
56
|
+
struct shuffle_mask{
|
57
|
+
enum { mask = (s)<<6|(r)<<4|(q)<<2|(p) };
|
58
|
+
};
|
63
59
|
|
60
|
+
// TODO: change the implementation of all swizzle* ops from macro to template,
|
64
61
|
#define vec4f_swizzle1(v,p,q,r,s) \
|
65
|
-
(_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), (
|
62
|
+
Packet4f(_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), (shuffle_mask<p,q,r,s>::mask))))
|
66
63
|
|
67
64
|
#define vec4i_swizzle1(v,p,q,r,s) \
|
68
|
-
(_mm_shuffle_epi32( v, (
|
65
|
+
Packet4i(_mm_shuffle_epi32( v, (shuffle_mask<p,q,r,s>::mask)))
|
69
66
|
|
70
67
|
#define vec2d_swizzle1(v,p,q) \
|
71
|
-
(_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), (
|
72
|
-
|
68
|
+
Packet2d(_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), (shuffle_mask<2*p,2*p+1,2*q,2*q+1>::mask))))
|
69
|
+
|
73
70
|
#define vec4f_swizzle2(a,b,p,q,r,s) \
|
74
|
-
(_mm_shuffle_ps( (a), (b), (
|
71
|
+
Packet4f(_mm_shuffle_ps( (a), (b), (shuffle_mask<p,q,r,s>::mask)))
|
75
72
|
|
76
73
|
#define vec4i_swizzle2(a,b,p,q,r,s) \
|
77
|
-
(_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), (
|
74
|
+
Packet4i(_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), (shuffle_mask<p,q,r,s>::mask)))))
|
75
|
+
|
76
|
+
EIGEN_STRONG_INLINE Packet4f vec4f_movelh(const Packet4f& a, const Packet4f& b)
|
77
|
+
{
|
78
|
+
return Packet4f(_mm_movelh_ps(a,b));
|
79
|
+
}
|
80
|
+
EIGEN_STRONG_INLINE Packet4f vec4f_movehl(const Packet4f& a, const Packet4f& b)
|
81
|
+
{
|
82
|
+
return Packet4f(_mm_movehl_ps(a,b));
|
83
|
+
}
|
84
|
+
EIGEN_STRONG_INLINE Packet4f vec4f_unpacklo(const Packet4f& a, const Packet4f& b)
|
85
|
+
{
|
86
|
+
return Packet4f(_mm_unpacklo_ps(a,b));
|
87
|
+
}
|
88
|
+
EIGEN_STRONG_INLINE Packet4f vec4f_unpackhi(const Packet4f& a, const Packet4f& b)
|
89
|
+
{
|
90
|
+
return Packet4f(_mm_unpackhi_ps(a,b));
|
91
|
+
}
|
92
|
+
#define vec4f_duplane(a,p) \
|
93
|
+
vec4f_swizzle2(a,a,p,p,p,p)
|
94
|
+
|
95
|
+
#define vec2d_swizzle2(a,b,mask) \
|
96
|
+
Packet2d(_mm_shuffle_pd(a,b,mask))
|
97
|
+
|
98
|
+
EIGEN_STRONG_INLINE Packet2d vec2d_unpacklo(const Packet2d& a, const Packet2d& b)
|
99
|
+
{
|
100
|
+
return Packet2d(_mm_unpacklo_pd(a,b));
|
101
|
+
}
|
102
|
+
EIGEN_STRONG_INLINE Packet2d vec2d_unpackhi(const Packet2d& a, const Packet2d& b)
|
103
|
+
{
|
104
|
+
return Packet2d(_mm_unpackhi_pd(a,b));
|
105
|
+
}
|
106
|
+
#define vec2d_duplane(a,p) \
|
107
|
+
vec2d_swizzle2(a,a,(p<<1)|p)
|
78
108
|
|
79
109
|
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
|
80
110
|
const Packet4f p4f_##NAME = pset1<Packet4f>(X)
|
@@ -83,7 +113,7 @@ template<> struct is_arithmetic<__m128d> { enum { value = true }; };
|
|
83
113
|
const Packet2d p2d_##NAME = pset1<Packet2d>(X)
|
84
114
|
|
85
115
|
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
|
86
|
-
const Packet4f p4f_##NAME =
|
116
|
+
const Packet4f p4f_##NAME = pset1frombits<Packet4f>(X)
|
87
117
|
|
88
118
|
#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
|
89
119
|
const Packet4i p4i_##NAME = pset1<Packet4i>(X)
|
@@ -92,36 +122,41 @@ template<> struct is_arithmetic<__m128d> { enum { value = true }; };
|
|
92
122
|
// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
|
93
123
|
// to leverage AVX instructions.
|
94
124
|
#ifndef EIGEN_VECTORIZE_AVX
|
95
|
-
template<>
|
96
|
-
{
|
125
|
+
template <>
|
126
|
+
struct packet_traits<float> : default_packet_traits {
|
97
127
|
typedef Packet4f type;
|
98
128
|
typedef Packet4f half;
|
99
129
|
enum {
|
100
130
|
Vectorizable = 1,
|
101
131
|
AlignedOnScalar = 1,
|
102
|
-
size=4,
|
132
|
+
size = 4,
|
103
133
|
HasHalfPacket = 0,
|
104
134
|
|
105
|
-
|
106
|
-
|
107
|
-
|
108
|
-
|
109
|
-
|
135
|
+
HasCmp = 1,
|
136
|
+
HasDiv = 1,
|
137
|
+
HasSin = EIGEN_FAST_MATH,
|
138
|
+
HasCos = EIGEN_FAST_MATH,
|
139
|
+
HasLog = 1,
|
140
|
+
HasLog1p = 1,
|
141
|
+
HasExpm1 = 1,
|
142
|
+
HasNdtri = 1,
|
143
|
+
HasExp = 1,
|
144
|
+
HasBessel = 1,
|
110
145
|
HasSqrt = 1,
|
111
146
|
HasRsqrt = 1,
|
112
|
-
HasTanh
|
113
|
-
|
114
|
-
|
147
|
+
HasTanh = EIGEN_FAST_MATH,
|
148
|
+
HasErf = EIGEN_FAST_MATH,
|
149
|
+
HasBlend = 1,
|
150
|
+
HasCeil = 1,
|
151
|
+
HasFloor = 1,
|
115
152
|
#ifdef EIGEN_VECTORIZE_SSE4_1
|
116
|
-
,
|
117
153
|
HasRound = 1,
|
118
|
-
HasFloor = 1,
|
119
|
-
HasCeil = 1
|
120
154
|
#endif
|
155
|
+
HasRint = 1
|
121
156
|
};
|
122
157
|
};
|
123
|
-
template<>
|
124
|
-
{
|
158
|
+
template <>
|
159
|
+
struct packet_traits<double> : default_packet_traits {
|
125
160
|
typedef Packet2d type;
|
126
161
|
typedef Packet2d half;
|
127
162
|
enum {
|
@@ -130,18 +165,19 @@ template<> struct packet_traits<double> : default_packet_traits
|
|
130
165
|
size=2,
|
131
166
|
HasHalfPacket = 0,
|
132
167
|
|
168
|
+
HasCmp = 1,
|
133
169
|
HasDiv = 1,
|
170
|
+
HasLog = 1,
|
134
171
|
HasExp = 1,
|
135
172
|
HasSqrt = 1,
|
136
173
|
HasRsqrt = 1,
|
137
|
-
HasBlend = 1
|
138
|
-
|
174
|
+
HasBlend = 1,
|
175
|
+
HasFloor = 1,
|
176
|
+
HasCeil = 1,
|
139
177
|
#ifdef EIGEN_VECTORIZE_SSE4_1
|
140
|
-
,
|
141
178
|
HasRound = 1,
|
142
|
-
HasFloor = 1,
|
143
|
-
HasCeil = 1
|
144
179
|
#endif
|
180
|
+
HasRint = 1
|
145
181
|
};
|
146
182
|
};
|
147
183
|
#endif
|
@@ -154,13 +190,56 @@ template<> struct packet_traits<int> : default_packet_traits
|
|
154
190
|
AlignedOnScalar = 1,
|
155
191
|
size=4,
|
156
192
|
|
193
|
+
HasShift = 1,
|
157
194
|
HasBlend = 1
|
158
195
|
};
|
159
196
|
};
|
160
197
|
|
161
|
-
template<> struct
|
162
|
-
|
163
|
-
|
198
|
+
template<> struct packet_traits<bool> : default_packet_traits
|
199
|
+
{
|
200
|
+
typedef Packet16b type;
|
201
|
+
typedef Packet16b half;
|
202
|
+
enum {
|
203
|
+
Vectorizable = 1,
|
204
|
+
AlignedOnScalar = 1,
|
205
|
+
HasHalfPacket = 0,
|
206
|
+
size=16,
|
207
|
+
|
208
|
+
HasAdd = 1,
|
209
|
+
HasSub = 1,
|
210
|
+
HasShift = 0,
|
211
|
+
HasMul = 1,
|
212
|
+
HasNegate = 1,
|
213
|
+
HasAbs = 0,
|
214
|
+
HasAbs2 = 0,
|
215
|
+
HasMin = 0,
|
216
|
+
HasMax = 0,
|
217
|
+
HasConj = 0,
|
218
|
+
HasSqrt = 1
|
219
|
+
};
|
220
|
+
};
|
221
|
+
|
222
|
+
template<> struct unpacket_traits<Packet4f> {
|
223
|
+
typedef float type;
|
224
|
+
typedef Packet4f half;
|
225
|
+
typedef Packet4i integer_packet;
|
226
|
+
enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
|
227
|
+
};
|
228
|
+
template<> struct unpacket_traits<Packet2d> {
|
229
|
+
typedef double type;
|
230
|
+
typedef Packet2d half;
|
231
|
+
enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
|
232
|
+
};
|
233
|
+
template<> struct unpacket_traits<Packet4i> {
|
234
|
+
typedef int type;
|
235
|
+
typedef Packet4i half;
|
236
|
+
enum {size=4, alignment=Aligned16, vectorizable=false, masked_load_available=false, masked_store_available=false};
|
237
|
+
};
|
238
|
+
template<> struct unpacket_traits<Packet16b> {
|
239
|
+
typedef bool type;
|
240
|
+
typedef Packet16b half;
|
241
|
+
enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
|
242
|
+
};
|
164
243
|
|
165
244
|
#ifndef EIGEN_VECTORIZE_AVX
|
166
245
|
template<> struct scalar_div_cost<float,true> { enum { value = 7 }; };
|
@@ -179,6 +258,18 @@ template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { re
|
|
179
258
|
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
|
180
259
|
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
|
181
260
|
#endif
|
261
|
+
template<> EIGEN_STRONG_INLINE Packet16b pset1<Packet16b>(const bool& from) { return _mm_set1_epi8(static_cast<char>(from)); }
|
262
|
+
|
263
|
+
template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }
|
264
|
+
template<> EIGEN_STRONG_INLINE Packet2d pset1frombits<Packet2d>(uint64_t from) { return _mm_castsi128_pd(_mm_set1_epi64x(from)); }
|
265
|
+
|
266
|
+
template<> EIGEN_STRONG_INLINE Packet4f peven_mask(const Packet4f& /*a*/) { return _mm_castsi128_ps(_mm_set_epi32(0, -1, 0, -1)); }
|
267
|
+
template<> EIGEN_STRONG_INLINE Packet4i peven_mask(const Packet4i& /*a*/) { return _mm_set_epi32(0, -1, 0, -1); }
|
268
|
+
template<> EIGEN_STRONG_INLINE Packet2d peven_mask(const Packet2d& /*a*/) { return _mm_castsi128_pd(_mm_set_epi32(0, 0, -1, -1)); }
|
269
|
+
|
270
|
+
template<> EIGEN_STRONG_INLINE Packet4f pzero(const Packet4f& /*a*/) { return _mm_setzero_ps(); }
|
271
|
+
template<> EIGEN_STRONG_INLINE Packet2d pzero(const Packet2d& /*a*/) { return _mm_setzero_pd(); }
|
272
|
+
template<> EIGEN_STRONG_INLINE Packet4i pzero(const Packet4i& /*a*/) { return _mm_setzero_si128(); }
|
182
273
|
|
183
274
|
// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
|
184
275
|
// However, using inrinsics for pset1 makes gcc to generate crappy code in some cases (see bug 203)
|
@@ -190,7 +281,7 @@ template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
|
|
190
281
|
return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
|
191
282
|
}
|
192
283
|
#endif
|
193
|
-
|
284
|
+
|
194
285
|
template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
|
195
286
|
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
|
196
287
|
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
|
@@ -199,9 +290,34 @@ template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const
|
|
199
290
|
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
|
200
291
|
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
|
201
292
|
|
293
|
+
template<> EIGEN_STRONG_INLINE Packet16b padd<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
|
294
|
+
|
202
295
|
template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
|
203
296
|
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
|
204
297
|
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
|
298
|
+
template<> EIGEN_STRONG_INLINE Packet16b psub<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
|
299
|
+
|
300
|
+
template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b);
|
301
|
+
template<> EIGEN_STRONG_INLINE Packet4f paddsub<Packet4f>(const Packet4f& a, const Packet4f& b)
|
302
|
+
{
|
303
|
+
#ifdef EIGEN_VECTORIZE_SSE3
|
304
|
+
return _mm_addsub_ps(a,b);
|
305
|
+
#else
|
306
|
+
const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x0,0x80000000,0x0));
|
307
|
+
return padd(a, pxor(mask, b));
|
308
|
+
#endif
|
309
|
+
}
|
310
|
+
|
311
|
+
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& , const Packet2d& );
|
312
|
+
template<> EIGEN_STRONG_INLINE Packet2d paddsub<Packet2d>(const Packet2d& a, const Packet2d& b)
|
313
|
+
{
|
314
|
+
#ifdef EIGEN_VECTORIZE_SSE3
|
315
|
+
return _mm_addsub_pd(a,b);
|
316
|
+
#else
|
317
|
+
const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x0));
|
318
|
+
return padd(a, pxor(mask, b));
|
319
|
+
#endif
|
320
|
+
}
|
205
321
|
|
206
322
|
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
|
207
323
|
{
|
@@ -218,6 +334,11 @@ template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
|
|
218
334
|
return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
|
219
335
|
}
|
220
336
|
|
337
|
+
template<> EIGEN_STRONG_INLINE Packet16b pnegate(const Packet16b& a)
|
338
|
+
{
|
339
|
+
return psub(pset1<Packet16b>(false), a);
|
340
|
+
}
|
341
|
+
|
221
342
|
template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
|
222
343
|
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
|
223
344
|
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
|
@@ -240,18 +361,126 @@ template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const
|
|
240
361
|
#endif
|
241
362
|
}
|
242
363
|
|
364
|
+
template<> EIGEN_STRONG_INLINE Packet16b pmul<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
|
365
|
+
|
243
366
|
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
|
244
367
|
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
|
245
368
|
|
246
369
|
// for some weird raisons, it has to be overloaded for packet of integers
|
247
370
|
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
|
248
|
-
#ifdef
|
371
|
+
#ifdef EIGEN_VECTORIZE_FMA
|
249
372
|
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
|
250
373
|
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
|
251
374
|
#endif
|
252
375
|
|
253
|
-
|
254
|
-
template<>
|
376
|
+
#ifdef EIGEN_VECTORIZE_SSE4_1
|
377
|
+
template<> EIGEN_DEVICE_FUNC inline Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) {
|
378
|
+
return _mm_blendv_ps(b,a,mask);
|
379
|
+
}
|
380
|
+
|
381
|
+
template<> EIGEN_DEVICE_FUNC inline Packet4i pselect(const Packet4i& mask, const Packet4i& a, const Packet4i& b) {
|
382
|
+
return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(b),_mm_castsi128_ps(a),_mm_castsi128_ps(mask)));
|
383
|
+
}
|
384
|
+
|
385
|
+
template<> EIGEN_DEVICE_FUNC inline Packet2d pselect(const Packet2d& mask, const Packet2d& a, const Packet2d& b) { return _mm_blendv_pd(b,a,mask); }
|
386
|
+
|
387
|
+
template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) {
|
388
|
+
return _mm_blendv_epi8(b,a,mask);
|
389
|
+
}
|
390
|
+
#else
|
391
|
+
template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) {
|
392
|
+
Packet16b a_part = _mm_and_si128(mask, a);
|
393
|
+
Packet16b b_part = _mm_andnot_si128(mask, b);
|
394
|
+
return _mm_or_si128(a_part, b_part);
|
395
|
+
}
|
396
|
+
#endif
|
397
|
+
|
398
|
+
template<> EIGEN_STRONG_INLINE Packet4i ptrue<Packet4i>(const Packet4i& a) { return _mm_cmpeq_epi32(a, a); }
|
399
|
+
template<> EIGEN_STRONG_INLINE Packet16b ptrue<Packet16b>(const Packet16b& a) { return _mm_cmpeq_epi8(a, a); }
|
400
|
+
template<> EIGEN_STRONG_INLINE Packet4f
|
401
|
+
ptrue<Packet4f>(const Packet4f& a) {
|
402
|
+
Packet4i b = _mm_castps_si128(a);
|
403
|
+
return _mm_castsi128_ps(_mm_cmpeq_epi32(b, b));
|
404
|
+
}
|
405
|
+
template<> EIGEN_STRONG_INLINE Packet2d
|
406
|
+
ptrue<Packet2d>(const Packet2d& a) {
|
407
|
+
Packet4i b = _mm_castpd_si128(a);
|
408
|
+
return _mm_castsi128_pd(_mm_cmpeq_epi32(b, b));
|
409
|
+
}
|
410
|
+
|
411
|
+
|
412
|
+
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
|
413
|
+
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
|
414
|
+
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
|
415
|
+
template<> EIGEN_STRONG_INLINE Packet16b pand<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
|
416
|
+
|
417
|
+
template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
|
418
|
+
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
|
419
|
+
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
|
420
|
+
template<> EIGEN_STRONG_INLINE Packet16b por<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
|
421
|
+
|
422
|
+
template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
|
423
|
+
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
|
424
|
+
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
|
425
|
+
template<> EIGEN_STRONG_INLINE Packet16b pxor<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
|
426
|
+
|
427
|
+
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(b,a); }
|
428
|
+
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(b,a); }
|
429
|
+
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(b,a); }
|
430
|
+
|
431
|
+
template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return _mm_cmple_ps(a,b); }
|
432
|
+
template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return _mm_cmplt_ps(a,b); }
|
433
|
+
template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return _mm_cmpnge_ps(a,b); }
|
434
|
+
template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return _mm_cmpeq_ps(a,b); }
|
435
|
+
|
436
|
+
template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return _mm_cmple_pd(a,b); }
|
437
|
+
template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return _mm_cmplt_pd(a,b); }
|
438
|
+
template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) { return _mm_cmpnge_pd(a,b); }
|
439
|
+
template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return _mm_cmpeq_pd(a,b); }
|
440
|
+
|
441
|
+
template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return _mm_cmplt_epi32(a,b); }
|
442
|
+
template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return _mm_cmpeq_epi32(a,b); }
|
443
|
+
template<> EIGEN_STRONG_INLINE Packet16b pcmp_eq(const Packet16b& a, const Packet16b& b) { return _mm_cmpeq_epi8(a,b); }
|
444
|
+
template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) { return por(pcmp_lt(a,b), pcmp_eq(a,b)); }
|
445
|
+
|
446
|
+
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
|
447
|
+
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
|
448
|
+
// There appears to be a bug in GCC, by which the optimizer may
|
449
|
+
// flip the argument order in calls to _mm_min_ps, so we have to
|
450
|
+
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
|
451
|
+
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
|
452
|
+
#ifdef EIGEN_VECTORIZE_AVX
|
453
|
+
Packet4f res;
|
454
|
+
asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
|
455
|
+
#else
|
456
|
+
Packet4f res = b;
|
457
|
+
asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
|
458
|
+
#endif
|
459
|
+
return res;
|
460
|
+
#else
|
461
|
+
// Arguments are reversed to match NaN propagation behavior of std::min.
|
462
|
+
return _mm_min_ps(b, a);
|
463
|
+
#endif
|
464
|
+
}
|
465
|
+
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
|
466
|
+
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
|
467
|
+
// There appears to be a bug in GCC, by which the optimizer may
|
468
|
+
// flip the argument order in calls to _mm_min_pd, so we have to
|
469
|
+
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
|
470
|
+
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
|
471
|
+
#ifdef EIGEN_VECTORIZE_AVX
|
472
|
+
Packet2d res;
|
473
|
+
asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
|
474
|
+
#else
|
475
|
+
Packet2d res = b;
|
476
|
+
asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
|
477
|
+
#endif
|
478
|
+
return res;
|
479
|
+
#else
|
480
|
+
// Arguments are reversed to match NaN propagation behavior of std::min.
|
481
|
+
return _mm_min_pd(b, a);
|
482
|
+
#endif
|
483
|
+
}
|
255
484
|
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
|
256
485
|
{
|
257
486
|
#ifdef EIGEN_VECTORIZE_SSE4_1
|
@@ -263,8 +492,45 @@ template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const
|
|
263
492
|
#endif
|
264
493
|
}
|
265
494
|
|
266
|
-
|
267
|
-
template<> EIGEN_STRONG_INLINE
|
495
|
+
|
496
|
+
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
|
497
|
+
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
|
498
|
+
// There appears to be a bug in GCC, by which the optimizer may
|
499
|
+
// flip the argument order in calls to _mm_max_ps, so we have to
|
500
|
+
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
|
501
|
+
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
|
502
|
+
#ifdef EIGEN_VECTORIZE_AVX
|
503
|
+
Packet4f res;
|
504
|
+
asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
|
505
|
+
#else
|
506
|
+
Packet4f res = b;
|
507
|
+
asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
|
508
|
+
#endif
|
509
|
+
return res;
|
510
|
+
#else
|
511
|
+
// Arguments are reversed to match NaN propagation behavior of std::max.
|
512
|
+
return _mm_max_ps(b, a);
|
513
|
+
#endif
|
514
|
+
}
|
515
|
+
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
|
516
|
+
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
|
517
|
+
// There appears to be a bug in GCC, by which the optimizer may
|
518
|
+
// flip the argument order in calls to _mm_max_pd, so we have to
|
519
|
+
// resort to inline ASM here. This is supposed to be fixed in gcc6.3,
|
520
|
+
// see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
|
521
|
+
#ifdef EIGEN_VECTORIZE_AVX
|
522
|
+
Packet2d res;
|
523
|
+
asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
|
524
|
+
#else
|
525
|
+
Packet2d res = b;
|
526
|
+
asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
|
527
|
+
#endif
|
528
|
+
return res;
|
529
|
+
#else
|
530
|
+
// Arguments are reversed to match NaN propagation behavior of std::max.
|
531
|
+
return _mm_max_pd(b, a);
|
532
|
+
#endif
|
533
|
+
}
|
268
534
|
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
|
269
535
|
{
|
270
536
|
#ifdef EIGEN_VECTORIZE_SSE4_1
|
@@ -276,36 +542,180 @@ template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const
|
|
276
542
|
#endif
|
277
543
|
}
|
278
544
|
|
545
|
+
template <typename Packet, typename Op>
|
546
|
+
EIGEN_STRONG_INLINE Packet pminmax_propagate_numbers(const Packet& a, const Packet& b, Op op) {
|
547
|
+
// In this implementation, we take advantage of the fact that pmin/pmax for SSE
|
548
|
+
// always return a if either a or b is NaN.
|
549
|
+
Packet not_nan_mask_a = pcmp_eq(a, a);
|
550
|
+
Packet m = op(a, b);
|
551
|
+
return pselect<Packet>(not_nan_mask_a, m, b);
|
552
|
+
}
|
553
|
+
|
554
|
+
template <typename Packet, typename Op>
|
555
|
+
EIGEN_STRONG_INLINE Packet pminmax_propagate_nan(const Packet& a, const Packet& b, Op op) {
|
556
|
+
// In this implementation, we take advantage of the fact that pmin/pmax for SSE
|
557
|
+
// always return a if either a or b is NaN.
|
558
|
+
Packet not_nan_mask_a = pcmp_eq(a, a);
|
559
|
+
Packet m = op(b, a);
|
560
|
+
return pselect<Packet>(not_nan_mask_a, m, a);
|
561
|
+
}
|
562
|
+
|
563
|
+
// Add specializations for min/max with prescribed NaN progation.
|
564
|
+
template<>
|
565
|
+
EIGEN_STRONG_INLINE Packet4f pmin<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) {
|
566
|
+
return pminmax_propagate_numbers(a, b, pmin<Packet4f>);
|
567
|
+
}
|
568
|
+
template<>
|
569
|
+
EIGEN_STRONG_INLINE Packet2d pmin<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) {
|
570
|
+
return pminmax_propagate_numbers(a, b, pmin<Packet2d>);
|
571
|
+
}
|
572
|
+
template<>
|
573
|
+
EIGEN_STRONG_INLINE Packet4f pmax<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) {
|
574
|
+
return pminmax_propagate_numbers(a, b, pmax<Packet4f>);
|
575
|
+
}
|
576
|
+
template<>
|
577
|
+
EIGEN_STRONG_INLINE Packet2d pmax<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) {
|
578
|
+
return pminmax_propagate_numbers(a, b, pmax<Packet2d>);
|
579
|
+
}
|
580
|
+
template<>
|
581
|
+
EIGEN_STRONG_INLINE Packet4f pmin<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) {
|
582
|
+
return pminmax_propagate_nan(a, b, pmin<Packet4f>);
|
583
|
+
}
|
584
|
+
template<>
|
585
|
+
EIGEN_STRONG_INLINE Packet2d pmin<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) {
|
586
|
+
return pminmax_propagate_nan(a, b, pmin<Packet2d>);
|
587
|
+
}
|
588
|
+
template<>
|
589
|
+
EIGEN_STRONG_INLINE Packet4f pmax<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) {
|
590
|
+
return pminmax_propagate_nan(a, b, pmax<Packet4f>);
|
591
|
+
}
|
592
|
+
template<>
|
593
|
+
EIGEN_STRONG_INLINE Packet2d pmax<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) {
|
594
|
+
return pminmax_propagate_nan(a, b, pmax<Packet2d>);
|
595
|
+
}
|
596
|
+
|
597
|
+
template<int N> EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(const Packet4i& a) { return _mm_srai_epi32(a,N); }
|
598
|
+
template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_right (const Packet4i& a) { return _mm_srli_epi32(a,N); }
|
599
|
+
template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_left (const Packet4i& a) { return _mm_slli_epi32(a,N); }
|
600
|
+
|
601
|
+
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
|
602
|
+
{
|
603
|
+
const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
|
604
|
+
return _mm_and_ps(a,mask);
|
605
|
+
}
|
606
|
+
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
|
607
|
+
{
|
608
|
+
const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
|
609
|
+
return _mm_and_pd(a,mask);
|
610
|
+
}
|
611
|
+
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
|
612
|
+
{
|
613
|
+
#ifdef EIGEN_VECTORIZE_SSSE3
|
614
|
+
return _mm_abs_epi32(a);
|
615
|
+
#else
|
616
|
+
Packet4i aux = _mm_srai_epi32(a,31);
|
617
|
+
return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
|
618
|
+
#endif
|
619
|
+
}
|
620
|
+
|
279
621
|
#ifdef EIGEN_VECTORIZE_SSE4_1
|
280
|
-
template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
|
281
|
-
|
622
|
+
template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
|
623
|
+
{
|
624
|
+
// Unfortunatly _mm_round_ps doesn't have a rounding mode to implement numext::round.
|
625
|
+
const Packet4f mask = pset1frombits<Packet4f>(0x80000000u);
|
626
|
+
const Packet4f prev0dot5 = pset1frombits<Packet4f>(0x3EFFFFFFu);
|
627
|
+
return _mm_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
|
628
|
+
}
|
629
|
+
|
630
|
+
template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a)
|
631
|
+
{
|
632
|
+
const Packet2d mask = _mm_castsi128_pd(_mm_set_epi64x(0x8000000000000000ull, 0x8000000000000000ull));
|
633
|
+
const Packet2d prev0dot5 = _mm_castsi128_pd(_mm_set_epi64x(0x3FDFFFFFFFFFFFFFull, 0x3FDFFFFFFFFFFFFFull));
|
634
|
+
return _mm_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
|
635
|
+
}
|
636
|
+
|
637
|
+
template<> EIGEN_STRONG_INLINE Packet4f print<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
|
638
|
+
template<> EIGEN_STRONG_INLINE Packet2d print<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
|
282
639
|
|
283
640
|
template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
|
284
641
|
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }
|
285
642
|
|
286
643
|
template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
|
287
644
|
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
|
288
|
-
#
|
645
|
+
#else
|
646
|
+
template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) {
|
647
|
+
// Adds and subtracts signum(a) * 2^23 to force rounding.
|
648
|
+
const Packet4f limit = pset1<Packet4f>(static_cast<float>(1<<23));
|
649
|
+
const Packet4f abs_a = pabs(a);
|
650
|
+
Packet4f r = padd(abs_a, limit);
|
651
|
+
// Don't compile-away addition and subtraction.
|
652
|
+
EIGEN_OPTIMIZATION_BARRIER(r);
|
653
|
+
r = psub(r, limit);
|
654
|
+
// If greater than limit, simply return a. Otherwise, account for sign.
|
655
|
+
r = pselect(pcmp_lt(abs_a, limit),
|
656
|
+
pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a);
|
657
|
+
return r;
|
658
|
+
}
|
289
659
|
|
290
|
-
template<> EIGEN_STRONG_INLINE
|
291
|
-
|
292
|
-
|
660
|
+
template<> EIGEN_STRONG_INLINE Packet2d print(const Packet2d& a) {
|
661
|
+
// Adds and subtracts signum(a) * 2^52 to force rounding.
|
662
|
+
const Packet2d limit = pset1<Packet2d>(static_cast<double>(1ull<<52));
|
663
|
+
const Packet2d abs_a = pabs(a);
|
664
|
+
Packet2d r = padd(abs_a, limit);
|
665
|
+
// Don't compile-away addition and subtraction.
|
666
|
+
EIGEN_OPTIMIZATION_BARRIER(r);
|
667
|
+
r = psub(r, limit);
|
668
|
+
// If greater than limit, simply return a. Otherwise, account for sign.
|
669
|
+
r = pselect(pcmp_lt(abs_a, limit),
|
670
|
+
pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a);
|
671
|
+
return r;
|
672
|
+
}
|
293
673
|
|
294
|
-
template<> EIGEN_STRONG_INLINE Packet4f
|
295
|
-
|
296
|
-
|
674
|
+
template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
|
675
|
+
{
|
676
|
+
const Packet4f cst_1 = pset1<Packet4f>(1.0f);
|
677
|
+
Packet4f tmp = print<Packet4f>(a);
|
678
|
+
// If greater, subtract one.
|
679
|
+
Packet4f mask = _mm_cmpgt_ps(tmp, a);
|
680
|
+
mask = pand(mask, cst_1);
|
681
|
+
return psub(tmp, mask);
|
682
|
+
}
|
297
683
|
|
298
|
-
template<> EIGEN_STRONG_INLINE
|
299
|
-
|
300
|
-
|
684
|
+
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a)
|
685
|
+
{
|
686
|
+
const Packet2d cst_1 = pset1<Packet2d>(1.0);
|
687
|
+
Packet2d tmp = print<Packet2d>(a);
|
688
|
+
// If greater, subtract one.
|
689
|
+
Packet2d mask = _mm_cmpgt_pd(tmp, a);
|
690
|
+
mask = pand(mask, cst_1);
|
691
|
+
return psub(tmp, mask);
|
692
|
+
}
|
301
693
|
|
302
|
-
template<> EIGEN_STRONG_INLINE Packet4f
|
303
|
-
|
304
|
-
|
694
|
+
template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
|
695
|
+
{
|
696
|
+
const Packet4f cst_1 = pset1<Packet4f>(1.0f);
|
697
|
+
Packet4f tmp = print<Packet4f>(a);
|
698
|
+
// If smaller, add one.
|
699
|
+
Packet4f mask = _mm_cmplt_ps(tmp, a);
|
700
|
+
mask = pand(mask, cst_1);
|
701
|
+
return padd(tmp, mask);
|
702
|
+
}
|
703
|
+
|
704
|
+
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a)
|
705
|
+
{
|
706
|
+
const Packet2d cst_1 = pset1<Packet2d>(1.0);
|
707
|
+
Packet2d tmp = print<Packet2d>(a);
|
708
|
+
// If smaller, add one.
|
709
|
+
Packet2d mask = _mm_cmplt_pd(tmp, a);
|
710
|
+
mask = pand(mask, cst_1);
|
711
|
+
return padd(tmp, mask);
|
712
|
+
}
|
713
|
+
#endif

 template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
 template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
 template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
+template<> EIGEN_STRONG_INLINE Packet16b pload<Packet16b>(const bool* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }

 #if EIGEN_COMP_MSVC
 template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
@@ -340,6 +750,10 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
   EIGEN_DEBUG_UNALIGNED_LOAD
   return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
 }
+template<> EIGEN_STRONG_INLINE Packet16b ploadu<Packet16b>(const bool* from) {
+  EIGEN_DEBUG_UNALIGNED_LOAD
+  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
+}


 template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
@@ -355,13 +769,32 @@ template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
   return vec4i_swizzle1(tmp, 0, 0, 1, 1);
 }

+// Loads 8 bools from memory and returns the packet
+// {b0, b0, b1, b1, b2, b2, b3, b3, b4, b4, b5, b5, b6, b6, b7, b7}
+template<> EIGEN_STRONG_INLINE Packet16b ploaddup<Packet16b>(const bool* from)
+{
+  __m128i tmp = _mm_castpd_si128(pload1<Packet2d>(reinterpret_cast<const double*>(from)));
+  return _mm_unpacklo_epi8(tmp, tmp);
+}
+
+// Loads 4 bools from memory and returns the packet
+// {b0, b0 b0, b0, b1, b1, b1, b1, b2, b2, b2, b2, b3, b3, b3, b3}
+template<> EIGEN_STRONG_INLINE Packet16b
+ploadquad<Packet16b>(const bool* from) {
+  __m128i tmp = _mm_castps_si128(pload1<Packet4f>(reinterpret_cast<const float*>(from)));
+  tmp = _mm_unpacklo_epi8(tmp, tmp);
+  return _mm_unpacklo_epi16(tmp, tmp);
+}
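The two bool loads above lean on the fact that unpacking a register with itself duplicates each lane. A standalone check of that behaviour (assumes SSE2; illustration only, not part of the diff):

```cpp
// Demonstrates _mm_unpacklo_epi8(x, x): each of the low 8 bytes is doubled,
// which is how ploaddup<Packet16b> widens 8 bools to 16 lanes.
#include <emmintrin.h>
#include <cstdio>

int main() {
  unsigned char in[16] = {1, 0, 1, 1, 0, 1, 0, 0};  // 8 bools, rest zero
  __m128i tmp = _mm_loadu_si128(reinterpret_cast<const __m128i*>(in));
  __m128i dup = _mm_unpacklo_epi8(tmp, tmp);        // {b0,b0,b1,b1,...,b7,b7}
  unsigned char out[16];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(out), dup);
  for (int i = 0; i < 16; ++i) std::printf("%d", out[i]);  // 1100111100110000
  std::printf("\n");
}
```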
+
 template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
 template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
 template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstore<bool>(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }

 template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<bool>(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }

 template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
 {
@@ -374,7 +807,15 @@ template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const dou
 template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
 {
   return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
-
+}
+
+template<> EIGEN_DEVICE_FUNC inline Packet16b pgather<bool, Packet16b>(const bool* from, Index stride)
+{
+  return _mm_set_epi8(from[15*stride], from[14*stride], from[13*stride], from[12*stride],
+                      from[11*stride], from[10*stride], from[9*stride], from[8*stride],
+                      from[7*stride], from[6*stride], from[5*stride], from[4*stride],
+                      from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
+}

 template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
 {
@@ -395,6 +836,14 @@ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const
   to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
   to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
 }
+template<> EIGEN_DEVICE_FUNC inline void pscatter<bool, Packet16b>(bool* to, const Packet16b& from, Index stride)
+{
+  to[4*stride*0] = _mm_cvtsi128_si32(from);
+  to[4*stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
+  to[4*stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
+  to[4*stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
+}
+

 // some compilers might be tempted to perform multiple moves instead of using a vector path.
 template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
@@ -409,7 +858,7 @@ template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double&
   pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
 }

-#if EIGEN_COMP_PGI
+#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
 typedef const void * SsePrefetchPtrType;
 #else
 typedef const char * SsePrefetchPtrType;
@@ -437,32 +886,62 @@ template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { retu
 template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
 template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
 #endif
+template<> EIGEN_STRONG_INLINE bool pfirst<Packet16b>(const Packet16b& a) { int x = _mm_cvtsi128_si32(a); return static_cast<bool>(x & 1); }

-template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
-{ return
-template<> EIGEN_STRONG_INLINE
-
-
-
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return _mm_shuffle_ps(a,a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return _mm_shuffle_pd(a,a,0x1); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return _mm_shuffle_epi32(a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet16b preverse(const Packet16b& a) {
+#ifdef EIGEN_VECTORIZE_SSSE3
+  __m128i mask = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+  return _mm_shuffle_epi8(a, mask);
+#else
+  Packet16b tmp = _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 1, 2, 3));
+  tmp = _mm_shufflehi_epi16(_mm_shufflelo_epi16(tmp, _MM_SHUFFLE(2, 3, 0, 1)), _MM_SHUFFLE(2, 3, 0, 1));
+  return _mm_or_si128(_mm_slli_epi16(tmp, 8), _mm_srli_epi16(tmp, 8));
+#endif
+}

-template<> EIGEN_STRONG_INLINE Packet4f
-
-  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
-  return _mm_and_ps(a,mask);
+template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
+  return pfrexp_generic(a,exponent);
 }
-
-
-
-
+
+// Extract exponent without existence of Packet2l.
+template<>
+EIGEN_STRONG_INLINE
+Packet2d pfrexp_generic_get_biased_exponent(const Packet2d& a) {
+  const Packet2d cst_exp_mask = pset1frombits<Packet2d>(static_cast<uint64_t>(0x7ff0000000000000ull));
+  __m128i a_expo = _mm_srli_epi64(_mm_castpd_si128(pand(a, cst_exp_mask)), 52);
+  return _mm_cvtepi32_pd(vec4i_swizzle1(a_expo, 0, 2, 1, 3));
 }
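`pfrexp_generic_get_biased_exponent` above isolates the 11-bit exponent field directly, since SSE lacks a convenient 64-bit integer packet. A scalar counterpart of the same bit manipulation (a sketch with a hypothetical name, not part of the diff):

```cpp
// Mask the exponent bits of an IEEE-754 double and shift them down by 52,
// mirroring the pand + _mm_srli_epi64 sequence above.
#include <cstdint>
#include <cstring>
#include <cstdio>

int biased_exponent(double a) {
  uint64_t bits;
  std::memcpy(&bits, &a, sizeof bits);
  return static_cast<int>((bits & 0x7ff0000000000000ull) >> 52);
}

int main() {
  std::printf("%d %d\n", biased_exponent(1.0), biased_exponent(8.0));  // 1023 1026
}
```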
-
-{
-
-
-
-
-return
-
+
+template<> EIGEN_STRONG_INLINE Packet2d pfrexp<Packet2d>(const Packet2d& a, Packet2d& exponent) {
+  return pfrexp_generic(a, exponent);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
+  return pldexp_generic(a,exponent);
+}
+
+// We specialize pldexp here, since the generic implementation uses Packet2l, which is not well
+// supported by SSE, and has more range than is needed for exponents.
+template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
+  // Clamp exponent to [-2099, 2099]
+  const Packet2d max_exponent = pset1<Packet2d>(2099.0);
+  const Packet2d e = pmin(pmax(exponent, pnegate(max_exponent)), max_exponent);
+
+  // Convert e to integer and swizzle to low-order bits.
+  const Packet4i ei = vec4i_swizzle1(_mm_cvtpd_epi32(e), 0, 3, 1, 3);
+
+  // Split 2^e into four factors and multiply:
+  const Packet4i bias = _mm_set_epi32(0, 1023, 0, 1023);
+  Packet4i b = parithmetic_shift_right<2>(ei);  // floor(e/4)
+  Packet2d c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52));  // 2^b
+  Packet2d out = pmul(pmul(pmul(a, c), c), c);  // a * 2^(3b)
+  b = psub(psub(psub(ei, b), b), b);  // e - 3b
+  c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52));  // 2^(e - 3b)
+  out = pmul(out, c);  // a * 2^e
+  return out;
 }
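The factor splitting in `pldexp<Packet2d>` above deserves a gloss: writing a * 2^e as a * 2^b * 2^b * 2^b * 2^(e-3b) with b = floor(e/4) keeps every intermediate power of two inside the normal range of a double, even for the extreme exponents (clamped to plus or minus 2099) needed to reach subnormal results. A scalar sketch of the same decomposition (hypothetical helpers, not part of the diff; assumes e within the clamped range and an arithmetic right shift):

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

double pow2i(int k) {                        // 2^k built via the exponent field;
  uint64_t bits = uint64_t(k + 1023) << 52;  // requires -1022 <= k <= 1023
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

double ldexp_split(double a, int e) {
  int b = e >> 2;                 // floor(e/4)
  double c = pow2i(b);
  double out = ((a * c) * c) * c; // a * 2^(3b)
  return out * pow2i(e - 3 * b);  // a * 2^e
}

int main() {
  std::printf("%g\n", ldexp_split(1.0, -1074));  // 4.94066e-324, smallest subnormal
}
```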

 // with AVX, the default implementations based on pload1 are faster
@@ -505,38 +984,6 @@ EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
   vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
 }

-#ifdef EIGEN_VECTORIZE_SSE3
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
-  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
-}
-
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
-  return _mm_hadd_pd(vecs[0], vecs[1]);
-}
-
-#else
-template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
-{
-  Packet4f tmp0, tmp1, tmp2;
-  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
-  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
-  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
-  tmp0 = _mm_add_ps(tmp0, tmp1);
-  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
-  tmp1 = _mm_add_ps(tmp1, tmp2);
-  tmp2 = _mm_movehl_ps(tmp1, tmp0);
-  tmp0 = _mm_movelh_ps(tmp0, tmp1);
-  return _mm_add_ps(tmp0, tmp2);
-}
-
-template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
-{
-  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
-}
-#endif // SSE3
-
 template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
 {
   // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures
@@ -562,38 +1009,28 @@ template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
 }

 #ifdef EIGEN_VECTORIZE_SSSE3
-template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
-{
-  return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
-}
 template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
 {
   Packet4i tmp0 = _mm_hadd_epi32(a,a);
   return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));
 }
+
 #else
 template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
 {
   Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
   return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
 }
+#endif

-template<> EIGEN_STRONG_INLINE
-
-
-  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
-  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
-  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
-  tmp0 = _mm_add_epi32(tmp0, tmp1);
-  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
-  tmp1 = _mm_add_epi32(tmp1, tmp2);
-  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
-  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
-  return _mm_add_epi32(tmp0, tmp2);
+template<> EIGEN_STRONG_INLINE bool predux<Packet16b>(const Packet16b& a) {
+  Packet4i tmp = _mm_or_si128(a, _mm_unpackhi_epi64(a,a));
+  return (pfirst(tmp) != 0) || (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) != 0);
 }
-
+
 // Other reduction functions:

+
 // mul
 template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
 {
@@ -611,7 +1048,13 @@ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
   // TODO try to call _mm_mul_epu32 directly
   EIGEN_ALIGN16 int aux[4];
   pstore(aux, a);
-  return (aux[0] * aux[1]) * (aux[2] * aux[3])
+  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
+}
+
+template<> EIGEN_STRONG_INLINE bool predux_mul<Packet16b>(const Packet16b& a) {
+  Packet4i tmp = _mm_and_si128(a, _mm_unpackhi_epi64(a,a));
+  return ((pfirst<Packet4i>(tmp) == 0x01010101) &&
+          (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) == 0x01010101));
 }
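The two `Packet16b` reductions just added are an OR-reduce (`predux`: is any lane true?) and an AND-reduce (`predux_mul`: are all lanes true?). An equivalent movemask-based phrasing, shown only for contrast (assumes SSE2; these helpers are hypothetical and not the code from this diff):

```cpp
#include <emmintrin.h>
#include <cstdio>

bool any16(__m128i a) {                     // OR-reduce over 16 bool bytes
  __m128i eq0 = _mm_cmpeq_epi8(a, _mm_setzero_si128());
  return _mm_movemask_epi8(eq0) != 0xFFFF;  // some byte was nonzero
}

bool all16(__m128i a) {                     // AND-reduce over 16 bool bytes
  __m128i eq0 = _mm_cmpeq_epi8(a, _mm_setzero_si128());
  return _mm_movemask_epi8(eq0) == 0;       // no byte was zero
}

int main() {
  __m128i mixed = _mm_set_epi8(0,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1);
  std::printf("%d %d\n", any16(mixed), all16(mixed));  // 1 0
}
```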

 // min
@@ -666,113 +1109,16 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
 #endif // EIGEN_VECTORIZE_SSE4_1
 }

-
-// template
-// {
-//   Packet4f res = b;
-//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
-//   return res;
-// }
-// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
+// not needed yet
+// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x)
 // {
-//
-//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
-//   return res;
+//   return _mm_movemask_ps(x) == 0xF;
 // }
-#endif
-
-#ifdef EIGEN_VECTORIZE_SSSE3
-// SSSE3 versions
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
-  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
-  {
-    if (Offset!=0)
-      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
-  }
-};

-template
-struct palign_impl<Offset,Packet4i>
+template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
 {
-
-
-    if (Offset!=0)
-      first = _mm_alignr_epi8(second,first, Offset*4);
-  }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet2d>
-{
-  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
-  {
-    if (Offset==1)
-      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
-  }
-};
-#else
-// SSE2 versions
-template<int Offset>
-struct palign_impl<Offset,Packet4f>
-{
-  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
-  {
-    if (Offset==1)
-    {
-      first = _mm_move_ss(first,second);
-      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
-    }
-    else if (Offset==2)
-    {
-      first = _mm_movehl_ps(first,first);
-      first = _mm_movelh_ps(first,second);
-    }
-    else if (Offset==3)
-    {
-      first = _mm_move_ss(first,second);
-      first = _mm_shuffle_ps(first,second,0x93);
-    }
-  }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet4i>
-{
-  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
-  {
-    if (Offset==1)
-    {
-      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
-      first = _mm_shuffle_epi32(first,0x39);
-    }
-    else if (Offset==2)
-    {
-      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
-      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
-    }
-    else if (Offset==3)
-    {
-      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
-      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
-    }
-  }
-};
-
-template<int Offset>
-struct palign_impl<Offset,Packet2d>
-{
-  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
-  {
-    if (Offset==1)
-    {
-      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
-      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
-    }
-  }
-};
-#endif
+  return _mm_movemask_ps(x) != 0x0;
+}

 EIGEN_DEVICE_FUNC inline void
 ptranspose(PacketBlock<Packet4f,4>& kernel) {
@@ -799,6 +1145,100 @@ ptranspose(PacketBlock<Packet4i,4>& kernel) {
   kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
 }

+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet16b,4>& kernel) {
+  __m128i T0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]);
+  __m128i T1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]);
+  __m128i T2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]);
+  __m128i T3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]);
+  kernel.packet[0] = _mm_unpacklo_epi16(T0, T2);
+  kernel.packet[1] = _mm_unpackhi_epi16(T0, T2);
+  kernel.packet[2] = _mm_unpacklo_epi16(T1, T3);
+  kernel.packet[3] = _mm_unpackhi_epi16(T1, T3);
+}
+
+EIGEN_DEVICE_FUNC inline void
+ptranspose(PacketBlock<Packet16b,16>& kernel) {
+  // If we number the elements in the input thus:
+  // kernel.packet[ 0] = {00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 0a, 0b, 0c, 0d, 0e, 0f}
+  // kernel.packet[ 1] = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 1a, 1b, 1c, 1d, 1e, 1f}
+  // ...
+  // kernel.packet[15] = {f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, fa, fb, fc, fd, fe, ff},
+  //
+  // the desired output is:
+  // kernel.packet[ 0] = {00, 10, 20, 30, 40, 50, 60, 70, 80, 90, a0, b0, c0, d0, e0, f0}
+  // kernel.packet[ 1] = {01, 11, 21, 31, 41, 51, 61, 71, 81, 91, a1, b1, c1, d1, e1, f1}
+  // ...
+  // kernel.packet[15] = {0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, af, bf, cf, df, ef, ff},
+  __m128i t0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
+  __m128i t1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]); // 08 18 09 19 0a 1a 0b 1b 0c 1c 0d 1d 0e 1e 0f 1f
+  __m128i t2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]); // 20 30 21 31 22 32 ... 27 37
+  __m128i t3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]); // 28 38 29 39 2a 3a ... 2f 3f
+  __m128i t4 = _mm_unpacklo_epi8(kernel.packet[4], kernel.packet[5]); // 40 50 41 51 42 52 47 57
+  __m128i t5 = _mm_unpackhi_epi8(kernel.packet[4], kernel.packet[5]); // 48 58 49 59 4a 5a
+  __m128i t6 = _mm_unpacklo_epi8(kernel.packet[6], kernel.packet[7]);
+  __m128i t7 = _mm_unpackhi_epi8(kernel.packet[6], kernel.packet[7]);
+  __m128i t8 = _mm_unpacklo_epi8(kernel.packet[8], kernel.packet[9]);
+  __m128i t9 = _mm_unpackhi_epi8(kernel.packet[8], kernel.packet[9]);
+  __m128i ta = _mm_unpacklo_epi8(kernel.packet[10], kernel.packet[11]);
+  __m128i tb = _mm_unpackhi_epi8(kernel.packet[10], kernel.packet[11]);
+  __m128i tc = _mm_unpacklo_epi8(kernel.packet[12], kernel.packet[13]);
+  __m128i td = _mm_unpackhi_epi8(kernel.packet[12], kernel.packet[13]);
+  __m128i te = _mm_unpacklo_epi8(kernel.packet[14], kernel.packet[15]);
+  __m128i tf = _mm_unpackhi_epi8(kernel.packet[14], kernel.packet[15]);
+
+  __m128i s0 = _mm_unpacklo_epi16(t0, t2); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
+  __m128i s1 = _mm_unpackhi_epi16(t0, t2); // 04 14 24 34
+  __m128i s2 = _mm_unpacklo_epi16(t1, t3); // 08 18 28 38 ...
+  __m128i s3 = _mm_unpackhi_epi16(t1, t3); // 0c 1c 2c 3c ...
+  __m128i s4 = _mm_unpacklo_epi16(t4, t6); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
+  __m128i s5 = _mm_unpackhi_epi16(t4, t6); // 44 54 64 74 ...
+  __m128i s6 = _mm_unpacklo_epi16(t5, t7);
+  __m128i s7 = _mm_unpackhi_epi16(t5, t7);
+  __m128i s8 = _mm_unpacklo_epi16(t8, ta);
+  __m128i s9 = _mm_unpackhi_epi16(t8, ta);
+  __m128i sa = _mm_unpacklo_epi16(t9, tb);
+  __m128i sb = _mm_unpackhi_epi16(t9, tb);
+  __m128i sc = _mm_unpacklo_epi16(tc, te);
+  __m128i sd = _mm_unpackhi_epi16(tc, te);
+  __m128i se = _mm_unpacklo_epi16(td, tf);
+  __m128i sf = _mm_unpackhi_epi16(td, tf);
+
+  __m128i u0 = _mm_unpacklo_epi32(s0, s4); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
+  __m128i u1 = _mm_unpackhi_epi32(s0, s4); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
+  __m128i u2 = _mm_unpacklo_epi32(s1, s5);
+  __m128i u3 = _mm_unpackhi_epi32(s1, s5);
+  __m128i u4 = _mm_unpacklo_epi32(s2, s6);
+  __m128i u5 = _mm_unpackhi_epi32(s2, s6);
+  __m128i u6 = _mm_unpacklo_epi32(s3, s7);
+  __m128i u7 = _mm_unpackhi_epi32(s3, s7);
+  __m128i u8 = _mm_unpacklo_epi32(s8, sc);
+  __m128i u9 = _mm_unpackhi_epi32(s8, sc);
+  __m128i ua = _mm_unpacklo_epi32(s9, sd);
+  __m128i ub = _mm_unpackhi_epi32(s9, sd);
+  __m128i uc = _mm_unpacklo_epi32(sa, se);
+  __m128i ud = _mm_unpackhi_epi32(sa, se);
+  __m128i ue = _mm_unpacklo_epi32(sb, sf);
+  __m128i uf = _mm_unpackhi_epi32(sb, sf);
+
+  kernel.packet[0] = _mm_unpacklo_epi64(u0, u8);
+  kernel.packet[1] = _mm_unpackhi_epi64(u0, u8);
+  kernel.packet[2] = _mm_unpacklo_epi64(u1, u9);
+  kernel.packet[3] = _mm_unpackhi_epi64(u1, u9);
+  kernel.packet[4] = _mm_unpacklo_epi64(u2, ua);
+  kernel.packet[5] = _mm_unpackhi_epi64(u2, ua);
+  kernel.packet[6] = _mm_unpacklo_epi64(u3, ub);
+  kernel.packet[7] = _mm_unpackhi_epi64(u3, ub);
+  kernel.packet[8] = _mm_unpacklo_epi64(u4, uc);
+  kernel.packet[9] = _mm_unpackhi_epi64(u4, uc);
+  kernel.packet[10] = _mm_unpacklo_epi64(u5, ud);
+  kernel.packet[11] = _mm_unpackhi_epi64(u5, ud);
+  kernel.packet[12] = _mm_unpacklo_epi64(u6, ue);
+  kernel.packet[13] = _mm_unpackhi_epi64(u6, ue);
+  kernel.packet[14] = _mm_unpacklo_epi64(u7, uf);
+  kernel.packet[15] = _mm_unpackhi_epi64(u7, uf);
+}
+
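The comment block inside the 16x16 kernel already states the contract; in scalar terms it is simply `out[i][j] = in[j][i]`. A tiny reference harness one could diff a SIMD kernel against (illustration only, not part of the diff):

```cpp
#include <cstdio>

int main() {
  unsigned char in[16][16], out[16][16];
  for (int i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
      in[i][j] = static_cast<unsigned char>(16 * i + j);  // the "ij" labels above
  for (int i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
      out[i][j] = in[j][i];                               // transpose contract
  std::printf("%02x %02x %02x\n", out[0][0], out[0][1], out[1][0]);  // 00 10 01
}
```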
 template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
@@ -830,59 +1270,229 @@ template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, cons
 #endif
 }

-
-
-
-return
-
-
+// Scalar path for pmadd with FMA to ensure consistency with vectorized path.
+#ifdef EIGEN_VECTORIZE_FMA
+template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
+  return ::fmaf(a,b,c);
+}
+template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {
+  return ::fma(a,b,c);
+}
 #endif
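The scalar `pmadd` overloads above matter because `std::fma` rounds once, so a fused and an unfused multiply-add can disagree in the last bit; routing the scalar fallback through `fma`/`fmaf` keeps it bit-identical with the FMA-vectorized path. A standalone demonstration (not part of the diff):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  double a = 134217729.0;            // 2^27 + 1
  double p = a * a;                  // rounded to 2^54 + 2^28; the trailing +1 is lost
  double fused = std::fma(a, a, -p); // single rounding recovers that lost 1.0
  std::printf("%g\n", fused);        // prints 1, whereas p - p is exactly 0
}
```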
+
+
+// Packet math for Eigen::half
+// Disable the following code since it's broken on too many platforms / compilers.
+//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
+#if 0
+
+typedef struct {
+  __m64 x;
+} Packet4h;
+
+
+template<> struct is_arithmetic<Packet4h> { enum { value = true }; };
+
+template <>
+struct packet_traits<Eigen::half> : default_packet_traits {
+  typedef Packet4h type;
+  // There is no half-size packet for Packet4h.
+  typedef Packet4h half;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 4,
+    HasHalfPacket = 0,
+    HasAdd = 1,
+    HasSub = 1,
+    HasMul = 1,
+    HasDiv = 1,
+    HasNegate = 0,
+    HasAbs = 0,
+    HasAbs2 = 0,
+    HasMin = 0,
+    HasMax = 0,
+    HasConj = 0,
+    HasSetLinear = 0,
+    HasSqrt = 0,
+    HasRsqrt = 0,
+    HasExp = 0,
+    HasLog = 0,
+    HasBlend = 0
+  };
+};
+
+
+template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h half; };
+
+template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
+  Packet4h result;
+  result.x = _mm_set1_pi16(from.x);
+  return result;
 }

-template<> EIGEN_STRONG_INLINE
-
-#ifdef EIGEN_VECTORIZE_SSE4_1
-  return _mm_blend_pd(a,pset1<Packet2d>(b),1);
-#else
-  return _mm_move_sd(a, _mm_load_sd(&b));
-#endif
+template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {
+  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));
 }

-template<> EIGEN_STRONG_INLINE
-
-
-
-
-
-
-
+template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
+  __int64_t a64 = _mm_cvtm64_si64(a.x);
+  __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+  Eigen::half h[4];
+
+  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+  h[0] = ha + hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+  h[1] = ha + hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+  h[2] = ha + hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+  h[3] = ha + hb;
+  Packet4h result;
+  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h psub<Packet4h>(const Packet4h& a, const Packet4h& b) {
+  __int64_t a64 = _mm_cvtm64_si64(a.x);
+  __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+  Eigen::half h[4];
+
+  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+  h[0] = ha - hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+  h[1] = ha - hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+  h[2] = ha - hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+  h[3] = ha - hb;
+  Packet4h result;
+  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
+  __int64_t a64 = _mm_cvtm64_si64(a.x);
+  __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+  Eigen::half h[4];
+
+  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+  h[0] = ha * hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+  h[1] = ha * hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+  h[2] = ha * hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+  h[3] = ha * hb;
+  Packet4h result;
+  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pdiv<Packet4h>(const Packet4h& a, const Packet4h& b) {
+  __int64_t a64 = _mm_cvtm64_si64(a.x);
+  __int64_t b64 = _mm_cvtm64_si64(b.x);
+
+  Eigen::half h[4];
+
+  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
+  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
+  h[0] = ha / hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
+  h[1] = ha / hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
+  h[2] = ha / hb;
+  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
+  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
+  h[3] = ha / hb;
+  Packet4h result;
+  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
+  Packet4h result;
+  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
+  return result;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
+  Packet4h result;
+  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
+  return result;
 }

-template<> EIGEN_STRONG_INLINE
+template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
+  __int64_t r = _mm_cvtm64_si64(from.x);
+  *(reinterpret_cast<__int64_t*>(to)) = r;
+}
+
+template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
+  __int64_t r = _mm_cvtm64_si64(from.x);
+  *(reinterpret_cast<__int64_t*>(to)) = r;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h
+ploadquad<Packet4h>(const Eigen::half* from) {
+  return pset1<Packet4h>(*from);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)
 {
-
-
-
-  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x0,0xFFFFFFFF,0xFFFFFFFF));
-  return _mm_or_pd(_mm_andnot_pd(mask, a), _mm_and_pd(mask, pset1<Packet2d>(b)));
-#endif
+  Packet4h result;
+  result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
+  return result;
 }

-
-
-
-
+template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)
+{
+  __int64_t a = _mm_cvtm64_si64(from.x);
+  to[stride*0].x = static_cast<unsigned short>(a);
+  to[stride*1].x = static_cast<unsigned short>(a >> 16);
+  to[stride*2].x = static_cast<unsigned short>(a >> 32);
+  to[stride*3].x = static_cast<unsigned short>(a >> 48);
 }
-
-
+
+EIGEN_STRONG_INLINE void
+ptranspose(PacketBlock<Packet4h,4>& kernel) {
+  __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
+  __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
+  __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
+  __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);
+
+  kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
+  kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
+  kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
+  kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
 }
+
 #endif

+
 } // end namespace internal

 } // end namespace Eigen

-#if EIGEN_COMP_PGI
+#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
 // PGI++ does not define the following intrinsics in C++ mode.
 static inline __m128 _mm_castpd_ps (__m128d x) { return reinterpret_cast<__m128&>(x); }
 static inline __m128i _mm_castpd_si128(__m128d x) { return reinterpret_cast<__m128i&>(x); }