tomoto 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +3 -0
- data/LICENSE.txt +22 -0
- data/README.md +123 -0
- data/ext/tomoto/ext.cpp +245 -0
- data/ext/tomoto/extconf.rb +28 -0
- data/lib/tomoto.rb +12 -0
- data/lib/tomoto/ct.rb +11 -0
- data/lib/tomoto/hdp.rb +11 -0
- data/lib/tomoto/lda.rb +67 -0
- data/lib/tomoto/version.rb +3 -0
- data/vendor/EigenRand/EigenRand/Core.h +1139 -0
- data/vendor/EigenRand/EigenRand/Dists/Basic.h +111 -0
- data/vendor/EigenRand/EigenRand/Dists/Discrete.h +877 -0
- data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +108 -0
- data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +626 -0
- data/vendor/EigenRand/EigenRand/EigenRand +19 -0
- data/vendor/EigenRand/EigenRand/Macro.h +24 -0
- data/vendor/EigenRand/EigenRand/MorePacketMath.h +978 -0
- data/vendor/EigenRand/EigenRand/PacketFilter.h +286 -0
- data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +624 -0
- data/vendor/EigenRand/EigenRand/RandUtils.h +413 -0
- data/vendor/EigenRand/EigenRand/doc.h +220 -0
- data/vendor/EigenRand/LICENSE +21 -0
- data/vendor/EigenRand/README.md +288 -0
- data/vendor/eigen/COPYING.BSD +26 -0
- data/vendor/eigen/COPYING.GPL +674 -0
- data/vendor/eigen/COPYING.LGPL +502 -0
- data/vendor/eigen/COPYING.MINPACK +52 -0
- data/vendor/eigen/COPYING.MPL2 +373 -0
- data/vendor/eigen/COPYING.README +18 -0
- data/vendor/eigen/Eigen/CMakeLists.txt +19 -0
- data/vendor/eigen/Eigen/Cholesky +46 -0
- data/vendor/eigen/Eigen/CholmodSupport +48 -0
- data/vendor/eigen/Eigen/Core +537 -0
- data/vendor/eigen/Eigen/Dense +7 -0
- data/vendor/eigen/Eigen/Eigen +2 -0
- data/vendor/eigen/Eigen/Eigenvalues +61 -0
- data/vendor/eigen/Eigen/Geometry +62 -0
- data/vendor/eigen/Eigen/Householder +30 -0
- data/vendor/eigen/Eigen/IterativeLinearSolvers +48 -0
- data/vendor/eigen/Eigen/Jacobi +33 -0
- data/vendor/eigen/Eigen/LU +50 -0
- data/vendor/eigen/Eigen/MetisSupport +35 -0
- data/vendor/eigen/Eigen/OrderingMethods +73 -0
- data/vendor/eigen/Eigen/PaStiXSupport +48 -0
- data/vendor/eigen/Eigen/PardisoSupport +35 -0
- data/vendor/eigen/Eigen/QR +51 -0
- data/vendor/eigen/Eigen/QtAlignedMalloc +40 -0
- data/vendor/eigen/Eigen/SPQRSupport +34 -0
- data/vendor/eigen/Eigen/SVD +51 -0
- data/vendor/eigen/Eigen/Sparse +36 -0
- data/vendor/eigen/Eigen/SparseCholesky +45 -0
- data/vendor/eigen/Eigen/SparseCore +69 -0
- data/vendor/eigen/Eigen/SparseLU +46 -0
- data/vendor/eigen/Eigen/SparseQR +37 -0
- data/vendor/eigen/Eigen/StdDeque +27 -0
- data/vendor/eigen/Eigen/StdList +26 -0
- data/vendor/eigen/Eigen/StdVector +27 -0
- data/vendor/eigen/Eigen/SuperLUSupport +64 -0
- data/vendor/eigen/Eigen/UmfPackSupport +40 -0
- data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +673 -0
- data/vendor/eigen/Eigen/src/Cholesky/LLT.h +542 -0
- data/vendor/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
- data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +639 -0
- data/vendor/eigen/Eigen/src/Core/Array.h +329 -0
- data/vendor/eigen/Eigen/src/Core/ArrayBase.h +226 -0
- data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +209 -0
- data/vendor/eigen/Eigen/src/Core/Assign.h +90 -0
- data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +935 -0
- data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +178 -0
- data/vendor/eigen/Eigen/src/Core/BandMatrix.h +353 -0
- data/vendor/eigen/Eigen/src/Core/Block.h +452 -0
- data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +164 -0
- data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +160 -0
- data/vendor/eigen/Eigen/src/Core/ConditionEstimator.h +175 -0
- data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +1688 -0
- data/vendor/eigen/Eigen/src/Core/CoreIterators.h +127 -0
- data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +184 -0
- data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +866 -0
- data/vendor/eigen/Eigen/src/Core/CwiseTernaryOp.h +197 -0
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +103 -0
- data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +128 -0
- data/vendor/eigen/Eigen/src/Core/DenseBase.h +611 -0
- data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +681 -0
- data/vendor/eigen/Eigen/src/Core/DenseStorage.h +570 -0
- data/vendor/eigen/Eigen/src/Core/Diagonal.h +260 -0
- data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +343 -0
- data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +28 -0
- data/vendor/eigen/Eigen/src/Core/Dot.h +318 -0
- data/vendor/eigen/Eigen/src/Core/EigenBase.h +159 -0
- data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +146 -0
- data/vendor/eigen/Eigen/src/Core/Fuzzy.h +155 -0
- data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +455 -0
- data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +593 -0
- data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +187 -0
- data/vendor/eigen/Eigen/src/Core/IO.h +225 -0
- data/vendor/eigen/Eigen/src/Core/Inverse.h +118 -0
- data/vendor/eigen/Eigen/src/Core/Map.h +171 -0
- data/vendor/eigen/Eigen/src/Core/MapBase.h +303 -0
- data/vendor/eigen/Eigen/src/Core/MathFunctions.h +1415 -0
- data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +101 -0
- data/vendor/eigen/Eigen/src/Core/Matrix.h +459 -0
- data/vendor/eigen/Eigen/src/Core/MatrixBase.h +529 -0
- data/vendor/eigen/Eigen/src/Core/NestByValue.h +110 -0
- data/vendor/eigen/Eigen/src/Core/NoAlias.h +108 -0
- data/vendor/eigen/Eigen/src/Core/NumTraits.h +248 -0
- data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +633 -0
- data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +1035 -0
- data/vendor/eigen/Eigen/src/Core/Product.h +186 -0
- data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +1112 -0
- data/vendor/eigen/Eigen/src/Core/Random.h +182 -0
- data/vendor/eigen/Eigen/src/Core/Redux.h +505 -0
- data/vendor/eigen/Eigen/src/Core/Ref.h +283 -0
- data/vendor/eigen/Eigen/src/Core/Replicate.h +142 -0
- data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +117 -0
- data/vendor/eigen/Eigen/src/Core/Reverse.h +211 -0
- data/vendor/eigen/Eigen/src/Core/Select.h +162 -0
- data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +352 -0
- data/vendor/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
- data/vendor/eigen/Eigen/src/Core/Solve.h +188 -0
- data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +235 -0
- data/vendor/eigen/Eigen/src/Core/SolverBase.h +130 -0
- data/vendor/eigen/Eigen/src/Core/StableNorm.h +221 -0
- data/vendor/eigen/Eigen/src/Core/Stride.h +111 -0
- data/vendor/eigen/Eigen/src/Core/Swap.h +67 -0
- data/vendor/eigen/Eigen/src/Core/Transpose.h +403 -0
- data/vendor/eigen/Eigen/src/Core/Transpositions.h +407 -0
- data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +983 -0
- data/vendor/eigen/Eigen/src/Core/VectorBlock.h +96 -0
- data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +695 -0
- data/vendor/eigen/Eigen/src/Core/Visitor.h +273 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +451 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +439 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +637 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +51 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +391 -0
- data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1316 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +430 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +322 -0
- data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +1061 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +103 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +674 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/MathFunctions.h +91 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +333 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +1124 -0
- data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +212 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +29 -0
- data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +49 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +490 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +91 -0
- data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +760 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +471 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +562 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +895 -0
- data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +77 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +397 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +137 -0
- data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +945 -0
- data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +168 -0
- data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +475 -0
- data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +188 -0
- data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +136 -0
- data/vendor/eigen/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
- data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +792 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2156 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +492 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +311 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +122 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +619 -0
- data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
- data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +163 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +521 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +287 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +260 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
- data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +93 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +466 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +315 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +335 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +163 -0
- data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +145 -0
- data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +398 -0
- data/vendor/eigen/Eigen/src/Core/util/Constants.h +547 -0
- data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +83 -0
- data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +302 -0
- data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +130 -0
- data/vendor/eigen/Eigen/src/Core/util/Macros.h +1001 -0
- data/vendor/eigen/Eigen/src/Core/util/Memory.h +993 -0
- data/vendor/eigen/Eigen/src/Core/util/Meta.h +534 -0
- data/vendor/eigen/Eigen/src/Core/util/NonMPL2.h +3 -0
- data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +27 -0
- data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +218 -0
- data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +821 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +459 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +654 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +546 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +870 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
- data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +556 -0
- data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +392 -0
- data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +247 -0
- data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +114 -0
- data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +497 -0
- data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +282 -0
- data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +234 -0
- data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +195 -0
- data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +814 -0
- data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +199 -0
- data/vendor/eigen/Eigen/src/Geometry/RotationBase.h +206 -0
- data/vendor/eigen/Eigen/src/Geometry/Scaling.h +170 -0
- data/vendor/eigen/Eigen/src/Geometry/Transform.h +1542 -0
- data/vendor/eigen/Eigen/src/Geometry/Translation.h +208 -0
- data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +166 -0
- data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +161 -0
- data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +103 -0
- data/vendor/eigen/Eigen/src/Householder/Householder.h +172 -0
- data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +470 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +228 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +246 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +400 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +462 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +394 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +216 -0
- data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +115 -0
- data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +462 -0
- data/vendor/eigen/Eigen/src/LU/Determinant.h +101 -0
- data/vendor/eigen/Eigen/src/LU/FullPivLU.h +891 -0
- data/vendor/eigen/Eigen/src/LU/InverseImpl.h +415 -0
- data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +611 -0
- data/vendor/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
- data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +338 -0
- data/vendor/eigen/Eigen/src/MetisSupport/MetisSupport.h +137 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +445 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +1843 -0
- data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +157 -0
- data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
- data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +543 -0
- data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +653 -0
- data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
- data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +562 -0
- data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +676 -0
- data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +409 -0
- data/vendor/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
- data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +313 -0
- data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +1246 -0
- data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +804 -0
- data/vendor/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
- data/vendor/eigen/Eigen/src/SVD/SVDBase.h +315 -0
- data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +689 -0
- data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +199 -0
- data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +377 -0
- data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +258 -0
- data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
- data/vendor/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +216 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +603 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseColEtree.h +206 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +341 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +726 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +148 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +320 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseDot.h +98 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseMap.h +305 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +1403 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +405 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparsePermutation.h +178 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +169 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseRedux.h +49 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +397 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +656 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseTranspose.h +92 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +178 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +478 -0
- data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +253 -0
- data/vendor/eigen/Eigen/src/SparseCore/TriangularSolver.h +315 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +773 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +301 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
- data/vendor/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
- data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +745 -0
- data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +126 -0
- data/vendor/eigen/Eigen/src/StlSupport/StdList.h +106 -0
- data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +131 -0
- data/vendor/eigen/Eigen/src/StlSupport/details.h +84 -0
- data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +1027 -0
- data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +506 -0
- data/vendor/eigen/Eigen/src/misc/Image.h +82 -0
- data/vendor/eigen/Eigen/src/misc/Kernel.h +79 -0
- data/vendor/eigen/Eigen/src/misc/RealSvd2x2.h +55 -0
- data/vendor/eigen/Eigen/src/misc/blas.h +440 -0
- data/vendor/eigen/Eigen/src/misc/lapack.h +152 -0
- data/vendor/eigen/Eigen/src/misc/lapacke.h +16291 -0
- data/vendor/eigen/Eigen/src/misc/lapacke_mangling.h +17 -0
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +332 -0
- data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +552 -0
- data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +1058 -0
- data/vendor/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
- data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +163 -0
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
- data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +85 -0
- data/vendor/eigen/README.md +3 -0
- data/vendor/eigen/bench/README.txt +55 -0
- data/vendor/eigen/bench/btl/COPYING +340 -0
- data/vendor/eigen/bench/btl/README +154 -0
- data/vendor/eigen/bench/tensors/README +21 -0
- data/vendor/eigen/blas/README.txt +6 -0
- data/vendor/eigen/demos/mandelbrot/README +10 -0
- data/vendor/eigen/demos/mix_eigen_and_c/README +9 -0
- data/vendor/eigen/demos/opengl/README +13 -0
- data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +1760 -0
- data/vendor/eigen/unsupported/README.txt +50 -0
- data/vendor/tomotopy/LICENSE +21 -0
- data/vendor/tomotopy/README.kr.rst +375 -0
- data/vendor/tomotopy/README.rst +382 -0
- data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +362 -0
- data/vendor/tomotopy/src/Labeling/FoRelevance.h +88 -0
- data/vendor/tomotopy/src/Labeling/Labeler.h +50 -0
- data/vendor/tomotopy/src/TopicModel/CT.h +37 -0
- data/vendor/tomotopy/src/TopicModel/CTModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/CTModel.hpp +293 -0
- data/vendor/tomotopy/src/TopicModel/DMR.h +51 -0
- data/vendor/tomotopy/src/TopicModel/DMRModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +374 -0
- data/vendor/tomotopy/src/TopicModel/DT.h +65 -0
- data/vendor/tomotopy/src/TopicModel/DTM.h +22 -0
- data/vendor/tomotopy/src/TopicModel/DTModel.cpp +15 -0
- data/vendor/tomotopy/src/TopicModel/DTModel.hpp +572 -0
- data/vendor/tomotopy/src/TopicModel/GDMR.h +37 -0
- data/vendor/tomotopy/src/TopicModel/GDMRModel.cpp +14 -0
- data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +485 -0
- data/vendor/tomotopy/src/TopicModel/HDP.h +74 -0
- data/vendor/tomotopy/src/TopicModel/HDPModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +592 -0
- data/vendor/tomotopy/src/TopicModel/HLDA.h +40 -0
- data/vendor/tomotopy/src/TopicModel/HLDAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +681 -0
- data/vendor/tomotopy/src/TopicModel/HPA.h +27 -0
- data/vendor/tomotopy/src/TopicModel/HPAModel.cpp +21 -0
- data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +588 -0
- data/vendor/tomotopy/src/TopicModel/LDA.h +144 -0
- data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +442 -0
- data/vendor/tomotopy/src/TopicModel/LDAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +1058 -0
- data/vendor/tomotopy/src/TopicModel/LLDA.h +45 -0
- data/vendor/tomotopy/src/TopicModel/LLDAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +203 -0
- data/vendor/tomotopy/src/TopicModel/MGLDA.h +63 -0
- data/vendor/tomotopy/src/TopicModel/MGLDAModel.cpp +17 -0
- data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +558 -0
- data/vendor/tomotopy/src/TopicModel/PA.h +43 -0
- data/vendor/tomotopy/src/TopicModel/PAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/PAModel.hpp +467 -0
- data/vendor/tomotopy/src/TopicModel/PLDA.h +17 -0
- data/vendor/tomotopy/src/TopicModel/PLDAModel.cpp +13 -0
- data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +214 -0
- data/vendor/tomotopy/src/TopicModel/SLDA.h +54 -0
- data/vendor/tomotopy/src/TopicModel/SLDAModel.cpp +17 -0
- data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +456 -0
- data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +692 -0
- data/vendor/tomotopy/src/Utils/AliasMethod.hpp +169 -0
- data/vendor/tomotopy/src/Utils/Dictionary.h +80 -0
- data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +181 -0
- data/vendor/tomotopy/src/Utils/LBFGS.h +202 -0
- data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBacktracking.h +120 -0
- data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBracketing.h +122 -0
- data/vendor/tomotopy/src/Utils/LBFGS/Param.h +213 -0
- data/vendor/tomotopy/src/Utils/LUT.hpp +82 -0
- data/vendor/tomotopy/src/Utils/MultiNormalDistribution.hpp +69 -0
- data/vendor/tomotopy/src/Utils/PolyaGamma.hpp +200 -0
- data/vendor/tomotopy/src/Utils/PolyaGammaHybrid.hpp +672 -0
- data/vendor/tomotopy/src/Utils/ThreadPool.hpp +150 -0
- data/vendor/tomotopy/src/Utils/Trie.hpp +220 -0
- data/vendor/tomotopy/src/Utils/TruncMultiNormal.hpp +94 -0
- data/vendor/tomotopy/src/Utils/Utils.hpp +337 -0
- data/vendor/tomotopy/src/Utils/avx_gamma.h +46 -0
- data/vendor/tomotopy/src/Utils/avx_mathfun.h +736 -0
- data/vendor/tomotopy/src/Utils/exception.h +28 -0
- data/vendor/tomotopy/src/Utils/math.h +281 -0
- data/vendor/tomotopy/src/Utils/rtnorm.hpp +2690 -0
- data/vendor/tomotopy/src/Utils/sample.hpp +192 -0
- data/vendor/tomotopy/src/Utils/serializer.hpp +695 -0
- data/vendor/tomotopy/src/Utils/slp.hpp +131 -0
- data/vendor/tomotopy/src/Utils/sse_gamma.h +48 -0
- data/vendor/tomotopy/src/Utils/sse_mathfun.h +710 -0
- data/vendor/tomotopy/src/Utils/text.hpp +49 -0
- data/vendor/tomotopy/src/Utils/tvector.hpp +543 -0
- metadata +531 -0
data/vendor/tomotopy/src/Utils/Utils.hpp
@@ -0,0 +1,337 @@
#pragma once
#include <cassert>
#include <vector>
#include <functional>
#include <typeinfo>
#include <algorithm>
#include <memory>
#include <mutex>
#include <iterator>

namespace tomoto
{
    template<typename T, typename... Args,
        typename std::enable_if<!std::is_array<T>::value, int>::type = 0>
    std::unique_ptr<T> make_unique(Args&&... args)
    {
        return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
    }

    template<typename T,
        typename std::enable_if<std::is_array<T>::value, int>::type = 0>
    std::unique_ptr<T> make_unique(size_t size)
    {
        return std::unique_ptr<T>(new typename std::remove_extent<T>::type [size]);
    }

    template<typename T>
    constexpr T * as_mutable(const T * value) noexcept {
        return const_cast<T *>(value);
    }

    template<bool _lock>
    class OptionalLock : public std::lock_guard<std::mutex>
    {
    public:
        using std::lock_guard<std::mutex>::lock_guard;
    };

    template<>
    class OptionalLock<false>
    {
    public:
        OptionalLock(const std::mutex& mtx)
        {}
    };

    template<bool _Dec, typename _Ty>
    struct CntUpdater
    {
        inline _Ty operator()(_Ty& val, _Ty inc)
        {
            return val += inc;
        }
    };

    template<typename _Ty>
    struct CntUpdater<true, _Ty>
    {
        inline _Ty operator()(_Ty& val, _Ty inc)
        {
            return val = std::max(val + inc, (_Ty)0);
        }
    };

    template<bool _Dec, typename _Ty> _Ty updateCnt(_Ty& val, _Ty inc)
    {
        auto ret = CntUpdater<_Dec, _Ty>{}(val, inc);
        assert(ret >= 0);
        return ret;
    }

    template<class UnaryFunction>
    UnaryFunction forRandom(size_t N, size_t seed, UnaryFunction f)
    {
        static size_t primes[16] = {
            65537, 65539, 65543, 65551, 65557, 65563,
            65579, 65581, 65587, 65599, 65609, 65617,
            65629, 65633, 65647, 65651
        };
        if (!N) return f;
        size_t P = primes[seed & 0xF];
        if (N % P == 0)
        {
            P = primes[(seed + 1) & 0xF];
            if (N % P == 0) P = primes[(seed + 2) & 0xF];
            if (N % P == 0) P = primes[(seed + 3) & 0xF];
        }
        P %= N;
        for (size_t i = 0; i < N; ++i) {
            f(((i + seed) * P) % N);
        }
        return f;
    }

    template<class RandomIt, class UnaryFunction>
    UnaryFunction forEachRandom(RandomIt first, RandomIt last, size_t seed, UnaryFunction f)
    {
        static size_t primes[16] = {
            65537, 65539, 65543, 65551, 65557, 65563,
            65579, 65581, 65587, 65599, 65609, 65617,
            65629, 65633, 65647, 65651
        };

        const size_t N = std::distance(first, last);
        if (!N) return f;
        size_t P = primes[seed & 0xF];
        if (N % P == 0)
        {
            P = primes[(seed + 1) & 0xF];
            if (N % P == 0) P = primes[(seed + 2) & 0xF];
            if (N % P == 0) P = primes[(seed + 3) & 0xF];
        }
        P %= N;
        for (size_t i = 0; i < N; ++i) {
            f(first[((i + seed) * P) % N]);
        }
        return f;
    }
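The prime-stride loops above scramble iteration order without allocating a permutation: the chosen P is a prime that does not divide N, so it is coprime to N and the map i -> ((i + seed) * P) % N visits every index exactly once. A minimal usage sketch (illustrative only, not shipped in the gem; the include path is hypothetical):

#include <cstdio>
#include <vector>
#include "Utils.hpp" // hypothetical path to the header above

int main()
{
    std::vector<int> v = { 10, 20, 30, 40, 50 };
    // Each element is visited exactly once, in a seed-dependent scrambled order.
    tomoto::forEachRandom(v.begin(), v.end(), 42, [](int x)
    {
        std::printf("%d ", x);
    });
    return 0;
}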

    template<class _Cont, class _Ty>
    size_t insertIntoEmpty(_Cont& cont, _Ty&& e)
    {
        size_t pos = 0;
        for (auto& c : cont)
        {
            if (!(bool)c)
            {
                c = e;
                return pos;
            }
            ++pos;
        }
        cont.emplace_back(e);
        return pos;
    }

    /*
    * _Container src: (in/out) container to be sorted
    * vector<integer> order: (out) a vector mapping old idx to new idx (order[oldIdx] => newIdx)
    * _Less cmp: (in) comparator
    */
    template<typename _Container, typename _OrderType = uint32_t, typename _Less = std::less<typename _Container::value_type>>
    void sortAndWriteOrder(_Container& src, std::vector<_OrderType>& order, size_t rotate = 0, _Less cmp = _Less{})
    {
        typedef std::pair<typename _Container::value_type, _OrderType> voPair_t;
        std::vector<voPair_t> pv(src.size());
        for (_OrderType i = 0; i < src.size(); ++i)
        {
            pv[i] = std::make_pair(src[i], i);
        }

        std::stable_sort(pv.begin(), pv.end(), [&cmp](const voPair_t& a, const voPair_t& b)
        {
            return cmp(a.first, b.first);
        });
        if (rotate) std::rotate(pv.begin(), pv.begin() + rotate, pv.end());
        order = std::vector<_OrderType>(src.size());
        for (size_t i = 0; i < src.size(); ++i)
        {
            src[i] = pv[i].first;
            order[pv[i].second] = i;
        }
    }
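The comment block states the contract; a small worked example (illustrative only, not part of the package) makes the order mapping concrete:

#include <cstdint>
#include <vector>
#include "Utils.hpp" // hypothetical path to the header above

int main()
{
    std::vector<int> src = { 30, 10, 20 };
    std::vector<uint32_t> order;
    tomoto::sortAndWriteOrder(src, order);
    // src is now { 10, 20, 30 }
    // order == { 2, 0, 1 }: the 30 at old index 0 moved to new index 2,
    // the 10 at old index 1 to new index 0, the 20 at old index 2 to new index 1.
    return 0;
}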

    template <typename _BaseIter>
    struct FilteredIter : _BaseIter
    {
        using filterTy = std::function<bool(const typename std::iterator_traits<_BaseIter>::value_type&)>;

        FilteredIter() = default;
        FilteredIter(filterTy filter, _BaseIter base, _BaseIter end = {})
            : _BaseIter(base), _filter(filter), _end(end)
        {
            while (*this != _end && !_filter(**this)) {
                ++*this;
            }
        }

        FilteredIter& operator++()
        {
            do
            {
                _BaseIter::operator++();
            } while (*this != _end && !_filter(**this));
            return *this;
        }

        FilteredIter operator++(int)
        {
            FilteredIter copy = *this;
            ++*this;
            return copy;
        }

    private:
        filterTy _filter;
        _BaseIter _end;
    };

    template <typename _BaseIter>
    FilteredIter<_BaseIter> makeFilteredIter(
        typename FilteredIter<_BaseIter>::filterTy filter,
        _BaseIter base, _BaseIter end = {})
    {
        return { filter, base, end };
    }

    template <typename _UnaryFunc, typename _Iterator>
    class TransformIter
    {
    private:
        _Iterator i;
        _UnaryFunc f;
    public:
        using reference = typename std::result_of<
            const _UnaryFunc(typename std::iterator_traits<_Iterator>::reference)
        >::type;
        using value_type = reference;

        using pointer = void;
        using iterator_category = typename std::iterator_traits<_Iterator>::iterator_category;
        using difference_type = typename std::iterator_traits<_Iterator>::difference_type;

        TransformIter(const _Iterator& _iter = {}, _UnaryFunc _f = {})
            : i(_iter), f(_f)
        {}

        reference operator*()
        {
            return f(*i);
        }

        const reference operator*() const
        {
            return f(*i);
        }

        reference operator[](std::size_t idx)
        {
            return f(i[idx]);
        }

        const reference operator[](std::size_t idx) const
        {
            return f(i[idx]);
        }

        TransformIter& operator++()
        {
            ++i;
            return *this;
        }

        TransformIter& operator++(int)
        {
            auto c = *this;
            ++i;
            return c;
        }

        TransformIter& operator--()
        {
            --i;
            return *this;
        }

        TransformIter& operator--(int)
        {
            auto c = *this;
            --i;
            return c;
        }

        TransformIter operator+(int n) const
        {
            return { f, i + n };
        }

        TransformIter operator-(int n) const
        {
            return { f, i - n };
        }

        TransformIter& operator+=(int n)
        {
            i += n;
            return *this;
        }

        TransformIter& operator-=(int n)
        {
            i -= n;
            return *this;
        }

        typename std::iterator_traits<_Iterator>::difference_type operator-(const TransformIter& o) const
        {
            return i - o.i;
        }

        bool operator==(const TransformIter& o) const
        {
            return i == o.i;
        }

        bool operator!=(const TransformIter& o) const
        {
            return i != o.i;
        }

        bool operator<(const TransformIter& o) const
        {
            return i < o.i;
        }

        bool operator>(const TransformIter& o) const
        {
            return i > o.i;
        }

        bool operator<=(const TransformIter& o) const
        {
            return i <= o.i;
        }

        bool operator>=(const TransformIter& o) const
        {
            return i >= o.i;
        }
    };

    template <typename _UnaryFunc, typename _Iterator>
    TransformIter<_UnaryFunc, _Iterator> makeTransformIter(const _Iterator& iter, _UnaryFunc f)
    {
        return { iter, f };
    }
}
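FilteredIter and TransformIter are lazy views over an existing range: the first skips elements failing a predicate as it advances, the second applies a function on dereference without materializing a new container. A short sketch of both adaptors (illustrative only, not part of the package):

#include <cstdio>
#include <vector>
#include "Utils.hpp" // hypothetical path to the header above

int main()
{
    std::vector<int> v = { 1, 2, 3, 4, 5, 6 };
    auto isEven = [](const int& x) { return x % 2 == 0; };
    // Skips odd elements both on construction and on each increment; prints "2 4 6".
    for (auto it = tomoto::makeFilteredIter(isEven, v.begin(), v.end()); it != v.end(); ++it)
    {
        std::printf("%d ", *it);
    }
    auto sq = [](int x) { return x * x; };
    auto squared = tomoto::makeTransformIter(v.begin(), sq);
    std::printf("\n%d\n", squared[2]); // f applied lazily on access: prints 9
    return 0;
}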
data/vendor/tomotopy/src/Utils/avx_gamma.h
@@ -0,0 +1,46 @@
#pragma once
#include "avx_mathfun.h"

// approximation : lgamma(z) ~= (z+2.5)ln(z+3) - z - 3 + 0.5 ln (2pi) + 1/12/(z + 3) - ln (z(z+1)(z+2))
inline __m256 lgamma_ps(__m256 x)
{
    __m256 x_3 = _mm256_add_ps(x, _mm256_set1_ps(3));
    __m256 ret = _mm256_mul_ps(_mm256_add_ps(x_3, _mm256_set1_ps(-0.5f)), log_ps(x_3));
    ret = _mm256_sub_ps(ret, x_3);
    ret = _mm256_add_ps(ret, _mm256_set1_ps(0.91893853f));
    ret = _mm256_add_ps(ret, _mm256_div_ps(_mm256_set1_ps(1 / 12.f), x_3));
    ret = _mm256_sub_ps(ret, log_ps(_mm256_mul_ps(
        _mm256_mul_ps(_mm256_sub_ps(x_3, _mm256_set1_ps(1)), _mm256_sub_ps(x_3, _mm256_set1_ps(2))), x)));
    return ret;
}

// approximation : lgamma(z + a) - lgamma(z) = (z + a + 1.5) * log(z + a + 2) - (z + 1.5) * log(z + 2) - a + (1. / (z + a + 2) - 1. / (z + 2)) / 12. - log(((z + a) * (z + a + 1)) / (z * (z + 1)))
inline __m256 lgamma_subt(__m256 z, __m256 a)
{
    __m256 _1p5 = _mm256_set1_ps(1.5);
    __m256 _2 = _mm256_set1_ps(2);
    __m256 za = _mm256_add_ps(z, a);
    __m256 ret = _mm256_mul_ps(_mm256_add_ps(za, _1p5), log_ps(_mm256_add_ps(za, _2)));
    ret = _mm256_sub_ps(ret, _mm256_mul_ps(_mm256_add_ps(z, _1p5), log_ps(_mm256_add_ps(z, _2))));
    ret = _mm256_sub_ps(ret, a);
    __m256 _1 = _mm256_set1_ps(1);
    __m256 _1_12 = _mm256_set1_ps(1 / 12.f);
    ret = _mm256_add_ps(ret, _mm256_sub_ps(_mm256_div_ps(_1_12, _mm256_add_ps(za, _2)), _mm256_div_ps(_1_12, _mm256_add_ps(z, _2))));
    ret = _mm256_sub_ps(ret, log_ps(_mm256_div_ps(_mm256_div_ps(_mm256_mul_ps(za, _mm256_add_ps(za, _1)), z), _mm256_add_ps(z, _1))));
    return ret;
}


// approximation : digamma(z) ~= ln(z+4) - 1/2/(z+4) - 1/12/(z+4)^2 - 1/z - 1/(z+1) - 1/(z+2) - 1/(z+3)
inline __m256 digamma_ps(__m256 x)
{
    __m256 x_4 = _mm256_add_ps(x, _mm256_set1_ps(4));
    __m256 ret = log_ps(x_4);
    ret = _mm256_sub_ps(ret, _mm256_div_ps(_mm256_set1_ps(1 / 2.f), x_4));
    ret = _mm256_sub_ps(ret, _mm256_div_ps(_mm256_div_ps(_mm256_set1_ps(1 / 12.f), x_4), x_4));
    ret = _mm256_sub_ps(ret, _mm256_rcp_ps(_mm256_sub_ps(x_4, _mm256_set1_ps(1))));
    ret = _mm256_sub_ps(ret, _mm256_rcp_ps(_mm256_sub_ps(x_4, _mm256_set1_ps(2))));
    ret = _mm256_sub_ps(ret, _mm256_rcp_ps(_mm256_sub_ps(x_4, _mm256_set1_ps(3))));
    ret = _mm256_sub_ps(ret, _mm256_rcp_ps(_mm256_sub_ps(x_4, _mm256_set1_ps(4))));
    return ret;
}
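For readers checking the formula in the comments: below is a scalar transcription (illustrative only, not part of the package) of the shifted-Stirling approximation lgamma_ps vectorizes, compared against std::lgamma. The constant 0.91893853 is 0.5*ln(2*pi); shifting the argument by 3 and subtracting ln(z(z+1)(z+2)) afterwards keeps the series accurate for small z, where Stirling's formula alone is poor.

#include <cmath>
#include <cstdio>

float lgamma_approx(float z)
{
    float z3 = z + 3.0f; // shift the argument into the range where the series is accurate
    return (z3 - 0.5f) * std::log(z3) - z3 + 0.91893853f
        + 1.0f / (12.0f * z3)
        - std::log((z3 - 1.0f) * (z3 - 2.0f) * z); // undo the shift: ln(z(z+1)(z+2))
}

int main()
{
    for (float z : { 0.5f, 1.0f, 2.5f, 10.0f })
        std::printf("z=%g approx=%f exact=%f\n", z, lgamma_approx(z), std::lgamma(z));
    return 0;
}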
data/vendor/tomotopy/src/Utils/avx_mathfun.h
@@ -0,0 +1,736 @@
/*
   AVX implementation of sin, cos, sincos, exp and log

   Based on "sse_mathfun.h", by Julien Pommier
   http://gruntthepeon.free.fr/ssemath/

   Copyright (C) 2012 Giovanni Garberoglio
   Interdisciplinary Laboratory for Computational Science (LISC)
   Fondazione Bruno Kessler and University of Trento
   via Sommarive, 18
   I-38123 Trento (Italy)

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.

  (this is the zlib license)
*/

#include <immintrin.h>

/* yes I know, the top of this file is quite ugly */
#ifdef _MSC_VER
#define ALIGN32_BEG __declspec(align(32))
#define ALIGN32_END
#define __attribute__(x)
#else
#define ALIGN32_BEG
#define ALIGN32_END __attribute__((aligned(32)))
#endif

/* __m128 is ugly to write */
typedef __m256 v8sf; // std::vector of 8 float (avx)
typedef __m256i v8si; // std::vector of 8 int (avx)
typedef __m128i v4si; // std::vector of 8 int (avx)

#define _PI32AVX_CONST(Name, Val) \
  static const ALIGN32_BEG int _pi32avx_##Name[4] ALIGN32_END = { Val, Val, Val, Val }

_PI32AVX_CONST(1, 1);
_PI32AVX_CONST(inv1, ~1);
_PI32AVX_CONST(2, 2);
_PI32AVX_CONST(4, 4);


/* declare some AVX constants -- why can't I figure a better way to do that? */
#define _PS256_CONST(Name, Val) \
  static const ALIGN32_BEG float _ps256_##Name[8] ALIGN32_END = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PI32_CONST256(Name, Val) \
  static const ALIGN32_BEG int _pi32_256_##Name[8] ALIGN32_END = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PS256_CONST_TYPE(Name, Type, Val) \
  static const ALIGN32_BEG Type _ps256_##Name[8] ALIGN32_END = { Val, Val, Val, Val, Val, Val, Val, Val }

_PS256_CONST(1, 1.0f);
_PS256_CONST(0p5, 0.5f);
/* the smallest non denormalized float number */
_PS256_CONST_TYPE(min_norm_pos, unsigned int, 0x00800000u);
_PS256_CONST_TYPE(mant_mask, unsigned int, 0x7f800000u);
_PS256_CONST_TYPE(inv_mant_mask, unsigned int, ~0x7f800000u);

_PS256_CONST_TYPE(sign_mask, unsigned int, 0x80000000u);
_PS256_CONST_TYPE(inv_sign_mask, unsigned int, ~0x80000000u);

_PI32_CONST256(0, 0);
_PI32_CONST256(1, 1);
_PI32_CONST256(inv1, ~1);
_PI32_CONST256(2, 2);
_PI32_CONST256(4, 4);
_PI32_CONST256(0x7f, 0x7f);

_PS256_CONST(cephes_SQRTHF, 0.707106781186547524f);
_PS256_CONST(cephes_log_p0, 7.0376836292E-2f);
_PS256_CONST(cephes_log_p1, -1.1514610310E-1f);
_PS256_CONST(cephes_log_p2, 1.1676998740E-1f);
_PS256_CONST(cephes_log_p3, -1.2420140846E-1f);
_PS256_CONST(cephes_log_p4, +1.4249322787E-1f);
_PS256_CONST(cephes_log_p5, -1.6668057665E-1f);
_PS256_CONST(cephes_log_p6, +2.0000714765E-1f);
_PS256_CONST(cephes_log_p7, -2.4999993993E-1f);
_PS256_CONST(cephes_log_p8, +3.3333331174E-1f);
_PS256_CONST(cephes_log_q1, -2.12194440e-4f);
_PS256_CONST(cephes_log_q2, 0.693359375f);

#ifndef __AVX2__

typedef union imm_xmm_union {
  v8si imm;
  v4si xmm[2];
} imm_xmm_union;

#define COPY_IMM_TO_XMM(imm_, xmm0_, xmm1_) { \
    imm_xmm_union u __attribute__((aligned(32))); \
    u.imm = imm_; \
    xmm0_ = u.xmm[0]; \
    xmm1_ = u.xmm[1]; \
}

#define COPY_XMM_TO_IMM(xmm0_, xmm1_, imm_) { \
    imm_xmm_union u __attribute__((aligned(32))); \
    u.xmm[0]=xmm0_; u.xmm[1]=xmm1_; imm_ = u.imm; \
}

#define AVX2_BITOP_USING_SSE2(fn) \
  static inline v8si p_mm256_##fn(v8si x, int a) \
  { \
    /* use SSE2 instruction to perform the bitop AVX2 */ \
    v4si x1, x2; \
    v8si ret; \
    COPY_IMM_TO_XMM(x, x1, x2); \
    x1 = _mm_##fn(x1,a); \
    x2 = _mm_##fn(x2,a); \
    COPY_XMM_TO_IMM(x1, x2, ret); \
    return(ret); \
  }

#define AVX2_INTOP_USING_SSE2(fn) \
  static inline v8si p_mm256_##fn(v8si x, v8si y) \
  { \
    /* use SSE2 instructions to perform the AVX2 integer operation */ \
    v4si x1, x2; \
    v4si y1, y2; \
    v8si ret; \
    COPY_IMM_TO_XMM(x, x1, x2); \
    COPY_IMM_TO_XMM(y, y1, y2); \
    x1 = _mm_##fn(x1,y1); \
    x2 = _mm_##fn(x2,y2); \
    COPY_XMM_TO_IMM(x1, x2, ret); \
    return(ret); \
  }

#define AVX2_INTOP_USING_SSE2_128(fn) \
  static inline v8si p_mm256_##fn##256(v8si x, v8si y) \
  { \
    /* use SSE2 instructions to perform the AVX2 integer operation */ \
    v4si x1, x2; \
    v4si y1, y2; \
    v8si ret; \
    COPY_IMM_TO_XMM(x, x1, x2); \
    COPY_IMM_TO_XMM(y, y1, y2); \
    x1 = _mm_##fn##128(x1,y1); \
    x2 = _mm_##fn##128(x2,y2); \
    COPY_XMM_TO_IMM(x1, x2, ret); \
    return(ret); \
  }
#else

#define AVX2_BITOP_USING_SSE2(fn) \
  static inline v8si p_mm256_##fn(v8si x, int a) { return _mm256_##fn(x, a); }

#define AVX2_INTOP_USING_SSE2(fn) \
  static inline v8si p_mm256_##fn(v8si x, v8si y) { return _mm256_##fn(x, y); }

#define AVX2_INTOP_USING_SSE2_128(fn) \
  static inline v8si p_mm256_##fn##256(v8si x, v8si y) { return _mm256_##fn##256(x, y); }

#endif /* __AVX2__ */

AVX2_BITOP_USING_SSE2(slli_epi32)
AVX2_BITOP_USING_SSE2(srli_epi32)

AVX2_INTOP_USING_SSE2(cmpeq_epi32)
AVX2_INTOP_USING_SSE2(sub_epi32)
AVX2_INTOP_USING_SSE2(add_epi32)
176
|
+
|
177
|
+
/* natural logarithm computed for 8 simultaneous float
|
178
|
+
return NaN for x <= 0
|
179
|
+
*/
|
180
|
+
inline v8sf log_ps(v8sf x) {
|
181
|
+
v8si imm0;
|
182
|
+
v8sf one = *(v8sf*)_ps256_1;
|
183
|
+
|
184
|
+
//v8sf invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps());
|
185
|
+
v8sf invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS);
|
186
|
+
|
187
|
+
x = _mm256_max_ps(x, *(v8sf*)_ps256_min_norm_pos); /* cut off denormalized stuff */
|
188
|
+
|
189
|
+
// can be done with AVX2
|
190
|
+
imm0 = p_mm256_srli_epi32(_mm256_castps_si256(x), 23);
|
191
|
+
|
192
|
+
/* keep only the fractional part */
|
193
|
+
x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_mant_mask);
|
194
|
+
x = _mm256_or_ps(x, *(v8sf*)_ps256_0p5);
|
195
|
+
|
196
|
+
// this is again another AVX2 instruction
|
197
|
+
imm0 = p_mm256_sub_epi32(imm0, *(v8si*)_pi32_256_0x7f);
|
198
|
+
v8sf e = _mm256_cvtepi32_ps(imm0);
|
199
|
+
|
200
|
+
e = _mm256_add_ps(e, one);
|
201
|
+
|
202
|
+
/* part2:
|
203
|
+
if( x < SQRTHF ) {
|
204
|
+
e -= 1;
|
205
|
+
x = x + x - 1.0;
|
206
|
+
} else { x = x - 1.0; }
|
207
|
+
*/
|
208
|
+
//v8sf mask = _mm256_cmplt_ps(x, *(v8sf*)_ps256_cephes_SQRTHF);
|
209
|
+
v8sf mask = _mm256_cmp_ps(x, *(v8sf*)_ps256_cephes_SQRTHF, _CMP_LT_OS);
|
210
|
+
v8sf tmp = _mm256_and_ps(x, mask);
|
211
|
+
x = _mm256_sub_ps(x, one);
|
212
|
+
e = _mm256_sub_ps(e, _mm256_and_ps(one, mask));
|
213
|
+
x = _mm256_add_ps(x, tmp);
|
214
|
+
|
215
|
+
v8sf z = _mm256_mul_ps(x, x);
|
216
|
+
|
217
|
+
v8sf y = *(v8sf*)_ps256_cephes_log_p0;
|
218
|
+
y = _mm256_mul_ps(y, x);
|
219
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p1);
|
220
|
+
y = _mm256_mul_ps(y, x);
|
221
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p2);
|
222
|
+
y = _mm256_mul_ps(y, x);
|
223
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p3);
|
224
|
+
y = _mm256_mul_ps(y, x);
|
225
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p4);
|
226
|
+
y = _mm256_mul_ps(y, x);
|
227
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p5);
|
228
|
+
y = _mm256_mul_ps(y, x);
|
229
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p6);
|
230
|
+
y = _mm256_mul_ps(y, x);
|
231
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p7);
|
232
|
+
y = _mm256_mul_ps(y, x);
|
233
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p8);
|
234
|
+
y = _mm256_mul_ps(y, x);
|
235
|
+
|
236
|
+
y = _mm256_mul_ps(y, z);
|
237
|
+
|
238
|
+
tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q1);
|
239
|
+
y = _mm256_add_ps(y, tmp);
|
240
|
+
|
241
|
+
|
242
|
+
tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
|
243
|
+
y = _mm256_sub_ps(y, tmp);
|
244
|
+
|
245
|
+
tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q2);
|
246
|
+
x = _mm256_add_ps(x, y);
|
247
|
+
x = _mm256_add_ps(x, tmp);
|
248
|
+
x = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN
|
249
|
+
return x;
|
250
|
+
}
|
251
|
+
|
252
|
+
_PS256_CONST(exp_hi, 88.3762626647949f);
|
253
|
+
_PS256_CONST(exp_lo, -88.3762626647949f);
|
254
|
+
|
255
|
+
_PS256_CONST(cephes_LOG2EF, 1.44269504088896341f);
|
256
|
+
_PS256_CONST(cephes_exp_C1, 0.693359375f);
|
257
|
+
_PS256_CONST(cephes_exp_C2, -2.12194440e-4f);
|
258
|
+
|
259
|
+
_PS256_CONST(cephes_exp_p0, 1.9875691500E-4f);
|
260
|
+
_PS256_CONST(cephes_exp_p1, 1.3981999507E-3f);
|
261
|
+
_PS256_CONST(cephes_exp_p2, 8.3334519073E-3f);
|
262
|
+
_PS256_CONST(cephes_exp_p3, 4.1665795894E-2f);
|
263
|
+
_PS256_CONST(cephes_exp_p4, 1.6666665459E-1f);
|
264
|
+
_PS256_CONST(cephes_exp_p5, 5.0000001201E-1f);
|
265
|
+
|
266
|
+
inline v8sf exp_ps(v8sf x) {
|
267
|
+
v8sf tmp = _mm256_setzero_ps(), fx;
|
268
|
+
v8si imm0;
|
269
|
+
v8sf one = *(v8sf*)_ps256_1;
|
270
|
+
|
271
|
+
x = _mm256_min_ps(x, *(v8sf*)_ps256_exp_hi);
|
272
|
+
x = _mm256_max_ps(x, *(v8sf*)_ps256_exp_lo);
|
273
|
+
|
274
|
+
/* express exp(x) as exp(g + n*log(2)) */
|
275
|
+
fx = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_LOG2EF);
|
276
|
+
fx = _mm256_add_ps(fx, *(v8sf*)_ps256_0p5);
|
277
|
+
|
278
|
+
/* how to perform a floorf with SSE: just below */
|
279
|
+
//imm0 = _mm256_cvttps_epi32(fx);
|
280
|
+
//tmp = _mm256_cvtepi32_ps(imm0);
|
281
|
+
|
282
|
+
tmp = _mm256_floor_ps(fx);
|
283
|
+
|
284
|
+
/* if greater, substract 1 */
|
285
|
+
//v8sf mask = _mm256_cmpgt_ps(tmp, fx);
|
286
|
+
v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
|
287
|
+
mask = _mm256_and_ps(mask, one);
|
288
|
+
fx = _mm256_sub_ps(tmp, mask);
|
289
|
+
|
290
|
+
tmp = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C1);
|
291
|
+
v8sf z = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C2);
|
292
|
+
x = _mm256_sub_ps(x, tmp);
|
293
|
+
x = _mm256_sub_ps(x, z);
|
294
|
+
|
295
|
+
z = _mm256_mul_ps(x, x);
|
296
|
+
|
297
|
+
v8sf y = *(v8sf*)_ps256_cephes_exp_p0;
|
298
|
+
y = _mm256_mul_ps(y, x);
|
299
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p1);
|
300
|
+
y = _mm256_mul_ps(y, x);
|
301
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p2);
|
302
|
+
y = _mm256_mul_ps(y, x);
|
303
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p3);
|
304
|
+
y = _mm256_mul_ps(y, x);
|
305
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p4);
|
306
|
+
y = _mm256_mul_ps(y, x);
|
307
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p5);
|
308
|
+
y = _mm256_mul_ps(y, z);
|
309
|
+
y = _mm256_add_ps(y, x);
|
310
|
+
y = _mm256_add_ps(y, one);
|
311
|
+
|
312
|
+
/* build 2^n */
|
313
|
+
imm0 = _mm256_cvttps_epi32(fx);
|
314
|
+
// another two AVX2 instructions
|
315
|
+
imm0 = p_mm256_add_epi32(imm0, *(v8si*)_pi32_256_0x7f);
|
316
|
+
imm0 = p_mm256_slli_epi32(imm0, 23);
|
317
|
+
v8sf pow2n = _mm256_castsi256_ps(imm0);
|
318
|
+
y = _mm256_mul_ps(y, pow2n);
|
319
|
+
return y;
|
320
|
+
}
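Both routines operate on 8 packed floats per call; exp_ps uses the classic cephes range reduction exp(x) = 2^n * exp(g), with n recovered from floor(x * log2(e) + 0.5) and the polynomial evaluated on the reduced g. A quick round-trip check (illustrative only, not part of the package; the include path is hypothetical):

#include <cstdio>
#include <immintrin.h>
#include "avx_mathfun.h" // hypothetical path to the file above

int main()
{
    alignas(32) float in[8] = { 0.5f, 1, 2, 4, 8, 16, 32, 64 };
    alignas(32) float out[8];
    __m256 v = _mm256_load_ps(in);
    _mm256_store_ps(out, exp_ps(log_ps(v))); // should round-trip to the inputs (to float precision)
    for (float f : out) std::printf("%g ", f);
    return 0;
}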
|
321
|
+
|
322
|
+
_PS256_CONST(minus_cephes_DP1, -0.78515625f);
|
323
|
+
_PS256_CONST(minus_cephes_DP2, -2.4187564849853515625e-4f);
|
324
|
+
_PS256_CONST(minus_cephes_DP3, -3.77489497744594108e-8f);
|
325
|
+
_PS256_CONST(sincof_p0, -1.9515295891E-4f);
|
326
|
+
_PS256_CONST(sincof_p1, 8.3321608736E-3f);
|
327
|
+
_PS256_CONST(sincof_p2, -1.6666654611E-1f);
|
328
|
+
_PS256_CONST(coscof_p0, 2.443315711809948E-005f);
|
329
|
+
_PS256_CONST(coscof_p1, -1.388731625493765E-003f);
|
330
|
+
_PS256_CONST(coscof_p2, 4.166664568298827E-002f);
|
331
|
+
_PS256_CONST(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
|
332
|
+
|
333
|
+
|
334
|
+
/* evaluation of 8 sines at onces using AVX intrisics
|
335
|
+
|
336
|
+
The code is the exact rewriting of the cephes sinf function.
|
337
|
+
Precision is excellent as long as x < 8192 (I did not bother to
|
338
|
+
take into account the special handling they have for greater values
|
339
|
+
-- it does not return garbage for arguments over 8192, though, but
|
340
|
+
the extra precision is missing).
|
341
|
+
|
342
|
+
Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
|
343
|
+
surprising but correct result.
|
344
|
+
|
345
|
+
*/
|
346
|
+
inline v8sf sin_ps(v8sf x) { // any x
|
347
|
+
v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, sign_bit, y;
|
348
|
+
v8si imm0, imm2;
|
349
|
+
|
350
|
+
#ifndef __AVX2__
|
351
|
+
v4si imm0_1, imm0_2;
|
352
|
+
v4si imm2_1, imm2_2;
|
353
|
+
#endif
|
354
|
+
|
355
|
+
sign_bit = x;
|
356
|
+
/* take the absolute value */
|
357
|
+
x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
|
358
|
+
/* extract the sign bit (upper one) */
|
359
|
+
sign_bit = _mm256_and_ps(sign_bit, *(v8sf*)_ps256_sign_mask);
|
360
|
+
|
361
|
+
/* scale by 4/Pi */
|
362
|
+
y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
|
363
|
+
|
364
|
+
/*
|
365
|
+
Here we start a series of integer operations, which are in the
|
366
|
+
realm of AVX2.
|
367
|
+
If we don't have AVX, let's perform them using SSE2 directives
|
368
|
+
*/
|
369
|
+
|
370
|
+
#ifdef __AVX2__
|
371
|
+
/* store the integer part of y in mm0 */
|
372
|
+
imm2 = _mm256_cvttps_epi32(y);
|
373
|
+
/* j=(j+1) & (~1) (see the cephes sources) */
|
374
|
+
// another two AVX2 instruction
|
375
|
+
imm2 = p_mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
|
376
|
+
imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
|
377
|
+
y = _mm256_cvtepi32_ps(imm2);
|
378
|
+
|
379
|
+
/* get the swap sign flag */
|
380
|
+
imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
|
381
|
+
imm0 = p_mm256_slli_epi32(imm0, 29);
|
382
|
+
/* get the polynom selection mask
|
383
|
+
there is one polynom for 0 <= x <= Pi/4
|
384
|
+
and another one for Pi/4<x<=Pi/2
|
385
|
+
|
386
|
+
Both branches will be computed.
|
387
|
+
*/
|
388
|
+
imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
|
389
|
+
imm2 = p_mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
|
390
|
+
#else
|
391
|
+
/* we use SSE2 routines to perform the integer ops */
|
392
|
+
COPY_IMM_TO_XMM(_mm256_cvttps_epi32(y), imm2_1, imm2_2);
|
393
|
+
|
394
|
+
imm2_1 = _mm_add_epi32(imm2_1, *(v4si*)_pi32avx_1);
|
395
|
+
imm2_2 = _mm_add_epi32(imm2_2, *(v4si*)_pi32avx_1);
|
396
|
+
|
397
|
+
imm2_1 = _mm_and_si128(imm2_1, *(v4si*)_pi32avx_inv1);
|
398
|
+
imm2_2 = _mm_and_si128(imm2_2, *(v4si*)_pi32avx_inv1);
|
399
|
+
|
400
|
+
COPY_XMM_TO_IMM(imm2_1, imm2_2, imm2);
|
401
|
+
y = _mm256_cvtepi32_ps(imm2);
|
402
|
+
|
403
|
+
imm0_1 = _mm_and_si128(imm2_1, *(v4si*)_pi32avx_4);
|
404
|
+
imm0_2 = _mm_and_si128(imm2_2, *(v4si*)_pi32avx_4);
|
405
|
+
|
406
|
+
imm0_1 = _mm_slli_epi32(imm0_1, 29);
|
407
|
+
imm0_2 = _mm_slli_epi32(imm0_2, 29);
|
408
|
+
|
409
|
+
COPY_XMM_TO_IMM(imm0_1, imm0_2, imm0);
|
410
|
+
|
411
|
+
imm2_1 = _mm_and_si128(imm2_1, *(v4si*)_pi32avx_2);
|
412
|
+
imm2_2 = _mm_and_si128(imm2_2, *(v4si*)_pi32avx_2);
|
413
|
+
|
414
|
+
imm2_1 = _mm_cmpeq_epi32(imm2_1, _mm_setzero_si128());
|
415
|
+
imm2_2 = _mm_cmpeq_epi32(imm2_2, _mm_setzero_si128());
|
416
|
+
|
417
|
+
COPY_XMM_TO_IMM(imm2_1, imm2_2, imm2);
|
418
|
+
#endif
|
419
|
+
|
420
|
+
v8sf swap_sign_bit = _mm256_castsi256_ps(imm0);
|
421
|
+
v8sf poly_mask = _mm256_castsi256_ps(imm2);
|
422
|
+
sign_bit = _mm256_xor_ps(sign_bit, swap_sign_bit);
|
423
|
+
|
424
|
+
/* The magic pass: "Extended precision modular arithmetic"
|
425
|
+
x = ((x - y * DP1) - y * DP2) - y * DP3; */
|
426
|
+
xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
|
427
|
+
xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
|
428
|
+
xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
|
429
|
+
xmm1 = _mm256_mul_ps(y, xmm1);
|
430
|
+
xmm2 = _mm256_mul_ps(y, xmm2);
|
431
|
+
xmm3 = _mm256_mul_ps(y, xmm3);
|
432
|
+
x = _mm256_add_ps(x, xmm1);
|
433
|
+
x = _mm256_add_ps(x, xmm2);
|
434
|
+
x = _mm256_add_ps(x, xmm3);
|
435
|
+
|
436
|
+
/* Evaluate the first polynom (0 <= x <= Pi/4) */
|
437
|
+
y = *(v8sf*)_ps256_coscof_p0;
|
438
|
+
v8sf z = _mm256_mul_ps(x, x);
|
439
|
+
|
440
|
+
y = _mm256_mul_ps(y, z);
|
441
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
|
442
|
+
y = _mm256_mul_ps(y, z);
|
443
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
|
444
|
+
y = _mm256_mul_ps(y, z);
|
445
|
+
y = _mm256_mul_ps(y, z);
|
446
|
+
v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
|
447
|
+
y = _mm256_sub_ps(y, tmp);
|
448
|
+
y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
|
449
|
+
|
450
|
+
/* Evaluate the second polynom (Pi/4 <= x <= 0) */
|
451
|
+
|
452
|
+
v8sf y2 = *(v8sf*)_ps256_sincof_p0;
|
453
|
+
y2 = _mm256_mul_ps(y2, z);
|
454
|
+
y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
|
455
|
+
y2 = _mm256_mul_ps(y2, z);
|
456
|
+
y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
|
457
|
+
y2 = _mm256_mul_ps(y2, z);
|
458
|
+
y2 = _mm256_mul_ps(y2, x);
|
459
|
+
y2 = _mm256_add_ps(y2, x);
|
460
|
+
|
461
|
+
/* select the correct result from the two polynoms */
|
462
|
+
xmm3 = poly_mask;
|
463
|
+
y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
|
464
|
+
y = _mm256_andnot_ps(xmm3, y);
|
465
|
+
y = _mm256_add_ps(y, y2);
|
466
|
+
/* update the sign */
|
467
|
+
y = _mm256_xor_ps(y, sign_bit);
|
468
|
+
|
469
|
+
return y;
|
470
|
+
}
|
471
|
+
|
472
|
+
/* almost the same as sin_ps */
inline v8sf cos_ps(v8sf x) { // any x
  v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, y;
  v8si imm0, imm2;

#ifndef __AVX2__
  v4si imm0_1, imm0_2;
  v4si imm2_1, imm2_2;
#endif

  /* take the absolute value */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);

  /* scale by 4/Pi */
  y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);

#ifdef __AVX2__
  /* store the integer part of y in imm2 */
  imm2 = _mm256_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  imm2 = p_mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
  y = _mm256_cvtepi32_ps(imm2);
  imm2 = p_mm256_sub_epi32(imm2, *(v8si*)_pi32_256_2);

  /* get the swap sign flag */
  imm0 = _mm256_andnot_si256(imm2, *(v8si*)_pi32_256_4);
  imm0 = p_mm256_slli_epi32(imm0, 29);
  /* get the polynomial selection mask */
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
  imm2 = p_mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
#else

  /* we use SSE2 routines to perform the integer ops */
  COPY_IMM_TO_XMM(_mm256_cvttps_epi32(y), imm2_1, imm2_2);

  imm2_1 = _mm_add_epi32(imm2_1, *(v4si*)_pi32avx_1);
  imm2_2 = _mm_add_epi32(imm2_2, *(v4si*)_pi32avx_1);

  imm2_1 = _mm_and_si128(imm2_1, *(v4si*)_pi32avx_inv1);
  imm2_2 = _mm_and_si128(imm2_2, *(v4si*)_pi32avx_inv1);

  COPY_XMM_TO_IMM(imm2_1, imm2_2, imm2);
  y = _mm256_cvtepi32_ps(imm2);

  imm2_1 = _mm_sub_epi32(imm2_1, *(v4si*)_pi32avx_2);
  imm2_2 = _mm_sub_epi32(imm2_2, *(v4si*)_pi32avx_2);

  imm0_1 = _mm_andnot_si128(imm2_1, *(v4si*)_pi32avx_4);
  imm0_2 = _mm_andnot_si128(imm2_2, *(v4si*)_pi32avx_4);

  imm0_1 = _mm_slli_epi32(imm0_1, 29);
  imm0_2 = _mm_slli_epi32(imm0_2, 29);

  COPY_XMM_TO_IMM(imm0_1, imm0_2, imm0);

  imm2_1 = _mm_and_si128(imm2_1, *(v4si*)_pi32avx_2);
  imm2_2 = _mm_and_si128(imm2_2, *(v4si*)_pi32avx_2);

  imm2_1 = _mm_cmpeq_epi32(imm2_1, _mm_setzero_si128());
  imm2_2 = _mm_cmpeq_epi32(imm2_2, _mm_setzero_si128());

  COPY_XMM_TO_IMM(imm2_1, imm2_2, imm2);
#endif

  v8sf sign_bit = _mm256_castsi256_ps(imm0);
  v8sf poly_mask = _mm256_castsi256_ps(imm2);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
  xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
  xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
  xmm1 = _mm256_mul_ps(y, xmm1);
  xmm2 = _mm256_mul_ps(y, xmm2);
  xmm3 = _mm256_mul_ps(y, xmm3);
  x = _mm256_add_ps(x, xmm1);
  x = _mm256_add_ps(x, xmm2);
  x = _mm256_add_ps(x, xmm3);

  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
  y = *(v8sf*)_ps256_coscof_p0;
  v8sf z = _mm256_mul_ps(x, x);

  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
  y = _mm256_mul_ps(y, z);
  y = _mm256_mul_ps(y, z);
  v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_1);

  /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */

  v8sf y2 = *(v8sf*)_ps256_sincof_p0;
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_mul_ps(y2, x);
  y2 = _mm256_add_ps(y2, x);

  /* select the correct result from the two polynomials */
  xmm3 = poly_mask;
  y2 = _mm256_and_ps(xmm3, y2);
  y = _mm256_andnot_ps(xmm3, y);
  y = _mm256_add_ps(y, y2);
  /* update the sign */
  y = _mm256_xor_ps(y, sign_bit);

  return y;
}
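/* [Editor's note] Scalar model of the quadrant bookkeeping shared by
   sin_ps and cos_ps above (a sketch, not part of the original source).
   After scaling by 4/Pi, j counts eighths of Pi; rounding j up to even
   leaves bit 1 selecting the polynomial and bit 2 the sign.  cos_ps is
   the same walk shifted by a quadrant: it works with j - 2 and inverts
   the sign test (the andnot with 4), i.e. cos(x) = sin(x + Pi/2).  The
   polynomials here are crude Taylor stand-ins for coscof/sincof. */
#if 0 /* illustration only */
static float scalar_sin_model(float x)
{
  float sign = (x < 0.f) ? -1.f : 1.f;
  x = fabsf(x);
  int j = (int)(x * 1.27323954473516f);       /* 4/Pi */
  j = (j + 1) & ~1;                           /* j = (j+1) & (~1) */
  float y = (float)j;
  if (j & 4) sign = -sign;                    /* swap-sign flag */
  x = ((x - y * 0.78515625f)                  /* DP1..DP3 reduction */
        - y * 2.4187564849853515625e-4f)
        - y * 3.77489497744594108e-8f;
  float z = x * x;
  float r = (j & 2)
    ? 1.f - z * 0.5f + z * z * (1.f / 24.f)   /* cosine branch */
    : x * (1.f - z / 6.f + z * z / 120.f);    /* sine branch */
  return sign * r;
}
#endif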
/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them..
   it is almost as fast, and gives you a free cosine with your sine */
inline void sincos_ps(v8sf x, v8sf *s, v8sf *c) {

  v8sf xmm1, xmm2, xmm3 = _mm256_setzero_ps(), sign_bit_sin, y;
  v8si imm0, imm2, imm4;

#ifndef __AVX2__
  v4si imm0_1, imm0_2;
  v4si imm2_1, imm2_2;
  v4si imm4_1, imm4_2;
#endif

  sign_bit_sin = x;
  /* take the absolute value */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
  /* extract the sign bit (upper one) */
  sign_bit_sin = _mm256_and_ps(sign_bit_sin, *(v8sf*)_ps256_sign_mask);

  /* scale by 4/Pi */
  y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);

#ifdef __AVX2__
  /* store the integer part of y in imm2 */
  imm2 = _mm256_cvttps_epi32(y);

  /* j=(j+1) & (~1) (see the cephes sources) */
  imm2 = p_mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);

  y = _mm256_cvtepi32_ps(imm2);
  imm4 = imm2;

  /* get the swap sign flag for the sine */
  imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
  imm0 = p_mm256_slli_epi32(imm0, 29);
  //v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);

  /* get the polynomial selection mask for the sine */
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
  imm2 = p_mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
  //v8sf poly_mask = _mm256_castsi256_ps(imm2);
#else
  /* we use SSE2 routines to perform the integer ops */
  COPY_IMM_TO_XMM(_mm256_cvttps_epi32(y), imm2_1, imm2_2);

  imm2_1 = _mm_add_epi32(imm2_1, *(v4si*)_pi32avx_1);
  imm2_2 = _mm_add_epi32(imm2_2, *(v4si*)_pi32avx_1);

  imm2_1 = _mm_and_si128(imm2_1, *(v4si*)_pi32avx_inv1);
  imm2_2 = _mm_and_si128(imm2_2, *(v4si*)_pi32avx_inv1);

  COPY_XMM_TO_IMM(imm2_1, imm2_2, imm2);
  y = _mm256_cvtepi32_ps(imm2);

  imm4_1 = imm2_1;
  imm4_2 = imm2_2;

  imm0_1 = _mm_and_si128(imm2_1, *(v4si*)_pi32avx_4);
  imm0_2 = _mm_and_si128(imm2_2, *(v4si*)_pi32avx_4);

  imm0_1 = _mm_slli_epi32(imm0_1, 29);
  imm0_2 = _mm_slli_epi32(imm0_2, 29);

  COPY_XMM_TO_IMM(imm0_1, imm0_2, imm0);

  imm2_1 = _mm_and_si128(imm2_1, *(v4si*)_pi32avx_2);
  imm2_2 = _mm_and_si128(imm2_2, *(v4si*)_pi32avx_2);

  imm2_1 = _mm_cmpeq_epi32(imm2_1, _mm_setzero_si128());
  imm2_2 = _mm_cmpeq_epi32(imm2_2, _mm_setzero_si128());

  COPY_XMM_TO_IMM(imm2_1, imm2_2, imm2);
#endif
  v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
  v8sf poly_mask = _mm256_castsi256_ps(imm2);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
  xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
  xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
  xmm1 = _mm256_mul_ps(y, xmm1);
  xmm2 = _mm256_mul_ps(y, xmm2);
  xmm3 = _mm256_mul_ps(y, xmm3);
  x = _mm256_add_ps(x, xmm1);
  x = _mm256_add_ps(x, xmm2);
  x = _mm256_add_ps(x, xmm3);

#ifdef __AVX2__
  imm4 = p_mm256_sub_epi32(imm4, *(v8si*)_pi32_256_2);
  imm4 = _mm256_andnot_si256(imm4, *(v8si*)_pi32_256_4);
  imm4 = p_mm256_slli_epi32(imm4, 29);
#else
  imm4_1 = _mm_sub_epi32(imm4_1, *(v4si*)_pi32avx_2);
  imm4_2 = _mm_sub_epi32(imm4_2, *(v4si*)_pi32avx_2);

  imm4_1 = _mm_andnot_si128(imm4_1, *(v4si*)_pi32avx_4);
  imm4_2 = _mm_andnot_si128(imm4_2, *(v4si*)_pi32avx_4);

  imm4_1 = _mm_slli_epi32(imm4_1, 29);
  imm4_2 = _mm_slli_epi32(imm4_2, 29);

  COPY_XMM_TO_IMM(imm4_1, imm4_2, imm4);
#endif

  v8sf sign_bit_cos = _mm256_castsi256_ps(imm4);

  sign_bit_sin = _mm256_xor_ps(sign_bit_sin, swap_sign_bit_sin);

  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
  v8sf z = _mm256_mul_ps(x, x);
  y = *(v8sf*)_ps256_coscof_p0;

  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
  y = _mm256_mul_ps(y, z);
  y = _mm256_mul_ps(y, z);
  v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_1);

  /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */

  v8sf y2 = *(v8sf*)_ps256_sincof_p0;
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_mul_ps(y2, x);
  y2 = _mm256_add_ps(y2, x);

  /* select the correct result from the two polynomials */
  xmm3 = poly_mask;
  v8sf ysin2 = _mm256_and_ps(xmm3, y2);
  v8sf ysin1 = _mm256_andnot_ps(xmm3, y);
  y2 = _mm256_sub_ps(y2, ysin2);
  y = _mm256_sub_ps(y, ysin1);

  xmm1 = _mm256_add_ps(ysin1, ysin2);
  xmm2 = _mm256_add_ps(y, y2);
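  /* [Editor's note] The subtract-and-recombine step above reuses both
     polynomial evaluations for the two outputs: ysin1/ysin2 are the
     per-lane halves that belong to the sine result, while (y - ysin1)
     and (y2 - ysin2) are exactly the complementary halves, whose sum is
     the cosine result.  One pair of polynomial evaluations thus serves
     both sin and cos. */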
  /* update the sign */
  *s = _mm256_xor_ps(xmm1, sign_bit_sin);
  *c = _mm256_xor_ps(xmm2, sign_bit_cos);
}
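/* [Editor's note] Illustrative usage of sincos_ps (not part of the
   original source): one call yields both outputs, so callers needing
   sin and cos of the same angles can roughly halve the work.  Assumes
   <stdio.h> and <math.h> are included and v8sf is the __m256 typedef
   used in this header. */
#if 0 /* illustration only */
static void sincos_ps_demo(void)
{
  float angles[8] = { 0.f, 0.25f, 0.5f, 1.f, 2.f, 4.f, -1.f, -2.f };
  float s[8], c[8];
  v8sf vs, vc;
  sincos_ps(_mm256_loadu_ps(angles), &vs, &vc);
  _mm256_storeu_ps(s, vs);
  _mm256_storeu_ps(c, vc);
  for (int i = 0; i < 8; ++i)
    printf("x=% .2f sin=% .6f (libm % .6f) cos=% .6f (libm % .6f)\n",
           angles[i], s[i], sinf(angles[i]), c[i], cosf(angles[i]));
}
#endif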