tomoto 0.1.0

Files changed (420)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +3 -0
  3. data/LICENSE.txt +22 -0
  4. data/README.md +123 -0
  5. data/ext/tomoto/ext.cpp +245 -0
  6. data/ext/tomoto/extconf.rb +28 -0
  7. data/lib/tomoto.rb +12 -0
  8. data/lib/tomoto/ct.rb +11 -0
  9. data/lib/tomoto/hdp.rb +11 -0
  10. data/lib/tomoto/lda.rb +67 -0
  11. data/lib/tomoto/version.rb +3 -0
  12. data/vendor/EigenRand/EigenRand/Core.h +1139 -0
  13. data/vendor/EigenRand/EigenRand/Dists/Basic.h +111 -0
  14. data/vendor/EigenRand/EigenRand/Dists/Discrete.h +877 -0
  15. data/vendor/EigenRand/EigenRand/Dists/GammaPoisson.h +108 -0
  16. data/vendor/EigenRand/EigenRand/Dists/NormalExp.h +626 -0
  17. data/vendor/EigenRand/EigenRand/EigenRand +19 -0
  18. data/vendor/EigenRand/EigenRand/Macro.h +24 -0
  19. data/vendor/EigenRand/EigenRand/MorePacketMath.h +978 -0
  20. data/vendor/EigenRand/EigenRand/PacketFilter.h +286 -0
  21. data/vendor/EigenRand/EigenRand/PacketRandomEngine.h +624 -0
  22. data/vendor/EigenRand/EigenRand/RandUtils.h +413 -0
  23. data/vendor/EigenRand/EigenRand/doc.h +220 -0
  24. data/vendor/EigenRand/LICENSE +21 -0
  25. data/vendor/EigenRand/README.md +288 -0
  26. data/vendor/eigen/COPYING.BSD +26 -0
  27. data/vendor/eigen/COPYING.GPL +674 -0
  28. data/vendor/eigen/COPYING.LGPL +502 -0
  29. data/vendor/eigen/COPYING.MINPACK +52 -0
  30. data/vendor/eigen/COPYING.MPL2 +373 -0
  31. data/vendor/eigen/COPYING.README +18 -0
  32. data/vendor/eigen/Eigen/CMakeLists.txt +19 -0
  33. data/vendor/eigen/Eigen/Cholesky +46 -0
  34. data/vendor/eigen/Eigen/CholmodSupport +48 -0
  35. data/vendor/eigen/Eigen/Core +537 -0
  36. data/vendor/eigen/Eigen/Dense +7 -0
  37. data/vendor/eigen/Eigen/Eigen +2 -0
  38. data/vendor/eigen/Eigen/Eigenvalues +61 -0
  39. data/vendor/eigen/Eigen/Geometry +62 -0
  40. data/vendor/eigen/Eigen/Householder +30 -0
  41. data/vendor/eigen/Eigen/IterativeLinearSolvers +48 -0
  42. data/vendor/eigen/Eigen/Jacobi +33 -0
  43. data/vendor/eigen/Eigen/LU +50 -0
  44. data/vendor/eigen/Eigen/MetisSupport +35 -0
  45. data/vendor/eigen/Eigen/OrderingMethods +73 -0
  46. data/vendor/eigen/Eigen/PaStiXSupport +48 -0
  47. data/vendor/eigen/Eigen/PardisoSupport +35 -0
  48. data/vendor/eigen/Eigen/QR +51 -0
  49. data/vendor/eigen/Eigen/QtAlignedMalloc +40 -0
  50. data/vendor/eigen/Eigen/SPQRSupport +34 -0
  51. data/vendor/eigen/Eigen/SVD +51 -0
  52. data/vendor/eigen/Eigen/Sparse +36 -0
  53. data/vendor/eigen/Eigen/SparseCholesky +45 -0
  54. data/vendor/eigen/Eigen/SparseCore +69 -0
  55. data/vendor/eigen/Eigen/SparseLU +46 -0
  56. data/vendor/eigen/Eigen/SparseQR +37 -0
  57. data/vendor/eigen/Eigen/StdDeque +27 -0
  58. data/vendor/eigen/Eigen/StdList +26 -0
  59. data/vendor/eigen/Eigen/StdVector +27 -0
  60. data/vendor/eigen/Eigen/SuperLUSupport +64 -0
  61. data/vendor/eigen/Eigen/UmfPackSupport +40 -0
  62. data/vendor/eigen/Eigen/src/Cholesky/LDLT.h +673 -0
  63. data/vendor/eigen/Eigen/src/Cholesky/LLT.h +542 -0
  64. data/vendor/eigen/Eigen/src/Cholesky/LLT_LAPACKE.h +99 -0
  65. data/vendor/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +639 -0
  66. data/vendor/eigen/Eigen/src/Core/Array.h +329 -0
  67. data/vendor/eigen/Eigen/src/Core/ArrayBase.h +226 -0
  68. data/vendor/eigen/Eigen/src/Core/ArrayWrapper.h +209 -0
  69. data/vendor/eigen/Eigen/src/Core/Assign.h +90 -0
  70. data/vendor/eigen/Eigen/src/Core/AssignEvaluator.h +935 -0
  71. data/vendor/eigen/Eigen/src/Core/Assign_MKL.h +178 -0
  72. data/vendor/eigen/Eigen/src/Core/BandMatrix.h +353 -0
  73. data/vendor/eigen/Eigen/src/Core/Block.h +452 -0
  74. data/vendor/eigen/Eigen/src/Core/BooleanRedux.h +164 -0
  75. data/vendor/eigen/Eigen/src/Core/CommaInitializer.h +160 -0
  76. data/vendor/eigen/Eigen/src/Core/ConditionEstimator.h +175 -0
  77. data/vendor/eigen/Eigen/src/Core/CoreEvaluators.h +1688 -0
  78. data/vendor/eigen/Eigen/src/Core/CoreIterators.h +127 -0
  79. data/vendor/eigen/Eigen/src/Core/CwiseBinaryOp.h +184 -0
  80. data/vendor/eigen/Eigen/src/Core/CwiseNullaryOp.h +866 -0
  81. data/vendor/eigen/Eigen/src/Core/CwiseTernaryOp.h +197 -0
  82. data/vendor/eigen/Eigen/src/Core/CwiseUnaryOp.h +103 -0
  83. data/vendor/eigen/Eigen/src/Core/CwiseUnaryView.h +128 -0
  84. data/vendor/eigen/Eigen/src/Core/DenseBase.h +611 -0
  85. data/vendor/eigen/Eigen/src/Core/DenseCoeffsBase.h +681 -0
  86. data/vendor/eigen/Eigen/src/Core/DenseStorage.h +570 -0
  87. data/vendor/eigen/Eigen/src/Core/Diagonal.h +260 -0
  88. data/vendor/eigen/Eigen/src/Core/DiagonalMatrix.h +343 -0
  89. data/vendor/eigen/Eigen/src/Core/DiagonalProduct.h +28 -0
  90. data/vendor/eigen/Eigen/src/Core/Dot.h +318 -0
  91. data/vendor/eigen/Eigen/src/Core/EigenBase.h +159 -0
  92. data/vendor/eigen/Eigen/src/Core/ForceAlignedAccess.h +146 -0
  93. data/vendor/eigen/Eigen/src/Core/Fuzzy.h +155 -0
  94. data/vendor/eigen/Eigen/src/Core/GeneralProduct.h +455 -0
  95. data/vendor/eigen/Eigen/src/Core/GenericPacketMath.h +593 -0
  96. data/vendor/eigen/Eigen/src/Core/GlobalFunctions.h +187 -0
  97. data/vendor/eigen/Eigen/src/Core/IO.h +225 -0
  98. data/vendor/eigen/Eigen/src/Core/Inverse.h +118 -0
  99. data/vendor/eigen/Eigen/src/Core/Map.h +171 -0
  100. data/vendor/eigen/Eigen/src/Core/MapBase.h +303 -0
  101. data/vendor/eigen/Eigen/src/Core/MathFunctions.h +1415 -0
  102. data/vendor/eigen/Eigen/src/Core/MathFunctionsImpl.h +101 -0
  103. data/vendor/eigen/Eigen/src/Core/Matrix.h +459 -0
  104. data/vendor/eigen/Eigen/src/Core/MatrixBase.h +529 -0
  105. data/vendor/eigen/Eigen/src/Core/NestByValue.h +110 -0
  106. data/vendor/eigen/Eigen/src/Core/NoAlias.h +108 -0
  107. data/vendor/eigen/Eigen/src/Core/NumTraits.h +248 -0
  108. data/vendor/eigen/Eigen/src/Core/PermutationMatrix.h +633 -0
  109. data/vendor/eigen/Eigen/src/Core/PlainObjectBase.h +1035 -0
  110. data/vendor/eigen/Eigen/src/Core/Product.h +186 -0
  111. data/vendor/eigen/Eigen/src/Core/ProductEvaluators.h +1112 -0
  112. data/vendor/eigen/Eigen/src/Core/Random.h +182 -0
  113. data/vendor/eigen/Eigen/src/Core/Redux.h +505 -0
  114. data/vendor/eigen/Eigen/src/Core/Ref.h +283 -0
  115. data/vendor/eigen/Eigen/src/Core/Replicate.h +142 -0
  116. data/vendor/eigen/Eigen/src/Core/ReturnByValue.h +117 -0
  117. data/vendor/eigen/Eigen/src/Core/Reverse.h +211 -0
  118. data/vendor/eigen/Eigen/src/Core/Select.h +162 -0
  119. data/vendor/eigen/Eigen/src/Core/SelfAdjointView.h +352 -0
  120. data/vendor/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +47 -0
  121. data/vendor/eigen/Eigen/src/Core/Solve.h +188 -0
  122. data/vendor/eigen/Eigen/src/Core/SolveTriangular.h +235 -0
  123. data/vendor/eigen/Eigen/src/Core/SolverBase.h +130 -0
  124. data/vendor/eigen/Eigen/src/Core/StableNorm.h +221 -0
  125. data/vendor/eigen/Eigen/src/Core/Stride.h +111 -0
  126. data/vendor/eigen/Eigen/src/Core/Swap.h +67 -0
  127. data/vendor/eigen/Eigen/src/Core/Transpose.h +403 -0
  128. data/vendor/eigen/Eigen/src/Core/Transpositions.h +407 -0
  129. data/vendor/eigen/Eigen/src/Core/TriangularMatrix.h +983 -0
  130. data/vendor/eigen/Eigen/src/Core/VectorBlock.h +96 -0
  131. data/vendor/eigen/Eigen/src/Core/VectorwiseOp.h +695 -0
  132. data/vendor/eigen/Eigen/src/Core/Visitor.h +273 -0
  133. data/vendor/eigen/Eigen/src/Core/arch/AVX/Complex.h +451 -0
  134. data/vendor/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +439 -0
  135. data/vendor/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +637 -0
  136. data/vendor/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +51 -0
  137. data/vendor/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +391 -0
  138. data/vendor/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +1316 -0
  139. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +430 -0
  140. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +322 -0
  141. data/vendor/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +1061 -0
  142. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Complex.h +103 -0
  143. data/vendor/eigen/Eigen/src/Core/arch/CUDA/Half.h +674 -0
  144. data/vendor/eigen/Eigen/src/Core/arch/CUDA/MathFunctions.h +91 -0
  145. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMath.h +333 -0
  146. data/vendor/eigen/Eigen/src/Core/arch/CUDA/PacketMathHalf.h +1124 -0
  147. data/vendor/eigen/Eigen/src/Core/arch/CUDA/TypeCasting.h +212 -0
  148. data/vendor/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +29 -0
  149. data/vendor/eigen/Eigen/src/Core/arch/Default/Settings.h +49 -0
  150. data/vendor/eigen/Eigen/src/Core/arch/NEON/Complex.h +490 -0
  151. data/vendor/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +91 -0
  152. data/vendor/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +760 -0
  153. data/vendor/eigen/Eigen/src/Core/arch/SSE/Complex.h +471 -0
  154. data/vendor/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +562 -0
  155. data/vendor/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +895 -0
  156. data/vendor/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +77 -0
  157. data/vendor/eigen/Eigen/src/Core/arch/ZVector/Complex.h +397 -0
  158. data/vendor/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +137 -0
  159. data/vendor/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +945 -0
  160. data/vendor/eigen/Eigen/src/Core/functors/AssignmentFunctors.h +168 -0
  161. data/vendor/eigen/Eigen/src/Core/functors/BinaryFunctors.h +475 -0
  162. data/vendor/eigen/Eigen/src/Core/functors/NullaryFunctors.h +188 -0
  163. data/vendor/eigen/Eigen/src/Core/functors/StlFunctors.h +136 -0
  164. data/vendor/eigen/Eigen/src/Core/functors/TernaryFunctors.h +25 -0
  165. data/vendor/eigen/Eigen/src/Core/functors/UnaryFunctors.h +792 -0
  166. data/vendor/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +2156 -0
  167. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +492 -0
  168. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h +311 -0
  169. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h +145 -0
  170. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h +122 -0
  171. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +619 -0
  172. data/vendor/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h +136 -0
  173. data/vendor/eigen/Eigen/src/Core/products/Parallelizer.h +163 -0
  174. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix.h +521 -0
  175. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h +287 -0
  176. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector.h +260 -0
  177. data/vendor/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h +118 -0
  178. data/vendor/eigen/Eigen/src/Core/products/SelfadjointProduct.h +133 -0
  179. data/vendor/eigen/Eigen/src/Core/products/SelfadjointRank2Update.h +93 -0
  180. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix.h +466 -0
  181. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h +315 -0
  182. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector.h +350 -0
  183. data/vendor/eigen/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h +255 -0
  184. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix.h +335 -0
  185. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h +163 -0
  186. data/vendor/eigen/Eigen/src/Core/products/TriangularSolverVector.h +145 -0
  187. data/vendor/eigen/Eigen/src/Core/util/BlasUtil.h +398 -0
  188. data/vendor/eigen/Eigen/src/Core/util/Constants.h +547 -0
  189. data/vendor/eigen/Eigen/src/Core/util/DisableStupidWarnings.h +83 -0
  190. data/vendor/eigen/Eigen/src/Core/util/ForwardDeclarations.h +302 -0
  191. data/vendor/eigen/Eigen/src/Core/util/MKL_support.h +130 -0
  192. data/vendor/eigen/Eigen/src/Core/util/Macros.h +1001 -0
  193. data/vendor/eigen/Eigen/src/Core/util/Memory.h +993 -0
  194. data/vendor/eigen/Eigen/src/Core/util/Meta.h +534 -0
  195. data/vendor/eigen/Eigen/src/Core/util/NonMPL2.h +3 -0
  196. data/vendor/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h +27 -0
  197. data/vendor/eigen/Eigen/src/Core/util/StaticAssert.h +218 -0
  198. data/vendor/eigen/Eigen/src/Core/util/XprHelper.h +821 -0
  199. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexEigenSolver.h +346 -0
  200. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur.h +459 -0
  201. data/vendor/eigen/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h +91 -0
  202. data/vendor/eigen/Eigen/src/Eigenvalues/EigenSolver.h +622 -0
  203. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +418 -0
  204. data/vendor/eigen/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h +226 -0
  205. data/vendor/eigen/Eigen/src/Eigenvalues/HessenbergDecomposition.h +374 -0
  206. data/vendor/eigen/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +158 -0
  207. data/vendor/eigen/Eigen/src/Eigenvalues/RealQZ.h +654 -0
  208. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur.h +546 -0
  209. data/vendor/eigen/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h +77 -0
  210. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +870 -0
  211. data/vendor/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h +87 -0
  212. data/vendor/eigen/Eigen/src/Eigenvalues/Tridiagonalization.h +556 -0
  213. data/vendor/eigen/Eigen/src/Geometry/AlignedBox.h +392 -0
  214. data/vendor/eigen/Eigen/src/Geometry/AngleAxis.h +247 -0
  215. data/vendor/eigen/Eigen/src/Geometry/EulerAngles.h +114 -0
  216. data/vendor/eigen/Eigen/src/Geometry/Homogeneous.h +497 -0
  217. data/vendor/eigen/Eigen/src/Geometry/Hyperplane.h +282 -0
  218. data/vendor/eigen/Eigen/src/Geometry/OrthoMethods.h +234 -0
  219. data/vendor/eigen/Eigen/src/Geometry/ParametrizedLine.h +195 -0
  220. data/vendor/eigen/Eigen/src/Geometry/Quaternion.h +814 -0
  221. data/vendor/eigen/Eigen/src/Geometry/Rotation2D.h +199 -0
  222. data/vendor/eigen/Eigen/src/Geometry/RotationBase.h +206 -0
  223. data/vendor/eigen/Eigen/src/Geometry/Scaling.h +170 -0
  224. data/vendor/eigen/Eigen/src/Geometry/Transform.h +1542 -0
  225. data/vendor/eigen/Eigen/src/Geometry/Translation.h +208 -0
  226. data/vendor/eigen/Eigen/src/Geometry/Umeyama.h +166 -0
  227. data/vendor/eigen/Eigen/src/Geometry/arch/Geometry_SSE.h +161 -0
  228. data/vendor/eigen/Eigen/src/Householder/BlockHouseholder.h +103 -0
  229. data/vendor/eigen/Eigen/src/Householder/Householder.h +172 -0
  230. data/vendor/eigen/Eigen/src/Householder/HouseholderSequence.h +470 -0
  231. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +226 -0
  232. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +228 -0
  233. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +246 -0
  234. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +400 -0
  235. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +462 -0
  236. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +394 -0
  237. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h +216 -0
  238. data/vendor/eigen/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h +115 -0
  239. data/vendor/eigen/Eigen/src/Jacobi/Jacobi.h +462 -0
  240. data/vendor/eigen/Eigen/src/LU/Determinant.h +101 -0
  241. data/vendor/eigen/Eigen/src/LU/FullPivLU.h +891 -0
  242. data/vendor/eigen/Eigen/src/LU/InverseImpl.h +415 -0
  243. data/vendor/eigen/Eigen/src/LU/PartialPivLU.h +611 -0
  244. data/vendor/eigen/Eigen/src/LU/PartialPivLU_LAPACKE.h +83 -0
  245. data/vendor/eigen/Eigen/src/LU/arch/Inverse_SSE.h +338 -0
  246. data/vendor/eigen/Eigen/src/MetisSupport/MetisSupport.h +137 -0
  247. data/vendor/eigen/Eigen/src/OrderingMethods/Amd.h +445 -0
  248. data/vendor/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +1843 -0
  249. data/vendor/eigen/Eigen/src/OrderingMethods/Ordering.h +157 -0
  250. data/vendor/eigen/Eigen/src/PaStiXSupport/PaStiXSupport.h +678 -0
  251. data/vendor/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +543 -0
  252. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR.h +653 -0
  253. data/vendor/eigen/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h +97 -0
  254. data/vendor/eigen/Eigen/src/QR/CompleteOrthogonalDecomposition.h +562 -0
  255. data/vendor/eigen/Eigen/src/QR/FullPivHouseholderQR.h +676 -0
  256. data/vendor/eigen/Eigen/src/QR/HouseholderQR.h +409 -0
  257. data/vendor/eigen/Eigen/src/QR/HouseholderQR_LAPACKE.h +68 -0
  258. data/vendor/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +313 -0
  259. data/vendor/eigen/Eigen/src/SVD/BDCSVD.h +1246 -0
  260. data/vendor/eigen/Eigen/src/SVD/JacobiSVD.h +804 -0
  261. data/vendor/eigen/Eigen/src/SVD/JacobiSVD_LAPACKE.h +91 -0
  262. data/vendor/eigen/Eigen/src/SVD/SVDBase.h +315 -0
  263. data/vendor/eigen/Eigen/src/SVD/UpperBidiagonalization.h +414 -0
  264. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +689 -0
  265. data/vendor/eigen/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h +199 -0
  266. data/vendor/eigen/Eigen/src/SparseCore/AmbiVector.h +377 -0
  267. data/vendor/eigen/Eigen/src/SparseCore/CompressedStorage.h +258 -0
  268. data/vendor/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +352 -0
  269. data/vendor/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +67 -0
  270. data/vendor/eigen/Eigen/src/SparseCore/SparseAssign.h +216 -0
  271. data/vendor/eigen/Eigen/src/SparseCore/SparseBlock.h +603 -0
  272. data/vendor/eigen/Eigen/src/SparseCore/SparseColEtree.h +206 -0
  273. data/vendor/eigen/Eigen/src/SparseCore/SparseCompressedBase.h +341 -0
  274. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +726 -0
  275. data/vendor/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h +148 -0
  276. data/vendor/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +320 -0
  277. data/vendor/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h +138 -0
  278. data/vendor/eigen/Eigen/src/SparseCore/SparseDot.h +98 -0
  279. data/vendor/eigen/Eigen/src/SparseCore/SparseFuzzy.h +29 -0
  280. data/vendor/eigen/Eigen/src/SparseCore/SparseMap.h +305 -0
  281. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrix.h +1403 -0
  282. data/vendor/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +405 -0
  283. data/vendor/eigen/Eigen/src/SparseCore/SparsePermutation.h +178 -0
  284. data/vendor/eigen/Eigen/src/SparseCore/SparseProduct.h +169 -0
  285. data/vendor/eigen/Eigen/src/SparseCore/SparseRedux.h +49 -0
  286. data/vendor/eigen/Eigen/src/SparseCore/SparseRef.h +397 -0
  287. data/vendor/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h +656 -0
  288. data/vendor/eigen/Eigen/src/SparseCore/SparseSolverBase.h +124 -0
  289. data/vendor/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +198 -0
  290. data/vendor/eigen/Eigen/src/SparseCore/SparseTranspose.h +92 -0
  291. data/vendor/eigen/Eigen/src/SparseCore/SparseTriangularView.h +189 -0
  292. data/vendor/eigen/Eigen/src/SparseCore/SparseUtil.h +178 -0
  293. data/vendor/eigen/Eigen/src/SparseCore/SparseVector.h +478 -0
  294. data/vendor/eigen/Eigen/src/SparseCore/SparseView.h +253 -0
  295. data/vendor/eigen/Eigen/src/SparseCore/TriangularSolver.h +315 -0
  296. data/vendor/eigen/Eigen/src/SparseLU/SparseLU.h +773 -0
  297. data/vendor/eigen/Eigen/src/SparseLU/SparseLUImpl.h +66 -0
  298. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +226 -0
  299. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Structs.h +110 -0
  300. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +301 -0
  301. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_Utils.h +80 -0
  302. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +181 -0
  303. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_column_dfs.h +179 -0
  304. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h +107 -0
  305. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_gemm_kernel.h +280 -0
  306. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +126 -0
  307. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +130 -0
  308. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +223 -0
  309. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_panel_dfs.h +258 -0
  310. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +137 -0
  311. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_pruneL.h +136 -0
  312. data/vendor/eigen/Eigen/src/SparseLU/SparseLU_relax_snode.h +83 -0
  313. data/vendor/eigen/Eigen/src/SparseQR/SparseQR.h +745 -0
  314. data/vendor/eigen/Eigen/src/StlSupport/StdDeque.h +126 -0
  315. data/vendor/eigen/Eigen/src/StlSupport/StdList.h +106 -0
  316. data/vendor/eigen/Eigen/src/StlSupport/StdVector.h +131 -0
  317. data/vendor/eigen/Eigen/src/StlSupport/details.h +84 -0
  318. data/vendor/eigen/Eigen/src/SuperLUSupport/SuperLUSupport.h +1027 -0
  319. data/vendor/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +506 -0
  320. data/vendor/eigen/Eigen/src/misc/Image.h +82 -0
  321. data/vendor/eigen/Eigen/src/misc/Kernel.h +79 -0
  322. data/vendor/eigen/Eigen/src/misc/RealSvd2x2.h +55 -0
  323. data/vendor/eigen/Eigen/src/misc/blas.h +440 -0
  324. data/vendor/eigen/Eigen/src/misc/lapack.h +152 -0
  325. data/vendor/eigen/Eigen/src/misc/lapacke.h +16291 -0
  326. data/vendor/eigen/Eigen/src/misc/lapacke_mangling.h +17 -0
  327. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +332 -0
  328. data/vendor/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +552 -0
  329. data/vendor/eigen/Eigen/src/plugins/BlockMethods.h +1058 -0
  330. data/vendor/eigen/Eigen/src/plugins/CommonCwiseBinaryOps.h +115 -0
  331. data/vendor/eigen/Eigen/src/plugins/CommonCwiseUnaryOps.h +163 -0
  332. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +152 -0
  333. data/vendor/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +85 -0
  334. data/vendor/eigen/README.md +3 -0
  335. data/vendor/eigen/bench/README.txt +55 -0
  336. data/vendor/eigen/bench/btl/COPYING +340 -0
  337. data/vendor/eigen/bench/btl/README +154 -0
  338. data/vendor/eigen/bench/tensors/README +21 -0
  339. data/vendor/eigen/blas/README.txt +6 -0
  340. data/vendor/eigen/demos/mandelbrot/README +10 -0
  341. data/vendor/eigen/demos/mix_eigen_and_c/README +9 -0
  342. data/vendor/eigen/demos/opengl/README +13 -0
  343. data/vendor/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md +1760 -0
  344. data/vendor/eigen/unsupported/README.txt +50 -0
  345. data/vendor/tomotopy/LICENSE +21 -0
  346. data/vendor/tomotopy/README.kr.rst +375 -0
  347. data/vendor/tomotopy/README.rst +382 -0
  348. data/vendor/tomotopy/src/Labeling/FoRelevance.cpp +362 -0
  349. data/vendor/tomotopy/src/Labeling/FoRelevance.h +88 -0
  350. data/vendor/tomotopy/src/Labeling/Labeler.h +50 -0
  351. data/vendor/tomotopy/src/TopicModel/CT.h +37 -0
  352. data/vendor/tomotopy/src/TopicModel/CTModel.cpp +13 -0
  353. data/vendor/tomotopy/src/TopicModel/CTModel.hpp +293 -0
  354. data/vendor/tomotopy/src/TopicModel/DMR.h +51 -0
  355. data/vendor/tomotopy/src/TopicModel/DMRModel.cpp +13 -0
  356. data/vendor/tomotopy/src/TopicModel/DMRModel.hpp +374 -0
  357. data/vendor/tomotopy/src/TopicModel/DT.h +65 -0
  358. data/vendor/tomotopy/src/TopicModel/DTM.h +22 -0
  359. data/vendor/tomotopy/src/TopicModel/DTModel.cpp +15 -0
  360. data/vendor/tomotopy/src/TopicModel/DTModel.hpp +572 -0
  361. data/vendor/tomotopy/src/TopicModel/GDMR.h +37 -0
  362. data/vendor/tomotopy/src/TopicModel/GDMRModel.cpp +14 -0
  363. data/vendor/tomotopy/src/TopicModel/GDMRModel.hpp +485 -0
  364. data/vendor/tomotopy/src/TopicModel/HDP.h +74 -0
  365. data/vendor/tomotopy/src/TopicModel/HDPModel.cpp +13 -0
  366. data/vendor/tomotopy/src/TopicModel/HDPModel.hpp +592 -0
  367. data/vendor/tomotopy/src/TopicModel/HLDA.h +40 -0
  368. data/vendor/tomotopy/src/TopicModel/HLDAModel.cpp +13 -0
  369. data/vendor/tomotopy/src/TopicModel/HLDAModel.hpp +681 -0
  370. data/vendor/tomotopy/src/TopicModel/HPA.h +27 -0
  371. data/vendor/tomotopy/src/TopicModel/HPAModel.cpp +21 -0
  372. data/vendor/tomotopy/src/TopicModel/HPAModel.hpp +588 -0
  373. data/vendor/tomotopy/src/TopicModel/LDA.h +144 -0
  374. data/vendor/tomotopy/src/TopicModel/LDACVB0Model.hpp +442 -0
  375. data/vendor/tomotopy/src/TopicModel/LDAModel.cpp +13 -0
  376. data/vendor/tomotopy/src/TopicModel/LDAModel.hpp +1058 -0
  377. data/vendor/tomotopy/src/TopicModel/LLDA.h +45 -0
  378. data/vendor/tomotopy/src/TopicModel/LLDAModel.cpp +13 -0
  379. data/vendor/tomotopy/src/TopicModel/LLDAModel.hpp +203 -0
  380. data/vendor/tomotopy/src/TopicModel/MGLDA.h +63 -0
  381. data/vendor/tomotopy/src/TopicModel/MGLDAModel.cpp +17 -0
  382. data/vendor/tomotopy/src/TopicModel/MGLDAModel.hpp +558 -0
  383. data/vendor/tomotopy/src/TopicModel/PA.h +43 -0
  384. data/vendor/tomotopy/src/TopicModel/PAModel.cpp +13 -0
  385. data/vendor/tomotopy/src/TopicModel/PAModel.hpp +467 -0
  386. data/vendor/tomotopy/src/TopicModel/PLDA.h +17 -0
  387. data/vendor/tomotopy/src/TopicModel/PLDAModel.cpp +13 -0
  388. data/vendor/tomotopy/src/TopicModel/PLDAModel.hpp +214 -0
  389. data/vendor/tomotopy/src/TopicModel/SLDA.h +54 -0
  390. data/vendor/tomotopy/src/TopicModel/SLDAModel.cpp +17 -0
  391. data/vendor/tomotopy/src/TopicModel/SLDAModel.hpp +456 -0
  392. data/vendor/tomotopy/src/TopicModel/TopicModel.hpp +692 -0
  393. data/vendor/tomotopy/src/Utils/AliasMethod.hpp +169 -0
  394. data/vendor/tomotopy/src/Utils/Dictionary.h +80 -0
  395. data/vendor/tomotopy/src/Utils/EigenAddonOps.hpp +181 -0
  396. data/vendor/tomotopy/src/Utils/LBFGS.h +202 -0
  397. data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBacktracking.h +120 -0
  398. data/vendor/tomotopy/src/Utils/LBFGS/LineSearchBracketing.h +122 -0
  399. data/vendor/tomotopy/src/Utils/LBFGS/Param.h +213 -0
  400. data/vendor/tomotopy/src/Utils/LUT.hpp +82 -0
  401. data/vendor/tomotopy/src/Utils/MultiNormalDistribution.hpp +69 -0
  402. data/vendor/tomotopy/src/Utils/PolyaGamma.hpp +200 -0
  403. data/vendor/tomotopy/src/Utils/PolyaGammaHybrid.hpp +672 -0
  404. data/vendor/tomotopy/src/Utils/ThreadPool.hpp +150 -0
  405. data/vendor/tomotopy/src/Utils/Trie.hpp +220 -0
  406. data/vendor/tomotopy/src/Utils/TruncMultiNormal.hpp +94 -0
  407. data/vendor/tomotopy/src/Utils/Utils.hpp +337 -0
  408. data/vendor/tomotopy/src/Utils/avx_gamma.h +46 -0
  409. data/vendor/tomotopy/src/Utils/avx_mathfun.h +736 -0
  410. data/vendor/tomotopy/src/Utils/exception.h +28 -0
  411. data/vendor/tomotopy/src/Utils/math.h +281 -0
  412. data/vendor/tomotopy/src/Utils/rtnorm.hpp +2690 -0
  413. data/vendor/tomotopy/src/Utils/sample.hpp +192 -0
  414. data/vendor/tomotopy/src/Utils/serializer.hpp +695 -0
  415. data/vendor/tomotopy/src/Utils/slp.hpp +131 -0
  416. data/vendor/tomotopy/src/Utils/sse_gamma.h +48 -0
  417. data/vendor/tomotopy/src/Utils/sse_mathfun.h +710 -0
  418. data/vendor/tomotopy/src/Utils/text.hpp +49 -0
  419. data/vendor/tomotopy/src/Utils/tvector.hpp +543 -0
  420. metadata +531 -0
data/vendor/tomotopy/src/Utils/slp.hpp
@@ -0,0 +1,131 @@
+ #pragma once
+ #include <cstdint>
+
+ namespace tomoto
+ {
+     namespace slp
+     {
+         template<int n, int k>
+         struct combination
+         {
+             static constexpr int64_t value = combination<n - 1, k - 1>::value + combination<n - 1, k>::value;
+         };
+
+         template<int n>
+         struct combination<n, 0>
+         {
+             static constexpr int64_t value = 1;
+         };
+
+         template<int n>
+         struct combination<0, n>
+         {
+             static constexpr int64_t value = 1;
+         };
+
+         template<int n>
+         struct combination<n, n>
+         {
+             static constexpr int64_t value = 1;
+         };
+
+         template<>
+         struct combination<0, 0>
+         {
+             static constexpr int64_t value = 1;
+         };
+
+         template<int _Order, class _Type, int n = 0>
+         struct shiftedLegendre
+         {
+             inline static _Type at(_Type x)
+             {
+                 return shiftedLegendre<_Order, _Type, n + 1>::at(x) * x + combination<_Order, n>::value * combination<_Order + n, n>::value * ((_Order + n) % 2 ? -1 : 1);
+             }
+
+             inline static _Type atDerivative(_Type x)
+             {
+                 return shiftedLegendre<_Order, _Type, n + 1>::atDerivative(x) * x + combination<_Order, n>::value * combination<_Order + n, n>::value * ((_Order + n) % 2 ? -1 : 1) * (int)n;
+             }
+         };
+
+         template<int _Order, class _Type>
+         struct shiftedLegendre<_Order, _Type, _Order>
+         {
+             inline static _Type at(_Type x)
+             {
+                 return combination<_Order + _Order, _Order>::value;
+             }
+
+             inline static _Type atDerivative(_Type x)
+             {
+                 return combination<_Order + _Order, _Order>::value * _Order;
+             }
+         };
+
+         template<class _Type>
+         struct shiftedLegendre<0, _Type, 0>
+         {
+             inline static _Type at(_Type x)
+             {
+                 return 1;
+             }
+         };
+
+         template<class _Type>
+         struct shiftedLegendre<0, _Type, 1>
+         {
+             inline static _Type atDerivative(_Type x)
+             {
+                 return 0;
+             }
+         };
+
+         template<int _Order, class _Type> inline _Type shiftedLegendreFunc(_Type x)
+         {
+             return shiftedLegendre<_Order, _Type, 0>::at(x);
+         }
+
+         template<int _Order, class _Type> inline _Type shiftedLegendreFuncDerivative(_Type x)
+         {
+             return shiftedLegendre<_Order, _Type, 1>::atDerivative(x);
+         }
+
+
+         template<class _Type> _Type slpGet(size_t order, _Type x)
+         {
+             switch (order)
+             {
+             case 0: return shiftedLegendreFunc<0>(x);
+             case 1: return shiftedLegendreFunc<1>(x);
+             case 2: return shiftedLegendreFunc<2>(x);
+             case 3: return shiftedLegendreFunc<3>(x);
+             case 4: return shiftedLegendreFunc<4>(x);
+             case 5: return shiftedLegendreFunc<5>(x);
+             case 6: return shiftedLegendreFunc<6>(x);
+             case 7: return shiftedLegendreFunc<7>(x);
+             case 8: return shiftedLegendreFunc<8>(x);
+             case 9: return shiftedLegendreFunc<9>(x);
+             case 10: return shiftedLegendreFunc<10>(x);
+             case 11: return shiftedLegendreFunc<11>(x);
+             case 12: return shiftedLegendreFunc<12>(x);
+             case 13: return shiftedLegendreFunc<13>(x);
+             case 14: return shiftedLegendreFunc<14>(x);
+             case 15: return shiftedLegendreFunc<15>(x);
+             }
+             return _Type{};
+         }
+
+         inline size_t partialProductDown(size_t n, size_t k)
+         {
+             size_t ret = 1;
+             for (size_t i = 0; i < k; ++i) ret *= n--;
+             return ret;
+         }
+
+         inline int slpGetCoef(size_t n, size_t k)
+         {
+             return ((n + k) & 1 ? -1 : 1) * (int)(partialProductDown(n, k) / partialProductDown(k, k) * partialProductDown(n + k, k) / partialProductDown(k, k));
+         }
+     }
+ }
data/vendor/tomotopy/src/Utils/sse_gamma.h
@@ -0,0 +1,48 @@
+ #pragma once
+ #if _WIN64 || __x86_64__
+ #define __SSE2__
+ #endif
+ #include "sse_mathfun.h"
+
+ // approximation : lgamma(z) ~= (z+2.5)ln(z+3) - z - 3 + 0.5 ln (2pi) + 1/12/(z + 3) - ln (z(z+1)(z+2))
+ inline __m128 lgamma_ps(__m128 x)
+ {
+     __m128 x_3 = _mm_add_ps(x, _mm_set1_ps(3));
+     __m128 ret = _mm_mul_ps(_mm_add_ps(x_3, _mm_set1_ps(-0.5f)), log_ps(x_3));
+     ret = _mm_sub_ps(ret, x_3);
+     ret = _mm_add_ps(ret, _mm_set1_ps(0.91893853f));
+     ret = _mm_add_ps(ret, _mm_div_ps(_mm_set1_ps(1 / 12.f), x_3));
+     ret = _mm_sub_ps(ret, log_ps(_mm_mul_ps(
+         _mm_mul_ps(_mm_sub_ps(x_3, _mm_set1_ps(1)), _mm_sub_ps(x_3, _mm_set1_ps(2))), x)));
+     return ret;
+ }
+
+ // approximation : lgamma(z + a) - lgamma(z) = (z + a + 1.5) * log(z + a + 2) - (z + 1.5) * log(z + 2) - a + (1. / (z + a + 2) - 1. / (z + 2)) / 12. - log(((z + a) * (z + a + 1)) / (z * (z + 1)))
+ inline __m128 lgamma_subt(__m128 z, __m128 a)
+ {
+     __m128 _1 = _mm_set1_ps(1);
+     __m128 _1p5 = _mm_set1_ps(1.5);
+     __m128 _2 = _mm_set1_ps(2);
+     __m128 _1_12 = _mm_set1_ps(1 / 12.f);
+     __m128 za = _mm_add_ps(z, a);
+     __m128 ret = _mm_mul_ps(_mm_add_ps(za, _1p5), log_ps(_mm_add_ps(za, _2)));
+     ret = _mm_sub_ps(ret, _mm_mul_ps(_mm_add_ps(z, _1p5), log_ps(_mm_add_ps(z, _2))));
+     ret = _mm_sub_ps(ret, a);
+     ret = _mm_add_ps(ret, _mm_sub_ps(_mm_div_ps(_1_12, _mm_add_ps(za, _2)), _mm_div_ps(_1_12, _mm_add_ps(z, _2))));
+     ret = _mm_sub_ps(ret, log_ps(_mm_div_ps(_mm_div_ps(_mm_mul_ps(za, _mm_add_ps(za, _1)), z), _mm_add_ps(z, _1))));
+     return ret;
+ }
+
+ // approximation : digamma(z) ~= ln(z+4) - 1/2/(z+4) - 1/12/(z+4)^2 - 1/z - 1/(z+1) - 1/(z+2) - 1/(z+3)
+ inline __m128 digamma_ps(__m128 x)
+ {
+     __m128 x_4 = _mm_add_ps(x, _mm_set1_ps(4));
+     __m128 ret = log_ps(x_4);
+     ret = _mm_sub_ps(ret, _mm_div_ps(_mm_set1_ps(1 / 2.f), x_4));
+     ret = _mm_sub_ps(ret, _mm_div_ps(_mm_div_ps(_mm_set1_ps(1 / 12.f), x_4), x_4));
+     ret = _mm_sub_ps(ret, _mm_rcp_ps(_mm_sub_ps(x_4, _mm_set1_ps(1))));
+     ret = _mm_sub_ps(ret, _mm_rcp_ps(_mm_sub_ps(x_4, _mm_set1_ps(2))));
+     ret = _mm_sub_ps(ret, _mm_rcp_ps(_mm_sub_ps(x_4, _mm_set1_ps(3))));
+     ret = _mm_sub_ps(ret, _mm_rcp_ps(_mm_sub_ps(x_4, _mm_set1_ps(4))));
+     return ret;
+ }
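lgamma_ps evaluates the approximation stated in its comment on all four lanes at once. A scalar restatement (a sketch for sanity-checking one lane at a time; std::lgamma serves only as the reference and is not used by the header):

    #include <cmath>
    #include <cstdio>

    // scalar version of the formula vectorized by lgamma_ps:
    // lgamma(z) ~= (z+2.5)*ln(z+3) - (z+3) + 0.5*ln(2*pi) + 1/(12*(z+3)) - ln(z*(z+1)*(z+2))
    float lgamma_scalar(float z)
    {
        float z3 = z + 3;
        return (z3 - 0.5f) * std::log(z3) - z3 + 0.91893853f // 0.5*ln(2*pi)
            + 1.f / (12.f * z3) - std::log(z * (z + 1) * (z + 2));
    }

    int main()
    {
        for (float z : { 0.5f, 1.f, 2.5f, 10.f })
            std::printf("z=%g approx=%g exact=%g\n", z, lgamma_scalar(z), std::lgamma(z));
    }

The shift by 3 and the final log(z(z+1)(z+2)) correction keep the Stirling-style series accurate even for small z; at z = 1 the approximation is off by only about 4e-5.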
data/vendor/tomotopy/src/Utils/sse_mathfun.h
@@ -0,0 +1,710 @@
+ /* SIMD (SSE1+MMX or SSE2) implementation of sin, cos, exp and log
+
+    Inspired by Intel Approximate Math library, and based on the
+    corresponding algorithms of the cephes math library
+
+    The default is to use the SSE1 version. If you define __SSE2__
+    the SSE2 intrinsics will be used in place of the MMX intrinsics. Do
+    not expect any significant performance improvement with SSE2.
+ */
+
+ /* Copyright (C) 2007 Julien Pommier
+
+    This software is provided 'as-is', without any express or implied
+    warranty. In no event will the authors be held liable for any damages
+    arising from the use of this software.
+
+    Permission is granted to anyone to use this software for any purpose,
+    including commercial applications, and to alter it and redistribute it
+    freely, subject to the following restrictions:
+
+    1. The origin of this software must not be misrepresented; you must not
+       claim that you wrote the original software. If you use this software
+       in a product, an acknowledgment in the product documentation would be
+       appreciated but is not required.
+    2. Altered source versions must be plainly marked as such, and must not be
+       misrepresented as being the original software.
+    3. This notice may not be removed or altered from any source distribution.
+
+    (this is the zlib license)
+ */
+
+ #include <xmmintrin.h>
+
+ /* yes I know, the top of this file is quite ugly */
+
+ #ifdef _MSC_VER /* visual c++ */
+ # define ALIGN16_BEG __declspec(align(16))
+ # define ALIGN16_END
+ #else /* gcc or icc */
+ # define ALIGN16_BEG
+ # define ALIGN16_END __attribute__((aligned(16)))
+ #endif
+
+ /* __m128 is ugly to write */
+ typedef __m128 v4sf;  // vector of 4 float (sse1)
+
+ #ifdef __SSE2__
+ # include <emmintrin.h>
+ typedef __m128i v4si; // vector of 4 int (sse2)
+ #else
+ typedef __m64 v2si;   // vector of 2 int (mmx)
+ #endif
+
+ /* declare some SSE constants -- why can't I figure a better way to do that? */
+ #define _PS_CONST(Name, Val) \
+   static const ALIGN16_BEG float _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
+ #define _PI32_CONST(Name, Val) \
+   static const ALIGN16_BEG int _pi32_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
+ #define _PS_CONST_TYPE(Name, Type, Val) \
+   static const ALIGN16_BEG Type _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
+
+ _PS_CONST(1, 1.0f);
+ _PS_CONST(0p5, 0.5f);
+ /* the smallest non denormalized float number */
+ _PS_CONST_TYPE(min_norm_pos, int, 0x00800000);
+ _PS_CONST_TYPE(mant_mask, int, 0x7f800000);
+ _PS_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);
+
+ _PS_CONST_TYPE(sign_mask, int, (int)0x80000000);
+ _PS_CONST_TYPE(inv_sign_mask, int, ~0x80000000);
+
+ _PI32_CONST(1, 1);
+ _PI32_CONST(inv1, ~1);
+ _PI32_CONST(2, 2);
+ _PI32_CONST(4, 4);
+ _PI32_CONST(0x7f, 0x7f);
+
+ _PS_CONST(cephes_SQRTHF, 0.707106781186547524f);
+ _PS_CONST(cephes_log_p0, 7.0376836292E-2f);
+ _PS_CONST(cephes_log_p1, -1.1514610310E-1f);
+ _PS_CONST(cephes_log_p2, 1.1676998740E-1f);
+ _PS_CONST(cephes_log_p3, -1.2420140846E-1f);
+ _PS_CONST(cephes_log_p4, +1.4249322787E-1f);
+ _PS_CONST(cephes_log_p5, -1.6668057665E-1f);
+ _PS_CONST(cephes_log_p6, +2.0000714765E-1f);
+ _PS_CONST(cephes_log_p7, -2.4999993993E-1f);
+ _PS_CONST(cephes_log_p8, +3.3333331174E-1f);
+ _PS_CONST(cephes_log_q1, -2.12194440e-4f);
+ _PS_CONST(cephes_log_q2, 0.693359375f);
+
+ #ifndef __SSE2__
+ typedef union xmm_mm_union {
+   __m128 xmm;
+   __m64 mm[2];
+ } xmm_mm_union;
+
+ #define COPY_XMM_TO_MM(xmm_, mm0_, mm1_) { \
+     xmm_mm_union u; u.xmm = xmm_; \
+     mm0_ = u.mm[0]; \
+     mm1_ = u.mm[1]; \
+ }
+
+ #define COPY_MM_TO_XMM(mm0_, mm1_, xmm_) { \
+     xmm_mm_union u; u.mm[0]=mm0_; u.mm[1]=mm1_; xmm_ = u.xmm; \
+ }
+
+ #endif // __SSE2__
+
+ /* natural logarithm computed for 4 simultaneous floats
+    return NaN for x <= 0
+ */
+ inline v4sf log_ps(v4sf x) {
+ #ifdef __SSE2__
+   v4si emm0;
+ #else
+   v2si mm0, mm1;
+ #endif
+   v4sf one = *(v4sf*)_ps_1;
+
+   v4sf invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());
+
+   x = _mm_max_ps(x, *(v4sf*)_ps_min_norm_pos); /* cut off denormalized stuff */
+
+ #ifndef __SSE2__
+   /* part 1: x = frexpf(x, &e); */
+   COPY_XMM_TO_MM(x, mm0, mm1);
+   mm0 = _mm_srli_pi32(mm0, 23);
+   mm1 = _mm_srli_pi32(mm1, 23);
+ #else
+   emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
+ #endif
+   /* keep only the fractional part */
+   x = _mm_and_ps(x, *(v4sf*)_ps_inv_mant_mask);
+   x = _mm_or_ps(x, *(v4sf*)_ps_0p5);
+
+ #ifndef __SSE2__
+   /* now e=mm0:mm1 contain the real base-2 exponent */
+   mm0 = _mm_sub_pi32(mm0, *(v2si*)_pi32_0x7f);
+   mm1 = _mm_sub_pi32(mm1, *(v2si*)_pi32_0x7f);
+   v4sf e = _mm_cvtpi32x2_ps(mm0, mm1);
+   _mm_empty(); /* bye bye mmx */
+ #else
+   emm0 = _mm_sub_epi32(emm0, *(v4si*)_pi32_0x7f);
+   v4sf e = _mm_cvtepi32_ps(emm0);
+ #endif
+
+   e = _mm_add_ps(e, one);
+
+   /* part2:
+      if( x < SQRTHF ) {
+        e -= 1;
+        x = x + x - 1.0;
+      } else { x = x - 1.0; }
+   */
+   v4sf mask = _mm_cmplt_ps(x, *(v4sf*)_ps_cephes_SQRTHF);
+   v4sf tmp = _mm_and_ps(x, mask);
+   x = _mm_sub_ps(x, one);
+   e = _mm_sub_ps(e, _mm_and_ps(one, mask));
+   x = _mm_add_ps(x, tmp);
+
+
+   v4sf z = _mm_mul_ps(x, x);
+
+   v4sf y = *(v4sf*)_ps_cephes_log_p0;
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p1);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p2);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p3);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p4);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p5);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p6);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p7);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p8);
+   y = _mm_mul_ps(y, x);
+
+   y = _mm_mul_ps(y, z);
+
+
+   tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q1);
+   y = _mm_add_ps(y, tmp);
+
+
+   tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
+   y = _mm_sub_ps(y, tmp);
+
+   tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q2);
+   x = _mm_add_ps(x, y);
+   x = _mm_add_ps(x, tmp);
+   x = _mm_or_ps(x, invalid_mask); // negative arg will be NAN
+   return x;
+ }
+
+ _PS_CONST(exp_hi, 88.3762626647949f);
+ _PS_CONST(exp_lo, -88.3762626647949f);
+
+ _PS_CONST(cephes_LOG2EF, 1.44269504088896341f);
+ _PS_CONST(cephes_exp_C1, 0.693359375f);
+ _PS_CONST(cephes_exp_C2, -2.12194440e-4f);
+
+ _PS_CONST(cephes_exp_p0, 1.9875691500E-4f);
+ _PS_CONST(cephes_exp_p1, 1.3981999507E-3f);
+ _PS_CONST(cephes_exp_p2, 8.3334519073E-3f);
+ _PS_CONST(cephes_exp_p3, 4.1665795894E-2f);
+ _PS_CONST(cephes_exp_p4, 1.6666665459E-1f);
+ _PS_CONST(cephes_exp_p5, 5.0000001201E-1f);
+
+ inline v4sf exp_ps(v4sf x) {
+   v4sf tmp = _mm_setzero_ps(), fx;
+ #ifdef __SSE2__
+   v4si emm0;
+ #else
+   v2si mm0, mm1;
+ #endif
+   v4sf one = *(v4sf*)_ps_1;
+
+   x = _mm_min_ps(x, *(v4sf*)_ps_exp_hi);
+   x = _mm_max_ps(x, *(v4sf*)_ps_exp_lo);
+
+   /* express exp(x) as exp(g + n*log(2)) */
+   fx = _mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF);
+   fx = _mm_add_ps(fx, *(v4sf*)_ps_0p5);
+
+   /* how to perform a floorf with SSE: just below */
+ #ifndef __SSE2__
+   /* step 1 : cast to int */
+   tmp = _mm_movehl_ps(tmp, fx);
+   mm0 = _mm_cvttps_pi32(fx);
+   mm1 = _mm_cvttps_pi32(tmp);
+   /* step 2 : cast back to float */
+   tmp = _mm_cvtpi32x2_ps(mm0, mm1);
+ #else
+   emm0 = _mm_cvttps_epi32(fx);
+   tmp = _mm_cvtepi32_ps(emm0);
+ #endif
+   /* if greater, subtract 1 */
+   v4sf mask = _mm_cmpgt_ps(tmp, fx);
+   mask = _mm_and_ps(mask, one);
+   fx = _mm_sub_ps(tmp, mask);
+
+   tmp = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1);
+   v4sf z = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2);
+   x = _mm_sub_ps(x, tmp);
+   x = _mm_sub_ps(x, z);
+
+   z = _mm_mul_ps(x, x);
+
+   v4sf y = *(v4sf*)_ps_cephes_exp_p0;
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p1);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p2);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p3);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p4);
+   y = _mm_mul_ps(y, x);
+   y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p5);
+   y = _mm_mul_ps(y, z);
+   y = _mm_add_ps(y, x);
+   y = _mm_add_ps(y, one);
+
+   /* build 2^n */
+ #ifndef __SSE2__
+   z = _mm_movehl_ps(z, fx);
+   mm0 = _mm_cvttps_pi32(fx);
+   mm1 = _mm_cvttps_pi32(z);
+   mm0 = _mm_add_pi32(mm0, *(v2si*)_pi32_0x7f);
+   mm1 = _mm_add_pi32(mm1, *(v2si*)_pi32_0x7f);
+   mm0 = _mm_slli_pi32(mm0, 23);
+   mm1 = _mm_slli_pi32(mm1, 23);
+
+   v4sf pow2n;
+   COPY_MM_TO_XMM(mm0, mm1, pow2n);
+   _mm_empty();
+ #else
+   emm0 = _mm_cvttps_epi32(fx);
+   emm0 = _mm_add_epi32(emm0, *(v4si*)_pi32_0x7f);
+   emm0 = _mm_slli_epi32(emm0, 23);
+   v4sf pow2n = _mm_castsi128_ps(emm0);
+ #endif
+   y = _mm_mul_ps(y, pow2n);
+   return y;
+ }
+
+ _PS_CONST(minus_cephes_DP1, -0.78515625f);
+ _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4f);
+ _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8f);
+ _PS_CONST(sincof_p0, -1.9515295891E-4f);
+ _PS_CONST(sincof_p1, 8.3321608736E-3f);
+ _PS_CONST(sincof_p2, -1.6666654611E-1f);
+ _PS_CONST(coscof_p0, 2.443315711809948E-005f);
+ _PS_CONST(coscof_p1, -1.388731625493765E-003f);
+ _PS_CONST(coscof_p2, 4.166664568298827E-002f);
+ _PS_CONST(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+
+ /* evaluation of 4 sines at once, using only SSE1+MMX intrinsics so
+    it also runs on old Athlon XPs and the Pentium III of your
+    grandmother.
+
+    The code is the exact rewriting of the cephes sinf function.
+    Precision is excellent as long as x < 8192 (I did not bother to
+    take into account the special handling they have for greater values
+    -- it does not return garbage for arguments over 8192, though, but
+    the extra precision is missing).
+
+    Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
+    surprising but correct result.
+
+    Performance is also surprisingly good, 1.33 times faster than the
+    macOS vsinf SSE2 function, and 1.5 times faster than the
+    __vrs4_sinf of AMD's ACML (which is only available in 64 bits). Not
+    too bad for an SSE1 function (with no special tuning)!
+    However the latter libraries probably have a much better handling of NaN,
+    Inf, denormalized and other special arguments.
+
+    On my core 1 duo, the execution of this function takes approximately 95 cycles.
+
+    From what I have observed on the experiments with Intel AMath lib, switching to an
+    SSE2 version would improve the perf by only 10%.
+
+    Since it is based on SSE intrinsics, it has to be compiled at -O2 to
+    deliver full speed.
+ */
+ inline v4sf sin_ps(v4sf x) { // any x
+   v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
+
+ #ifdef __SSE2__
+   v4si emm0, emm2;
+ #else
+   v2si mm0, mm1, mm2, mm3;
+ #endif
+   sign_bit = x;
+   /* take the absolute value */
+   x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
+   /* extract the sign bit (upper one) */
+   sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);
+
+   /* scale by 4/Pi */
+   y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
+
+ #ifdef __SSE2__
+   /* store the integer part of y in mm0 */
+   emm2 = _mm_cvttps_epi32(y);
+   /* j=(j+1) & (~1) (see the cephes sources) */
+   emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
+   emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
+   y = _mm_cvtepi32_ps(emm2);
+
+   /* get the swap sign flag */
+   emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
+   emm0 = _mm_slli_epi32(emm0, 29);
+   /* get the polynom selection mask
+      there is one polynom for 0 <= x <= Pi/4
+      and another one for Pi/4<x<=Pi/2
+
+      Both branches will be computed.
+   */
+   emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
+   emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+   v4sf swap_sign_bit = _mm_castsi128_ps(emm0);
+   v4sf poly_mask = _mm_castsi128_ps(emm2);
+   sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
+
+ #else
+   /* store the integer part of y in mm0:mm1 */
+   xmm2 = _mm_movehl_ps(xmm2, y);
+   mm2 = _mm_cvttps_pi32(y);
+   mm3 = _mm_cvttps_pi32(xmm2);
+   /* j=(j+1) & (~1) (see the cephes sources) */
+   mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
+   mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
+   mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
+   mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
+   y = _mm_cvtpi32x2_ps(mm2, mm3);
+   /* get the swap sign flag */
+   mm0 = _mm_and_si64(mm2, *(v2si*)_pi32_4);
+   mm1 = _mm_and_si64(mm3, *(v2si*)_pi32_4);
+   mm0 = _mm_slli_pi32(mm0, 29);
+   mm1 = _mm_slli_pi32(mm1, 29);
+   /* get the polynom selection mask */
+   mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
+   mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
+   mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
+   mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
+   v4sf swap_sign_bit, poly_mask;
+   COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit);
+   COPY_MM_TO_XMM(mm2, mm3, poly_mask);
+   sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
+   _mm_empty(); /* good-bye mmx */
+ #endif
+
+   /* The magic pass: "Extended precision modular arithmetic"
+      x = ((x - y * DP1) - y * DP2) - y * DP3; */
+   xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
+   xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
+   xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
+   xmm1 = _mm_mul_ps(y, xmm1);
+   xmm2 = _mm_mul_ps(y, xmm2);
+   xmm3 = _mm_mul_ps(y, xmm3);
+   x = _mm_add_ps(x, xmm1);
+   x = _mm_add_ps(x, xmm2);
+   x = _mm_add_ps(x, xmm3);
+
+   /* Evaluate the first polynom (0 <= x <= Pi/4) */
+   y = *(v4sf*)_ps_coscof_p0;
+   v4sf z = _mm_mul_ps(x, x);
+
+   y = _mm_mul_ps(y, z);
+   y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
+   y = _mm_mul_ps(y, z);
+   y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
+   y = _mm_mul_ps(y, z);
+   y = _mm_mul_ps(y, z);
+   v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
+   y = _mm_sub_ps(y, tmp);
+   y = _mm_add_ps(y, *(v4sf*)_ps_1);
+
+   /* Evaluate the second polynom (Pi/4 <= x <= Pi/2) */
+
+   v4sf y2 = *(v4sf*)_ps_sincof_p0;
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_mul_ps(y2, x);
+   y2 = _mm_add_ps(y2, x);
+
+   /* select the correct result from the two polynoms */
+   xmm3 = poly_mask;
+   y2 = _mm_and_ps(xmm3, y2); //, xmm3);
+   y = _mm_andnot_ps(xmm3, y);
+   y = _mm_add_ps(y, y2);
+   /* update the sign */
+   y = _mm_xor_ps(y, sign_bit);
+   return y;
+ }
+
+ /* almost the same as sin_ps */
+ inline v4sf cos_ps(v4sf x) { // any x
+   v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
+ #ifdef __SSE2__
+   v4si emm0, emm2;
+ #else
+   v2si mm0, mm1, mm2, mm3;
+ #endif
+   /* take the absolute value */
+   x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
+
+   /* scale by 4/Pi */
+   y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
+
+ #ifdef __SSE2__
+   /* store the integer part of y in mm0 */
+   emm2 = _mm_cvttps_epi32(y);
+   /* j=(j+1) & (~1) (see the cephes sources) */
+   emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
+   emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
+   y = _mm_cvtepi32_ps(emm2);
+
+   emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);
+
+   /* get the swap sign flag */
+   emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
+   emm0 = _mm_slli_epi32(emm0, 29);
+   /* get the polynom selection mask */
+   emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
+   emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+   v4sf sign_bit = _mm_castsi128_ps(emm0);
+   v4sf poly_mask = _mm_castsi128_ps(emm2);
+ #else
+   /* store the integer part of y in mm0:mm1 */
+   xmm2 = _mm_movehl_ps(xmm2, y);
+   mm2 = _mm_cvttps_pi32(y);
+   mm3 = _mm_cvttps_pi32(xmm2);
+
+   /* j=(j+1) & (~1) (see the cephes sources) */
+   mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
+   mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
+   mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
+   mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
+
+   y = _mm_cvtpi32x2_ps(mm2, mm3);
+
+
+   mm2 = _mm_sub_pi32(mm2, *(v2si*)_pi32_2);
+   mm3 = _mm_sub_pi32(mm3, *(v2si*)_pi32_2);
+
+   /* get the swap sign flag in mm0:mm1 and the
+      polynom selection mask in mm2:mm3 */
+
+   mm0 = _mm_andnot_si64(mm2, *(v2si*)_pi32_4);
+   mm1 = _mm_andnot_si64(mm3, *(v2si*)_pi32_4);
+   mm0 = _mm_slli_pi32(mm0, 29);
+   mm1 = _mm_slli_pi32(mm1, 29);
+
+   mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
+   mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
+
+   mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
+   mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
+
+   v4sf sign_bit, poly_mask;
+   COPY_MM_TO_XMM(mm0, mm1, sign_bit);
+   COPY_MM_TO_XMM(mm2, mm3, poly_mask);
+   _mm_empty(); /* good-bye mmx */
+ #endif
+   /* The magic pass: "Extended precision modular arithmetic"
+      x = ((x - y * DP1) - y * DP2) - y * DP3; */
+   xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
+   xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
+   xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
+   xmm1 = _mm_mul_ps(y, xmm1);
+   xmm2 = _mm_mul_ps(y, xmm2);
+   xmm3 = _mm_mul_ps(y, xmm3);
+   x = _mm_add_ps(x, xmm1);
+   x = _mm_add_ps(x, xmm2);
+   x = _mm_add_ps(x, xmm3);
+
+   /* Evaluate the first polynom (0 <= x <= Pi/4) */
+   y = *(v4sf*)_ps_coscof_p0;
+   v4sf z = _mm_mul_ps(x, x);
+
+   y = _mm_mul_ps(y, z);
+   y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
+   y = _mm_mul_ps(y, z);
+   y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
+   y = _mm_mul_ps(y, z);
+   y = _mm_mul_ps(y, z);
+   v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
+   y = _mm_sub_ps(y, tmp);
+   y = _mm_add_ps(y, *(v4sf*)_ps_1);
+
+   /* Evaluate the second polynom (Pi/4 <= x <= Pi/2) */
+
+   v4sf y2 = *(v4sf*)_ps_sincof_p0;
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_mul_ps(y2, x);
+   y2 = _mm_add_ps(y2, x);
+
+   /* select the correct result from the two polynoms */
+   xmm3 = poly_mask;
+   y2 = _mm_and_ps(xmm3, y2); //, xmm3);
+   y = _mm_andnot_ps(xmm3, y);
+   y = _mm_add_ps(y, y2);
+   /* update the sign */
+   y = _mm_xor_ps(y, sign_bit);
+
+   return y;
+ }
+
+ /* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them..
+    it is almost as fast, and gives you a free cosine with your sine */
+ inline void sincos_ps(v4sf x, v4sf *s, v4sf *c) {
+   v4sf xmm1, xmm2, xmm3 = _mm_setzero_ps(), sign_bit_sin, y;
+ #ifdef __SSE2__
+   v4si emm0, emm2, emm4;
+ #else
+   v2si mm0, mm1, mm2, mm3, mm4, mm5;
+ #endif
+   sign_bit_sin = x;
+   /* take the absolute value */
+   x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
+   /* extract the sign bit (upper one) */
+   sign_bit_sin = _mm_and_ps(sign_bit_sin, *(v4sf*)_ps_sign_mask);
+
+   /* scale by 4/Pi */
+   y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
+
+ #ifdef __SSE2__
+   /* store the integer part of y in emm2 */
+   emm2 = _mm_cvttps_epi32(y);
+
+   /* j=(j+1) & (~1) (see the cephes sources) */
+   emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
+   emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
+   y = _mm_cvtepi32_ps(emm2);
+
+   emm4 = emm2;
+
+   /* get the swap sign flag for the sine */
+   emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
+   emm0 = _mm_slli_epi32(emm0, 29);
+   v4sf swap_sign_bit_sin = _mm_castsi128_ps(emm0);
+
+   /* get the polynom selection mask for the sine */
+   emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
+   emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+   v4sf poly_mask = _mm_castsi128_ps(emm2);
+ #else
+   /* store the integer part of y in mm2:mm3 */
+   xmm3 = _mm_movehl_ps(xmm3, y);
+   mm2 = _mm_cvttps_pi32(y);
+   mm3 = _mm_cvttps_pi32(xmm3);
+
+   /* j=(j+1) & (~1) (see the cephes sources) */
+   mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
+   mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
+   mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
+   mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
+
+   y = _mm_cvtpi32x2_ps(mm2, mm3);
+
+   mm4 = mm2;
+   mm5 = mm3;
+
+   /* get the swap sign flag for the sine */
+   mm0 = _mm_and_si64(mm2, *(v2si*)_pi32_4);
+   mm1 = _mm_and_si64(mm3, *(v2si*)_pi32_4);
+   mm0 = _mm_slli_pi32(mm0, 29);
+   mm1 = _mm_slli_pi32(mm1, 29);
+   v4sf swap_sign_bit_sin;
+   COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit_sin);
+
+   /* get the polynom selection mask for the sine */
+
+   mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
+   mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
+   mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
+   mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
+   v4sf poly_mask;
+   COPY_MM_TO_XMM(mm2, mm3, poly_mask);
+ #endif
+
+   /* The magic pass: "Extended precision modular arithmetic"
+      x = ((x - y * DP1) - y * DP2) - y * DP3; */
+   xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
+   xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
+   xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
+   xmm1 = _mm_mul_ps(y, xmm1);
+   xmm2 = _mm_mul_ps(y, xmm2);
+   xmm3 = _mm_mul_ps(y, xmm3);
+   x = _mm_add_ps(x, xmm1);
+   x = _mm_add_ps(x, xmm2);
+   x = _mm_add_ps(x, xmm3);
+
+ #ifdef __SSE2__
+   emm4 = _mm_sub_epi32(emm4, *(v4si*)_pi32_2);
+   emm4 = _mm_andnot_si128(emm4, *(v4si*)_pi32_4);
+   emm4 = _mm_slli_epi32(emm4, 29);
+   v4sf sign_bit_cos = _mm_castsi128_ps(emm4);
+ #else
+   /* get the sign flag for the cosine */
+   mm4 = _mm_sub_pi32(mm4, *(v2si*)_pi32_2);
+   mm5 = _mm_sub_pi32(mm5, *(v2si*)_pi32_2);
+   mm4 = _mm_andnot_si64(mm4, *(v2si*)_pi32_4);
+   mm5 = _mm_andnot_si64(mm5, *(v2si*)_pi32_4);
+   mm4 = _mm_slli_pi32(mm4, 29);
+   mm5 = _mm_slli_pi32(mm5, 29);
+   v4sf sign_bit_cos;
+   COPY_MM_TO_XMM(mm4, mm5, sign_bit_cos);
+   _mm_empty(); /* good-bye mmx */
+ #endif
+
+   sign_bit_sin = _mm_xor_ps(sign_bit_sin, swap_sign_bit_sin);
+
+
+   /* Evaluate the first polynom (0 <= x <= Pi/4) */
+   v4sf z = _mm_mul_ps(x, x);
+   y = *(v4sf*)_ps_coscof_p0;
+
+   y = _mm_mul_ps(y, z);
+   y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
+   y = _mm_mul_ps(y, z);
+   y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
+   y = _mm_mul_ps(y, z);
+   y = _mm_mul_ps(y, z);
+   v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
+   y = _mm_sub_ps(y, tmp);
+   y = _mm_add_ps(y, *(v4sf*)_ps_1);
+
+   /* Evaluate the second polynom (Pi/4 <= x <= Pi/2) */
+
+   v4sf y2 = *(v4sf*)_ps_sincof_p0;
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
+   y2 = _mm_mul_ps(y2, z);
+   y2 = _mm_mul_ps(y2, x);
+   y2 = _mm_add_ps(y2, x);
+
+   /* select the correct result from the two polynoms */
+   xmm3 = poly_mask;
+   v4sf ysin2 = _mm_and_ps(xmm3, y2);
+   v4sf ysin1 = _mm_andnot_ps(xmm3, y);
+   y2 = _mm_sub_ps(y2, ysin2);
+   y = _mm_sub_ps(y, ysin1);
+
+   xmm1 = _mm_add_ps(ysin1, ysin2);
+   xmm2 = _mm_add_ps(y, y2);
+
+   /* update the sign */
+   *s = _mm_xor_ps(xmm1, sign_bit_sin);
+   *c = _mm_xor_ps(xmm2, sign_bit_cos);
+ }
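A short usage sketch (illustrative only, assuming the header is available as "sse_mathfun.h"): pack four floats into a v4sf, call sincos_ps once, and compare each lane against the scalar library functions:

    #include <cmath>
    #include <cstdio>
    #include "sse_mathfun.h"

    int main()
    {
        float in[4] = { 0.1f, 1.0f, 3.14159265f, -2.5f };
        v4sf x = _mm_loadu_ps(in);   // load four arguments into one SSE register
        v4sf s, c;
        sincos_ps(x, &s, &c);        // one call yields both sine and cosine
        float so[4], co[4];
        _mm_storeu_ps(so, s);
        _mm_storeu_ps(co, c);
        for (int i = 0; i < 4; ++i)
            std::printf("sin(%g)=%g (std: %g)  cos(%g)=%g (std: %g)\n",
                in[i], so[i], std::sin(in[i]), in[i], co[i], std::cos(in[i]));
    }

Per the header's own comments, accuracy holds for |x| < 8192, and the file must be compiled with optimization (e.g. -O2) to get the intended speed.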