numpy 2.4.1__cp314-cp314t-win_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (932)
  1. numpy/__config__.py +170 -0
  2. numpy/__config__.pyi +108 -0
  3. numpy/__init__.cython-30.pxd +1242 -0
  4. numpy/__init__.pxd +1155 -0
  5. numpy/__init__.py +955 -0
  6. numpy/__init__.pyi +6202 -0
  7. numpy/_array_api_info.py +346 -0
  8. numpy/_array_api_info.pyi +206 -0
  9. numpy/_configtool.py +39 -0
  10. numpy/_configtool.pyi +1 -0
  11. numpy/_core/__init__.py +201 -0
  12. numpy/_core/__init__.pyi +666 -0
  13. numpy/_core/_add_newdocs.py +7151 -0
  14. numpy/_core/_add_newdocs.pyi +2 -0
  15. numpy/_core/_add_newdocs_scalars.py +381 -0
  16. numpy/_core/_add_newdocs_scalars.pyi +16 -0
  17. numpy/_core/_asarray.py +130 -0
  18. numpy/_core/_asarray.pyi +43 -0
  19. numpy/_core/_dtype.py +366 -0
  20. numpy/_core/_dtype.pyi +56 -0
  21. numpy/_core/_dtype_ctypes.py +120 -0
  22. numpy/_core/_dtype_ctypes.pyi +83 -0
  23. numpy/_core/_exceptions.py +162 -0
  24. numpy/_core/_exceptions.pyi +54 -0
  25. numpy/_core/_internal.py +968 -0
  26. numpy/_core/_internal.pyi +61 -0
  27. numpy/_core/_methods.py +252 -0
  28. numpy/_core/_methods.pyi +22 -0
  29. numpy/_core/_multiarray_tests.cp314t-win_arm64.lib +0 -0
  30. numpy/_core/_multiarray_tests.cp314t-win_arm64.pyd +0 -0
  31. numpy/_core/_multiarray_umath.cp314t-win_arm64.lib +0 -0
  32. numpy/_core/_multiarray_umath.cp314t-win_arm64.pyd +0 -0
  33. numpy/_core/_operand_flag_tests.cp314t-win_arm64.lib +0 -0
  34. numpy/_core/_operand_flag_tests.cp314t-win_arm64.pyd +0 -0
  35. numpy/_core/_rational_tests.cp314t-win_arm64.lib +0 -0
  36. numpy/_core/_rational_tests.cp314t-win_arm64.pyd +0 -0
  37. numpy/_core/_simd.cp314t-win_arm64.lib +0 -0
  38. numpy/_core/_simd.cp314t-win_arm64.pyd +0 -0
  39. numpy/_core/_simd.pyi +35 -0
  40. numpy/_core/_string_helpers.py +100 -0
  41. numpy/_core/_string_helpers.pyi +12 -0
  42. numpy/_core/_struct_ufunc_tests.cp314t-win_arm64.lib +0 -0
  43. numpy/_core/_struct_ufunc_tests.cp314t-win_arm64.pyd +0 -0
  44. numpy/_core/_type_aliases.py +131 -0
  45. numpy/_core/_type_aliases.pyi +86 -0
  46. numpy/_core/_ufunc_config.py +515 -0
  47. numpy/_core/_ufunc_config.pyi +69 -0
  48. numpy/_core/_umath_tests.cp314t-win_arm64.lib +0 -0
  49. numpy/_core/_umath_tests.cp314t-win_arm64.pyd +0 -0
  50. numpy/_core/_umath_tests.pyi +47 -0
  51. numpy/_core/arrayprint.py +1779 -0
  52. numpy/_core/arrayprint.pyi +158 -0
  53. numpy/_core/cversions.py +13 -0
  54. numpy/_core/defchararray.py +1414 -0
  55. numpy/_core/defchararray.pyi +1150 -0
  56. numpy/_core/einsumfunc.py +1650 -0
  57. numpy/_core/einsumfunc.pyi +184 -0
  58. numpy/_core/fromnumeric.py +4233 -0
  59. numpy/_core/fromnumeric.pyi +1735 -0
  60. numpy/_core/function_base.py +547 -0
  61. numpy/_core/function_base.pyi +276 -0
  62. numpy/_core/getlimits.py +462 -0
  63. numpy/_core/getlimits.pyi +124 -0
  64. numpy/_core/include/numpy/__multiarray_api.c +376 -0
  65. numpy/_core/include/numpy/__multiarray_api.h +1628 -0
  66. numpy/_core/include/numpy/__ufunc_api.c +55 -0
  67. numpy/_core/include/numpy/__ufunc_api.h +349 -0
  68. numpy/_core/include/numpy/_neighborhood_iterator_imp.h +90 -0
  69. numpy/_core/include/numpy/_numpyconfig.h +33 -0
  70. numpy/_core/include/numpy/_public_dtype_api_table.h +86 -0
  71. numpy/_core/include/numpy/arrayobject.h +7 -0
  72. numpy/_core/include/numpy/arrayscalars.h +198 -0
  73. numpy/_core/include/numpy/dtype_api.h +547 -0
  74. numpy/_core/include/numpy/halffloat.h +70 -0
  75. numpy/_core/include/numpy/ndarrayobject.h +304 -0
  76. numpy/_core/include/numpy/ndarraytypes.h +1982 -0
  77. numpy/_core/include/numpy/npy_2_compat.h +249 -0
  78. numpy/_core/include/numpy/npy_2_complexcompat.h +28 -0
  79. numpy/_core/include/numpy/npy_3kcompat.h +374 -0
  80. numpy/_core/include/numpy/npy_common.h +989 -0
  81. numpy/_core/include/numpy/npy_cpu.h +126 -0
  82. numpy/_core/include/numpy/npy_endian.h +79 -0
  83. numpy/_core/include/numpy/npy_math.h +602 -0
  84. numpy/_core/include/numpy/npy_no_deprecated_api.h +20 -0
  85. numpy/_core/include/numpy/npy_os.h +42 -0
  86. numpy/_core/include/numpy/numpyconfig.h +185 -0
  87. numpy/_core/include/numpy/random/LICENSE.txt +21 -0
  88. numpy/_core/include/numpy/random/bitgen.h +20 -0
  89. numpy/_core/include/numpy/random/distributions.h +209 -0
  90. numpy/_core/include/numpy/random/libdivide.h +2079 -0
  91. numpy/_core/include/numpy/ufuncobject.h +343 -0
  92. numpy/_core/include/numpy/utils.h +37 -0
  93. numpy/_core/lib/npy-pkg-config/mlib.ini +12 -0
  94. numpy/_core/lib/npy-pkg-config/npymath.ini +20 -0
  95. numpy/_core/lib/npymath.lib +0 -0
  96. numpy/_core/lib/pkgconfig/numpy.pc +7 -0
  97. numpy/_core/memmap.py +363 -0
  98. numpy/_core/memmap.pyi +3 -0
  99. numpy/_core/multiarray.py +1740 -0
  100. numpy/_core/multiarray.pyi +1316 -0
  101. numpy/_core/numeric.py +2758 -0
  102. numpy/_core/numeric.pyi +1276 -0
  103. numpy/_core/numerictypes.py +633 -0
  104. numpy/_core/numerictypes.pyi +196 -0
  105. numpy/_core/overrides.py +188 -0
  106. numpy/_core/overrides.pyi +47 -0
  107. numpy/_core/printoptions.py +32 -0
  108. numpy/_core/printoptions.pyi +28 -0
  109. numpy/_core/records.py +1088 -0
  110. numpy/_core/records.pyi +340 -0
  111. numpy/_core/shape_base.py +996 -0
  112. numpy/_core/shape_base.pyi +182 -0
  113. numpy/_core/strings.py +1813 -0
  114. numpy/_core/strings.pyi +536 -0
  115. numpy/_core/tests/_locales.py +72 -0
  116. numpy/_core/tests/_natype.py +144 -0
  117. numpy/_core/tests/data/astype_copy.pkl +0 -0
  118. numpy/_core/tests/data/generate_umath_validation_data.cpp +170 -0
  119. numpy/_core/tests/data/recarray_from_file.fits +0 -0
  120. numpy/_core/tests/data/umath-validation-set-README.txt +15 -0
  121. numpy/_core/tests/data/umath-validation-set-arccos.csv +1429 -0
  122. numpy/_core/tests/data/umath-validation-set-arccosh.csv +1429 -0
  123. numpy/_core/tests/data/umath-validation-set-arcsin.csv +1429 -0
  124. numpy/_core/tests/data/umath-validation-set-arcsinh.csv +1429 -0
  125. numpy/_core/tests/data/umath-validation-set-arctan.csv +1429 -0
  126. numpy/_core/tests/data/umath-validation-set-arctanh.csv +1429 -0
  127. numpy/_core/tests/data/umath-validation-set-cbrt.csv +1429 -0
  128. numpy/_core/tests/data/umath-validation-set-cos.csv +1375 -0
  129. numpy/_core/tests/data/umath-validation-set-cosh.csv +1429 -0
  130. numpy/_core/tests/data/umath-validation-set-exp.csv +412 -0
  131. numpy/_core/tests/data/umath-validation-set-exp2.csv +1429 -0
  132. numpy/_core/tests/data/umath-validation-set-expm1.csv +1429 -0
  133. numpy/_core/tests/data/umath-validation-set-log.csv +271 -0
  134. numpy/_core/tests/data/umath-validation-set-log10.csv +1629 -0
  135. numpy/_core/tests/data/umath-validation-set-log1p.csv +1429 -0
  136. numpy/_core/tests/data/umath-validation-set-log2.csv +1629 -0
  137. numpy/_core/tests/data/umath-validation-set-sin.csv +1370 -0
  138. numpy/_core/tests/data/umath-validation-set-sinh.csv +1429 -0
  139. numpy/_core/tests/data/umath-validation-set-tan.csv +1429 -0
  140. numpy/_core/tests/data/umath-validation-set-tanh.csv +1429 -0
  141. numpy/_core/tests/examples/cython/checks.pyx +373 -0
  142. numpy/_core/tests/examples/cython/meson.build +43 -0
  143. numpy/_core/tests/examples/cython/setup.py +39 -0
  144. numpy/_core/tests/examples/limited_api/limited_api1.c +17 -0
  145. numpy/_core/tests/examples/limited_api/limited_api2.pyx +11 -0
  146. numpy/_core/tests/examples/limited_api/limited_api_latest.c +19 -0
  147. numpy/_core/tests/examples/limited_api/meson.build +59 -0
  148. numpy/_core/tests/examples/limited_api/setup.py +24 -0
  149. numpy/_core/tests/test__exceptions.py +90 -0
  150. numpy/_core/tests/test_abc.py +54 -0
  151. numpy/_core/tests/test_api.py +655 -0
  152. numpy/_core/tests/test_argparse.py +90 -0
  153. numpy/_core/tests/test_array_api_info.py +113 -0
  154. numpy/_core/tests/test_array_coercion.py +928 -0
  155. numpy/_core/tests/test_array_interface.py +222 -0
  156. numpy/_core/tests/test_arraymethod.py +84 -0
  157. numpy/_core/tests/test_arrayobject.py +75 -0
  158. numpy/_core/tests/test_arrayprint.py +1324 -0
  159. numpy/_core/tests/test_casting_floatingpoint_errors.py +154 -0
  160. numpy/_core/tests/test_casting_unittests.py +955 -0
  161. numpy/_core/tests/test_conversion_utils.py +209 -0
  162. numpy/_core/tests/test_cpu_dispatcher.py +48 -0
  163. numpy/_core/tests/test_cpu_features.py +450 -0
  164. numpy/_core/tests/test_custom_dtypes.py +393 -0
  165. numpy/_core/tests/test_cython.py +352 -0
  166. numpy/_core/tests/test_datetime.py +2792 -0
  167. numpy/_core/tests/test_defchararray.py +858 -0
  168. numpy/_core/tests/test_deprecations.py +460 -0
  169. numpy/_core/tests/test_dlpack.py +190 -0
  170. numpy/_core/tests/test_dtype.py +2110 -0
  171. numpy/_core/tests/test_einsum.py +1351 -0
  172. numpy/_core/tests/test_errstate.py +131 -0
  173. numpy/_core/tests/test_extint128.py +217 -0
  174. numpy/_core/tests/test_finfo.py +86 -0
  175. numpy/_core/tests/test_function_base.py +504 -0
  176. numpy/_core/tests/test_getlimits.py +171 -0
  177. numpy/_core/tests/test_half.py +593 -0
  178. numpy/_core/tests/test_hashtable.py +36 -0
  179. numpy/_core/tests/test_indexerrors.py +122 -0
  180. numpy/_core/tests/test_indexing.py +1692 -0
  181. numpy/_core/tests/test_item_selection.py +167 -0
  182. numpy/_core/tests/test_limited_api.py +102 -0
  183. numpy/_core/tests/test_longdouble.py +370 -0
  184. numpy/_core/tests/test_mem_overlap.py +933 -0
  185. numpy/_core/tests/test_mem_policy.py +453 -0
  186. numpy/_core/tests/test_memmap.py +248 -0
  187. numpy/_core/tests/test_multiarray.py +11008 -0
  188. numpy/_core/tests/test_multiprocessing.py +55 -0
  189. numpy/_core/tests/test_multithreading.py +377 -0
  190. numpy/_core/tests/test_nditer.py +3533 -0
  191. numpy/_core/tests/test_nep50_promotions.py +287 -0
  192. numpy/_core/tests/test_numeric.py +4295 -0
  193. numpy/_core/tests/test_numerictypes.py +650 -0
  194. numpy/_core/tests/test_overrides.py +800 -0
  195. numpy/_core/tests/test_print.py +202 -0
  196. numpy/_core/tests/test_protocols.py +46 -0
  197. numpy/_core/tests/test_records.py +544 -0
  198. numpy/_core/tests/test_regression.py +2677 -0
  199. numpy/_core/tests/test_scalar_ctors.py +203 -0
  200. numpy/_core/tests/test_scalar_methods.py +328 -0
  201. numpy/_core/tests/test_scalarbuffer.py +153 -0
  202. numpy/_core/tests/test_scalarinherit.py +105 -0
  203. numpy/_core/tests/test_scalarmath.py +1168 -0
  204. numpy/_core/tests/test_scalarprint.py +403 -0
  205. numpy/_core/tests/test_shape_base.py +904 -0
  206. numpy/_core/tests/test_simd.py +1345 -0
  207. numpy/_core/tests/test_simd_module.py +105 -0
  208. numpy/_core/tests/test_stringdtype.py +1855 -0
  209. numpy/_core/tests/test_strings.py +1523 -0
  210. numpy/_core/tests/test_ufunc.py +3405 -0
  211. numpy/_core/tests/test_umath.py +4962 -0
  212. numpy/_core/tests/test_umath_accuracy.py +132 -0
  213. numpy/_core/tests/test_umath_complex.py +631 -0
  214. numpy/_core/tests/test_unicode.py +369 -0
  215. numpy/_core/umath.py +60 -0
  216. numpy/_core/umath.pyi +232 -0
  217. numpy/_distributor_init.py +15 -0
  218. numpy/_distributor_init.pyi +1 -0
  219. numpy/_expired_attrs_2_0.py +78 -0
  220. numpy/_expired_attrs_2_0.pyi +61 -0
  221. numpy/_globals.py +121 -0
  222. numpy/_globals.pyi +17 -0
  223. numpy/_pyinstaller/__init__.py +0 -0
  224. numpy/_pyinstaller/__init__.pyi +0 -0
  225. numpy/_pyinstaller/hook-numpy.py +36 -0
  226. numpy/_pyinstaller/hook-numpy.pyi +6 -0
  227. numpy/_pyinstaller/tests/__init__.py +16 -0
  228. numpy/_pyinstaller/tests/pyinstaller-smoke.py +32 -0
  229. numpy/_pyinstaller/tests/test_pyinstaller.py +35 -0
  230. numpy/_pytesttester.py +201 -0
  231. numpy/_pytesttester.pyi +18 -0
  232. numpy/_typing/__init__.py +173 -0
  233. numpy/_typing/_add_docstring.py +153 -0
  234. numpy/_typing/_array_like.py +106 -0
  235. numpy/_typing/_char_codes.py +213 -0
  236. numpy/_typing/_dtype_like.py +114 -0
  237. numpy/_typing/_extended_precision.py +15 -0
  238. numpy/_typing/_nbit.py +19 -0
  239. numpy/_typing/_nbit_base.py +94 -0
  240. numpy/_typing/_nbit_base.pyi +39 -0
  241. numpy/_typing/_nested_sequence.py +79 -0
  242. numpy/_typing/_scalars.py +20 -0
  243. numpy/_typing/_shape.py +8 -0
  244. numpy/_typing/_ufunc.py +7 -0
  245. numpy/_typing/_ufunc.pyi +975 -0
  246. numpy/_utils/__init__.py +95 -0
  247. numpy/_utils/__init__.pyi +28 -0
  248. numpy/_utils/_convertions.py +18 -0
  249. numpy/_utils/_convertions.pyi +4 -0
  250. numpy/_utils/_inspect.py +192 -0
  251. numpy/_utils/_inspect.pyi +70 -0
  252. numpy/_utils/_pep440.py +486 -0
  253. numpy/_utils/_pep440.pyi +118 -0
  254. numpy/char/__init__.py +2 -0
  255. numpy/char/__init__.pyi +111 -0
  256. numpy/conftest.py +248 -0
  257. numpy/core/__init__.py +33 -0
  258. numpy/core/__init__.pyi +0 -0
  259. numpy/core/_dtype.py +10 -0
  260. numpy/core/_dtype.pyi +0 -0
  261. numpy/core/_dtype_ctypes.py +10 -0
  262. numpy/core/_dtype_ctypes.pyi +0 -0
  263. numpy/core/_internal.py +27 -0
  264. numpy/core/_multiarray_umath.py +57 -0
  265. numpy/core/_utils.py +21 -0
  266. numpy/core/arrayprint.py +10 -0
  267. numpy/core/defchararray.py +10 -0
  268. numpy/core/einsumfunc.py +10 -0
  269. numpy/core/fromnumeric.py +10 -0
  270. numpy/core/function_base.py +10 -0
  271. numpy/core/getlimits.py +10 -0
  272. numpy/core/multiarray.py +25 -0
  273. numpy/core/numeric.py +12 -0
  274. numpy/core/numerictypes.py +10 -0
  275. numpy/core/overrides.py +10 -0
  276. numpy/core/overrides.pyi +7 -0
  277. numpy/core/records.py +10 -0
  278. numpy/core/shape_base.py +10 -0
  279. numpy/core/umath.py +10 -0
  280. numpy/ctypeslib/__init__.py +13 -0
  281. numpy/ctypeslib/__init__.pyi +15 -0
  282. numpy/ctypeslib/_ctypeslib.py +603 -0
  283. numpy/ctypeslib/_ctypeslib.pyi +236 -0
  284. numpy/doc/ufuncs.py +138 -0
  285. numpy/dtypes.py +41 -0
  286. numpy/dtypes.pyi +630 -0
  287. numpy/exceptions.py +246 -0
  288. numpy/exceptions.pyi +27 -0
  289. numpy/f2py/__init__.py +86 -0
  290. numpy/f2py/__init__.pyi +5 -0
  291. numpy/f2py/__main__.py +5 -0
  292. numpy/f2py/__version__.py +1 -0
  293. numpy/f2py/__version__.pyi +1 -0
  294. numpy/f2py/_backends/__init__.py +9 -0
  295. numpy/f2py/_backends/__init__.pyi +5 -0
  296. numpy/f2py/_backends/_backend.py +44 -0
  297. numpy/f2py/_backends/_backend.pyi +46 -0
  298. numpy/f2py/_backends/_distutils.py +76 -0
  299. numpy/f2py/_backends/_distutils.pyi +13 -0
  300. numpy/f2py/_backends/_meson.py +244 -0
  301. numpy/f2py/_backends/_meson.pyi +62 -0
  302. numpy/f2py/_backends/meson.build.template +58 -0
  303. numpy/f2py/_isocbind.py +62 -0
  304. numpy/f2py/_isocbind.pyi +13 -0
  305. numpy/f2py/_src_pyf.py +247 -0
  306. numpy/f2py/_src_pyf.pyi +28 -0
  307. numpy/f2py/auxfuncs.py +1004 -0
  308. numpy/f2py/auxfuncs.pyi +262 -0
  309. numpy/f2py/capi_maps.py +811 -0
  310. numpy/f2py/capi_maps.pyi +33 -0
  311. numpy/f2py/cb_rules.py +665 -0
  312. numpy/f2py/cb_rules.pyi +17 -0
  313. numpy/f2py/cfuncs.py +1563 -0
  314. numpy/f2py/cfuncs.pyi +31 -0
  315. numpy/f2py/common_rules.py +143 -0
  316. numpy/f2py/common_rules.pyi +9 -0
  317. numpy/f2py/crackfortran.py +3725 -0
  318. numpy/f2py/crackfortran.pyi +266 -0
  319. numpy/f2py/diagnose.py +149 -0
  320. numpy/f2py/diagnose.pyi +1 -0
  321. numpy/f2py/f2py2e.py +788 -0
  322. numpy/f2py/f2py2e.pyi +74 -0
  323. numpy/f2py/f90mod_rules.py +269 -0
  324. numpy/f2py/f90mod_rules.pyi +16 -0
  325. numpy/f2py/func2subr.py +329 -0
  326. numpy/f2py/func2subr.pyi +7 -0
  327. numpy/f2py/rules.py +1629 -0
  328. numpy/f2py/rules.pyi +41 -0
  329. numpy/f2py/setup.cfg +3 -0
  330. numpy/f2py/src/fortranobject.c +1436 -0
  331. numpy/f2py/src/fortranobject.h +173 -0
  332. numpy/f2py/symbolic.py +1518 -0
  333. numpy/f2py/symbolic.pyi +219 -0
  334. numpy/f2py/tests/__init__.py +16 -0
  335. numpy/f2py/tests/src/abstract_interface/foo.f90 +34 -0
  336. numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 +6 -0
  337. numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +235 -0
  338. numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap +1 -0
  339. numpy/f2py/tests/src/assumed_shape/foo_free.f90 +34 -0
  340. numpy/f2py/tests/src/assumed_shape/foo_mod.f90 +41 -0
  341. numpy/f2py/tests/src/assumed_shape/foo_use.f90 +19 -0
  342. numpy/f2py/tests/src/assumed_shape/precision.f90 +4 -0
  343. numpy/f2py/tests/src/block_docstring/foo.f +6 -0
  344. numpy/f2py/tests/src/callback/foo.f +62 -0
  345. numpy/f2py/tests/src/callback/gh17797.f90 +7 -0
  346. numpy/f2py/tests/src/callback/gh18335.f90 +17 -0
  347. numpy/f2py/tests/src/callback/gh25211.f +10 -0
  348. numpy/f2py/tests/src/callback/gh25211.pyf +18 -0
  349. numpy/f2py/tests/src/callback/gh26681.f90 +18 -0
  350. numpy/f2py/tests/src/cli/gh_22819.pyf +6 -0
  351. numpy/f2py/tests/src/cli/hi77.f +3 -0
  352. numpy/f2py/tests/src/cli/hiworld.f90 +3 -0
  353. numpy/f2py/tests/src/common/block.f +11 -0
  354. numpy/f2py/tests/src/common/gh19161.f90 +10 -0
  355. numpy/f2py/tests/src/crackfortran/accesstype.f90 +13 -0
  356. numpy/f2py/tests/src/crackfortran/common_with_division.f +17 -0
  357. numpy/f2py/tests/src/crackfortran/data_common.f +8 -0
  358. numpy/f2py/tests/src/crackfortran/data_multiplier.f +5 -0
  359. numpy/f2py/tests/src/crackfortran/data_stmts.f90 +20 -0
  360. numpy/f2py/tests/src/crackfortran/data_with_comments.f +8 -0
  361. numpy/f2py/tests/src/crackfortran/foo_deps.f90 +6 -0
  362. numpy/f2py/tests/src/crackfortran/gh15035.f +16 -0
  363. numpy/f2py/tests/src/crackfortran/gh17859.f +12 -0
  364. numpy/f2py/tests/src/crackfortran/gh22648.pyf +7 -0
  365. numpy/f2py/tests/src/crackfortran/gh23533.f +5 -0
  366. numpy/f2py/tests/src/crackfortran/gh23598.f90 +4 -0
  367. numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 +11 -0
  368. numpy/f2py/tests/src/crackfortran/gh23879.f90 +20 -0
  369. numpy/f2py/tests/src/crackfortran/gh27697.f90 +12 -0
  370. numpy/f2py/tests/src/crackfortran/gh2848.f90 +13 -0
  371. numpy/f2py/tests/src/crackfortran/operators.f90 +49 -0
  372. numpy/f2py/tests/src/crackfortran/privatemod.f90 +11 -0
  373. numpy/f2py/tests/src/crackfortran/publicmod.f90 +10 -0
  374. numpy/f2py/tests/src/crackfortran/pubprivmod.f90 +10 -0
  375. numpy/f2py/tests/src/crackfortran/unicode_comment.f90 +4 -0
  376. numpy/f2py/tests/src/f2cmap/.f2py_f2cmap +1 -0
  377. numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 +9 -0
  378. numpy/f2py/tests/src/isocintrin/isoCtests.f90 +34 -0
  379. numpy/f2py/tests/src/kind/foo.f90 +20 -0
  380. numpy/f2py/tests/src/mixed/foo.f +5 -0
  381. numpy/f2py/tests/src/mixed/foo_fixed.f90 +8 -0
  382. numpy/f2py/tests/src/mixed/foo_free.f90 +8 -0
  383. numpy/f2py/tests/src/modules/gh25337/data.f90 +8 -0
  384. numpy/f2py/tests/src/modules/gh25337/use_data.f90 +6 -0
  385. numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 +21 -0
  386. numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 +21 -0
  387. numpy/f2py/tests/src/modules/module_data_docstring.f90 +12 -0
  388. numpy/f2py/tests/src/modules/use_modules.f90 +20 -0
  389. numpy/f2py/tests/src/negative_bounds/issue_20853.f90 +7 -0
  390. numpy/f2py/tests/src/parameter/constant_array.f90 +45 -0
  391. numpy/f2py/tests/src/parameter/constant_both.f90 +57 -0
  392. numpy/f2py/tests/src/parameter/constant_compound.f90 +15 -0
  393. numpy/f2py/tests/src/parameter/constant_integer.f90 +22 -0
  394. numpy/f2py/tests/src/parameter/constant_non_compound.f90 +23 -0
  395. numpy/f2py/tests/src/parameter/constant_real.f90 +23 -0
  396. numpy/f2py/tests/src/quoted_character/foo.f +14 -0
  397. numpy/f2py/tests/src/regression/AB.inc +1 -0
  398. numpy/f2py/tests/src/regression/assignOnlyModule.f90 +25 -0
  399. numpy/f2py/tests/src/regression/datonly.f90 +17 -0
  400. numpy/f2py/tests/src/regression/f77comments.f +26 -0
  401. numpy/f2py/tests/src/regression/f77fixedform.f95 +5 -0
  402. numpy/f2py/tests/src/regression/f90continuation.f90 +9 -0
  403. numpy/f2py/tests/src/regression/incfile.f90 +5 -0
  404. numpy/f2py/tests/src/regression/inout.f90 +9 -0
  405. numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 +5 -0
  406. numpy/f2py/tests/src/regression/mod_derived_types.f90 +23 -0
  407. numpy/f2py/tests/src/return_character/foo77.f +45 -0
  408. numpy/f2py/tests/src/return_character/foo90.f90 +48 -0
  409. numpy/f2py/tests/src/return_complex/foo77.f +45 -0
  410. numpy/f2py/tests/src/return_complex/foo90.f90 +48 -0
  411. numpy/f2py/tests/src/return_integer/foo77.f +56 -0
  412. numpy/f2py/tests/src/return_integer/foo90.f90 +59 -0
  413. numpy/f2py/tests/src/return_logical/foo77.f +56 -0
  414. numpy/f2py/tests/src/return_logical/foo90.f90 +59 -0
  415. numpy/f2py/tests/src/return_real/foo77.f +45 -0
  416. numpy/f2py/tests/src/return_real/foo90.f90 +48 -0
  417. numpy/f2py/tests/src/routines/funcfortranname.f +5 -0
  418. numpy/f2py/tests/src/routines/funcfortranname.pyf +11 -0
  419. numpy/f2py/tests/src/routines/subrout.f +4 -0
  420. numpy/f2py/tests/src/routines/subrout.pyf +10 -0
  421. numpy/f2py/tests/src/size/foo.f90 +44 -0
  422. numpy/f2py/tests/src/string/char.f90 +29 -0
  423. numpy/f2py/tests/src/string/fixed_string.f90 +34 -0
  424. numpy/f2py/tests/src/string/gh24008.f +8 -0
  425. numpy/f2py/tests/src/string/gh24662.f90 +7 -0
  426. numpy/f2py/tests/src/string/gh25286.f90 +14 -0
  427. numpy/f2py/tests/src/string/gh25286.pyf +12 -0
  428. numpy/f2py/tests/src/string/gh25286_bc.pyf +12 -0
  429. numpy/f2py/tests/src/string/scalar_string.f90 +9 -0
  430. numpy/f2py/tests/src/string/string.f +12 -0
  431. numpy/f2py/tests/src/value_attrspec/gh21665.f90 +9 -0
  432. numpy/f2py/tests/test_abstract_interface.py +26 -0
  433. numpy/f2py/tests/test_array_from_pyobj.py +678 -0
  434. numpy/f2py/tests/test_assumed_shape.py +50 -0
  435. numpy/f2py/tests/test_block_docstring.py +20 -0
  436. numpy/f2py/tests/test_callback.py +263 -0
  437. numpy/f2py/tests/test_character.py +641 -0
  438. numpy/f2py/tests/test_common.py +23 -0
  439. numpy/f2py/tests/test_crackfortran.py +421 -0
  440. numpy/f2py/tests/test_data.py +71 -0
  441. numpy/f2py/tests/test_docs.py +66 -0
  442. numpy/f2py/tests/test_f2cmap.py +17 -0
  443. numpy/f2py/tests/test_f2py2e.py +983 -0
  444. numpy/f2py/tests/test_isoc.py +56 -0
  445. numpy/f2py/tests/test_kind.py +52 -0
  446. numpy/f2py/tests/test_mixed.py +35 -0
  447. numpy/f2py/tests/test_modules.py +83 -0
  448. numpy/f2py/tests/test_parameter.py +129 -0
  449. numpy/f2py/tests/test_pyf_src.py +43 -0
  450. numpy/f2py/tests/test_quoted_character.py +18 -0
  451. numpy/f2py/tests/test_regression.py +187 -0
  452. numpy/f2py/tests/test_return_character.py +48 -0
  453. numpy/f2py/tests/test_return_complex.py +67 -0
  454. numpy/f2py/tests/test_return_integer.py +55 -0
  455. numpy/f2py/tests/test_return_logical.py +65 -0
  456. numpy/f2py/tests/test_return_real.py +109 -0
  457. numpy/f2py/tests/test_routines.py +29 -0
  458. numpy/f2py/tests/test_semicolon_split.py +75 -0
  459. numpy/f2py/tests/test_size.py +45 -0
  460. numpy/f2py/tests/test_string.py +100 -0
  461. numpy/f2py/tests/test_symbolic.py +500 -0
  462. numpy/f2py/tests/test_value_attrspec.py +15 -0
  463. numpy/f2py/tests/util.py +442 -0
  464. numpy/f2py/use_rules.py +99 -0
  465. numpy/f2py/use_rules.pyi +9 -0
  466. numpy/fft/__init__.py +213 -0
  467. numpy/fft/__init__.pyi +38 -0
  468. numpy/fft/_helper.py +235 -0
  469. numpy/fft/_helper.pyi +44 -0
  470. numpy/fft/_pocketfft.py +1693 -0
  471. numpy/fft/_pocketfft.pyi +137 -0
  472. numpy/fft/_pocketfft_umath.cp314t-win_arm64.lib +0 -0
  473. numpy/fft/_pocketfft_umath.cp314t-win_arm64.pyd +0 -0
  474. numpy/fft/tests/__init__.py +0 -0
  475. numpy/fft/tests/test_helper.py +167 -0
  476. numpy/fft/tests/test_pocketfft.py +589 -0
  477. numpy/lib/__init__.py +97 -0
  478. numpy/lib/__init__.pyi +52 -0
  479. numpy/lib/_array_utils_impl.py +62 -0
  480. numpy/lib/_array_utils_impl.pyi +10 -0
  481. numpy/lib/_arraypad_impl.py +926 -0
  482. numpy/lib/_arraypad_impl.pyi +88 -0
  483. numpy/lib/_arraysetops_impl.py +1158 -0
  484. numpy/lib/_arraysetops_impl.pyi +462 -0
  485. numpy/lib/_arrayterator_impl.py +224 -0
  486. numpy/lib/_arrayterator_impl.pyi +45 -0
  487. numpy/lib/_datasource.py +700 -0
  488. numpy/lib/_datasource.pyi +30 -0
  489. numpy/lib/_format_impl.py +1036 -0
  490. numpy/lib/_format_impl.pyi +56 -0
  491. numpy/lib/_function_base_impl.py +5760 -0
  492. numpy/lib/_function_base_impl.pyi +2324 -0
  493. numpy/lib/_histograms_impl.py +1085 -0
  494. numpy/lib/_histograms_impl.pyi +40 -0
  495. numpy/lib/_index_tricks_impl.py +1048 -0
  496. numpy/lib/_index_tricks_impl.pyi +267 -0
  497. numpy/lib/_iotools.py +900 -0
  498. numpy/lib/_iotools.pyi +116 -0
  499. numpy/lib/_nanfunctions_impl.py +2006 -0
  500. numpy/lib/_nanfunctions_impl.pyi +48 -0
  501. numpy/lib/_npyio_impl.py +2583 -0
  502. numpy/lib/_npyio_impl.pyi +299 -0
  503. numpy/lib/_polynomial_impl.py +1465 -0
  504. numpy/lib/_polynomial_impl.pyi +338 -0
  505. numpy/lib/_scimath_impl.py +642 -0
  506. numpy/lib/_scimath_impl.pyi +93 -0
  507. numpy/lib/_shape_base_impl.py +1289 -0
  508. numpy/lib/_shape_base_impl.pyi +236 -0
  509. numpy/lib/_stride_tricks_impl.py +582 -0
  510. numpy/lib/_stride_tricks_impl.pyi +73 -0
  511. numpy/lib/_twodim_base_impl.py +1201 -0
  512. numpy/lib/_twodim_base_impl.pyi +408 -0
  513. numpy/lib/_type_check_impl.py +710 -0
  514. numpy/lib/_type_check_impl.pyi +348 -0
  515. numpy/lib/_ufunclike_impl.py +199 -0
  516. numpy/lib/_ufunclike_impl.pyi +60 -0
  517. numpy/lib/_user_array_impl.py +310 -0
  518. numpy/lib/_user_array_impl.pyi +226 -0
  519. numpy/lib/_utils_impl.py +784 -0
  520. numpy/lib/_utils_impl.pyi +22 -0
  521. numpy/lib/_version.py +153 -0
  522. numpy/lib/_version.pyi +17 -0
  523. numpy/lib/array_utils.py +7 -0
  524. numpy/lib/array_utils.pyi +6 -0
  525. numpy/lib/format.py +24 -0
  526. numpy/lib/format.pyi +24 -0
  527. numpy/lib/introspect.py +94 -0
  528. numpy/lib/introspect.pyi +3 -0
  529. numpy/lib/mixins.py +180 -0
  530. numpy/lib/mixins.pyi +78 -0
  531. numpy/lib/npyio.py +1 -0
  532. numpy/lib/npyio.pyi +5 -0
  533. numpy/lib/recfunctions.py +1681 -0
  534. numpy/lib/recfunctions.pyi +444 -0
  535. numpy/lib/scimath.py +13 -0
  536. numpy/lib/scimath.pyi +12 -0
  537. numpy/lib/stride_tricks.py +1 -0
  538. numpy/lib/stride_tricks.pyi +4 -0
  539. numpy/lib/tests/__init__.py +0 -0
  540. numpy/lib/tests/data/py2-np0-objarr.npy +0 -0
  541. numpy/lib/tests/data/py2-objarr.npy +0 -0
  542. numpy/lib/tests/data/py2-objarr.npz +0 -0
  543. numpy/lib/tests/data/py3-objarr.npy +0 -0
  544. numpy/lib/tests/data/py3-objarr.npz +0 -0
  545. numpy/lib/tests/data/python3.npy +0 -0
  546. numpy/lib/tests/data/win64python2.npy +0 -0
  547. numpy/lib/tests/test__datasource.py +328 -0
  548. numpy/lib/tests/test__iotools.py +358 -0
  549. numpy/lib/tests/test__version.py +64 -0
  550. numpy/lib/tests/test_array_utils.py +32 -0
  551. numpy/lib/tests/test_arraypad.py +1427 -0
  552. numpy/lib/tests/test_arraysetops.py +1302 -0
  553. numpy/lib/tests/test_arrayterator.py +45 -0
  554. numpy/lib/tests/test_format.py +1054 -0
  555. numpy/lib/tests/test_function_base.py +4750 -0
  556. numpy/lib/tests/test_histograms.py +855 -0
  557. numpy/lib/tests/test_index_tricks.py +693 -0
  558. numpy/lib/tests/test_io.py +2857 -0
  559. numpy/lib/tests/test_loadtxt.py +1099 -0
  560. numpy/lib/tests/test_mixins.py +215 -0
  561. numpy/lib/tests/test_nanfunctions.py +1438 -0
  562. numpy/lib/tests/test_packbits.py +376 -0
  563. numpy/lib/tests/test_polynomial.py +325 -0
  564. numpy/lib/tests/test_recfunctions.py +1042 -0
  565. numpy/lib/tests/test_regression.py +231 -0
  566. numpy/lib/tests/test_shape_base.py +813 -0
  567. numpy/lib/tests/test_stride_tricks.py +655 -0
  568. numpy/lib/tests/test_twodim_base.py +559 -0
  569. numpy/lib/tests/test_type_check.py +473 -0
  570. numpy/lib/tests/test_ufunclike.py +97 -0
  571. numpy/lib/tests/test_utils.py +80 -0
  572. numpy/lib/user_array.py +1 -0
  573. numpy/lib/user_array.pyi +1 -0
  574. numpy/linalg/__init__.py +95 -0
  575. numpy/linalg/__init__.pyi +71 -0
  576. numpy/linalg/_linalg.py +3657 -0
  577. numpy/linalg/_linalg.pyi +548 -0
  578. numpy/linalg/_umath_linalg.cp314t-win_arm64.lib +0 -0
  579. numpy/linalg/_umath_linalg.cp314t-win_arm64.pyd +0 -0
  580. numpy/linalg/_umath_linalg.pyi +60 -0
  581. numpy/linalg/lapack_lite.cp314t-win_arm64.lib +0 -0
  582. numpy/linalg/lapack_lite.cp314t-win_arm64.pyd +0 -0
  583. numpy/linalg/lapack_lite.pyi +143 -0
  584. numpy/linalg/tests/__init__.py +0 -0
  585. numpy/linalg/tests/test_deprecations.py +21 -0
  586. numpy/linalg/tests/test_linalg.py +2442 -0
  587. numpy/linalg/tests/test_regression.py +182 -0
  588. numpy/ma/API_CHANGES.txt +135 -0
  589. numpy/ma/LICENSE +24 -0
  590. numpy/ma/README.rst +236 -0
  591. numpy/ma/__init__.py +53 -0
  592. numpy/ma/__init__.pyi +458 -0
  593. numpy/ma/core.py +8929 -0
  594. numpy/ma/core.pyi +3720 -0
  595. numpy/ma/extras.py +2266 -0
  596. numpy/ma/extras.pyi +297 -0
  597. numpy/ma/mrecords.py +762 -0
  598. numpy/ma/mrecords.pyi +96 -0
  599. numpy/ma/tests/__init__.py +0 -0
  600. numpy/ma/tests/test_arrayobject.py +40 -0
  601. numpy/ma/tests/test_core.py +6008 -0
  602. numpy/ma/tests/test_deprecations.py +65 -0
  603. numpy/ma/tests/test_extras.py +1945 -0
  604. numpy/ma/tests/test_mrecords.py +495 -0
  605. numpy/ma/tests/test_old_ma.py +939 -0
  606. numpy/ma/tests/test_regression.py +83 -0
  607. numpy/ma/tests/test_subclassing.py +469 -0
  608. numpy/ma/testutils.py +294 -0
  609. numpy/ma/testutils.pyi +69 -0
  610. numpy/matlib.py +380 -0
  611. numpy/matlib.pyi +580 -0
  612. numpy/matrixlib/__init__.py +12 -0
  613. numpy/matrixlib/__init__.pyi +3 -0
  614. numpy/matrixlib/defmatrix.py +1119 -0
  615. numpy/matrixlib/defmatrix.pyi +218 -0
  616. numpy/matrixlib/tests/__init__.py +0 -0
  617. numpy/matrixlib/tests/test_defmatrix.py +455 -0
  618. numpy/matrixlib/tests/test_interaction.py +360 -0
  619. numpy/matrixlib/tests/test_masked_matrix.py +240 -0
  620. numpy/matrixlib/tests/test_matrix_linalg.py +110 -0
  621. numpy/matrixlib/tests/test_multiarray.py +17 -0
  622. numpy/matrixlib/tests/test_numeric.py +18 -0
  623. numpy/matrixlib/tests/test_regression.py +31 -0
  624. numpy/polynomial/__init__.py +187 -0
  625. numpy/polynomial/__init__.pyi +31 -0
  626. numpy/polynomial/_polybase.py +1191 -0
  627. numpy/polynomial/_polybase.pyi +262 -0
  628. numpy/polynomial/_polytypes.pyi +501 -0
  629. numpy/polynomial/chebyshev.py +2001 -0
  630. numpy/polynomial/chebyshev.pyi +180 -0
  631. numpy/polynomial/hermite.py +1738 -0
  632. numpy/polynomial/hermite.pyi +106 -0
  633. numpy/polynomial/hermite_e.py +1640 -0
  634. numpy/polynomial/hermite_e.pyi +106 -0
  635. numpy/polynomial/laguerre.py +1673 -0
  636. numpy/polynomial/laguerre.pyi +100 -0
  637. numpy/polynomial/legendre.py +1603 -0
  638. numpy/polynomial/legendre.pyi +100 -0
  639. numpy/polynomial/polynomial.py +1625 -0
  640. numpy/polynomial/polynomial.pyi +109 -0
  641. numpy/polynomial/polyutils.py +759 -0
  642. numpy/polynomial/polyutils.pyi +307 -0
  643. numpy/polynomial/tests/__init__.py +0 -0
  644. numpy/polynomial/tests/test_chebyshev.py +618 -0
  645. numpy/polynomial/tests/test_classes.py +613 -0
  646. numpy/polynomial/tests/test_hermite.py +553 -0
  647. numpy/polynomial/tests/test_hermite_e.py +554 -0
  648. numpy/polynomial/tests/test_laguerre.py +535 -0
  649. numpy/polynomial/tests/test_legendre.py +566 -0
  650. numpy/polynomial/tests/test_polynomial.py +691 -0
  651. numpy/polynomial/tests/test_polyutils.py +123 -0
  652. numpy/polynomial/tests/test_printing.py +557 -0
  653. numpy/polynomial/tests/test_symbol.py +217 -0
  654. numpy/py.typed +0 -0
  655. numpy/random/LICENSE.md +71 -0
  656. numpy/random/__init__.pxd +14 -0
  657. numpy/random/__init__.py +213 -0
  658. numpy/random/__init__.pyi +124 -0
  659. numpy/random/_bounded_integers.cp314t-win_arm64.lib +0 -0
  660. numpy/random/_bounded_integers.cp314t-win_arm64.pyd +0 -0
  661. numpy/random/_bounded_integers.pxd +38 -0
  662. numpy/random/_bounded_integers.pyi +1 -0
  663. numpy/random/_common.cp314t-win_arm64.lib +0 -0
  664. numpy/random/_common.cp314t-win_arm64.pyd +0 -0
  665. numpy/random/_common.pxd +110 -0
  666. numpy/random/_common.pyi +16 -0
  667. numpy/random/_examples/cffi/extending.py +44 -0
  668. numpy/random/_examples/cffi/parse.py +53 -0
  669. numpy/random/_examples/cython/extending.pyx +77 -0
  670. numpy/random/_examples/cython/extending_distributions.pyx +117 -0
  671. numpy/random/_examples/cython/meson.build +53 -0
  672. numpy/random/_examples/numba/extending.py +86 -0
  673. numpy/random/_examples/numba/extending_distributions.py +67 -0
  674. numpy/random/_generator.cp314t-win_arm64.lib +0 -0
  675. numpy/random/_generator.cp314t-win_arm64.pyd +0 -0
  676. numpy/random/_generator.pyi +862 -0
  677. numpy/random/_mt19937.cp314t-win_arm64.lib +0 -0
  678. numpy/random/_mt19937.cp314t-win_arm64.pyd +0 -0
  679. numpy/random/_mt19937.pyi +27 -0
  680. numpy/random/_pcg64.cp314t-win_arm64.lib +0 -0
  681. numpy/random/_pcg64.cp314t-win_arm64.pyd +0 -0
  682. numpy/random/_pcg64.pyi +41 -0
  683. numpy/random/_philox.cp314t-win_arm64.lib +0 -0
  684. numpy/random/_philox.cp314t-win_arm64.pyd +0 -0
  685. numpy/random/_philox.pyi +36 -0
  686. numpy/random/_pickle.py +88 -0
  687. numpy/random/_pickle.pyi +43 -0
  688. numpy/random/_sfc64.cp314t-win_arm64.lib +0 -0
  689. numpy/random/_sfc64.cp314t-win_arm64.pyd +0 -0
  690. numpy/random/_sfc64.pyi +25 -0
  691. numpy/random/bit_generator.cp314t-win_arm64.lib +0 -0
  692. numpy/random/bit_generator.cp314t-win_arm64.pyd +0 -0
  693. numpy/random/bit_generator.pxd +40 -0
  694. numpy/random/bit_generator.pyi +123 -0
  695. numpy/random/c_distributions.pxd +119 -0
  696. numpy/random/lib/npyrandom.lib +0 -0
  697. numpy/random/mtrand.cp314t-win_arm64.lib +0 -0
  698. numpy/random/mtrand.cp314t-win_arm64.pyd +0 -0
  699. numpy/random/mtrand.pyi +759 -0
  700. numpy/random/tests/__init__.py +0 -0
  701. numpy/random/tests/data/__init__.py +0 -0
  702. numpy/random/tests/data/generator_pcg64_np121.pkl.gz +0 -0
  703. numpy/random/tests/data/generator_pcg64_np126.pkl.gz +0 -0
  704. numpy/random/tests/data/mt19937-testset-1.csv +1001 -0
  705. numpy/random/tests/data/mt19937-testset-2.csv +1001 -0
  706. numpy/random/tests/data/pcg64-testset-1.csv +1001 -0
  707. numpy/random/tests/data/pcg64-testset-2.csv +1001 -0
  708. numpy/random/tests/data/pcg64dxsm-testset-1.csv +1001 -0
  709. numpy/random/tests/data/pcg64dxsm-testset-2.csv +1001 -0
  710. numpy/random/tests/data/philox-testset-1.csv +1001 -0
  711. numpy/random/tests/data/philox-testset-2.csv +1001 -0
  712. numpy/random/tests/data/sfc64-testset-1.csv +1001 -0
  713. numpy/random/tests/data/sfc64-testset-2.csv +1001 -0
  714. numpy/random/tests/data/sfc64_np126.pkl.gz +0 -0
  715. numpy/random/tests/test_direct.py +595 -0
  716. numpy/random/tests/test_extending.py +131 -0
  717. numpy/random/tests/test_generator_mt19937.py +2825 -0
  718. numpy/random/tests/test_generator_mt19937_regressions.py +221 -0
  719. numpy/random/tests/test_random.py +1724 -0
  720. numpy/random/tests/test_randomstate.py +2099 -0
  721. numpy/random/tests/test_randomstate_regression.py +213 -0
  722. numpy/random/tests/test_regression.py +175 -0
  723. numpy/random/tests/test_seed_sequence.py +79 -0
  724. numpy/random/tests/test_smoke.py +882 -0
  725. numpy/rec/__init__.py +2 -0
  726. numpy/rec/__init__.pyi +23 -0
  727. numpy/strings/__init__.py +2 -0
  728. numpy/strings/__init__.pyi +97 -0
  729. numpy/testing/__init__.py +22 -0
  730. numpy/testing/__init__.pyi +107 -0
  731. numpy/testing/_private/__init__.py +0 -0
  732. numpy/testing/_private/__init__.pyi +0 -0
  733. numpy/testing/_private/extbuild.py +250 -0
  734. numpy/testing/_private/extbuild.pyi +25 -0
  735. numpy/testing/_private/utils.py +2830 -0
  736. numpy/testing/_private/utils.pyi +505 -0
  737. numpy/testing/overrides.py +84 -0
  738. numpy/testing/overrides.pyi +10 -0
  739. numpy/testing/print_coercion_tables.py +207 -0
  740. numpy/testing/print_coercion_tables.pyi +26 -0
  741. numpy/testing/tests/__init__.py +0 -0
  742. numpy/testing/tests/test_utils.py +2123 -0
  743. numpy/tests/__init__.py +0 -0
  744. numpy/tests/test__all__.py +10 -0
  745. numpy/tests/test_configtool.py +51 -0
  746. numpy/tests/test_ctypeslib.py +383 -0
  747. numpy/tests/test_lazyloading.py +42 -0
  748. numpy/tests/test_matlib.py +59 -0
  749. numpy/tests/test_numpy_config.py +47 -0
  750. numpy/tests/test_numpy_version.py +54 -0
  751. numpy/tests/test_public_api.py +807 -0
  752. numpy/tests/test_reloading.py +76 -0
  753. numpy/tests/test_scripts.py +48 -0
  754. numpy/tests/test_warnings.py +79 -0
  755. numpy/typing/__init__.py +233 -0
  756. numpy/typing/__init__.pyi +3 -0
  757. numpy/typing/mypy_plugin.py +200 -0
  758. numpy/typing/tests/__init__.py +0 -0
  759. numpy/typing/tests/data/fail/arithmetic.pyi +126 -0
  760. numpy/typing/tests/data/fail/array_constructors.pyi +34 -0
  761. numpy/typing/tests/data/fail/array_like.pyi +15 -0
  762. numpy/typing/tests/data/fail/array_pad.pyi +6 -0
  763. numpy/typing/tests/data/fail/arrayprint.pyi +15 -0
  764. numpy/typing/tests/data/fail/arrayterator.pyi +14 -0
  765. numpy/typing/tests/data/fail/bitwise_ops.pyi +17 -0
  766. numpy/typing/tests/data/fail/char.pyi +63 -0
  767. numpy/typing/tests/data/fail/chararray.pyi +61 -0
  768. numpy/typing/tests/data/fail/comparisons.pyi +27 -0
  769. numpy/typing/tests/data/fail/constants.pyi +3 -0
  770. numpy/typing/tests/data/fail/datasource.pyi +16 -0
  771. numpy/typing/tests/data/fail/dtype.pyi +17 -0
  772. numpy/typing/tests/data/fail/einsumfunc.pyi +12 -0
  773. numpy/typing/tests/data/fail/flatiter.pyi +38 -0
  774. numpy/typing/tests/data/fail/fromnumeric.pyi +148 -0
  775. numpy/typing/tests/data/fail/histograms.pyi +12 -0
  776. numpy/typing/tests/data/fail/index_tricks.pyi +14 -0
  777. numpy/typing/tests/data/fail/lib_function_base.pyi +60 -0
  778. numpy/typing/tests/data/fail/lib_polynomial.pyi +29 -0
  779. numpy/typing/tests/data/fail/lib_utils.pyi +3 -0
  780. numpy/typing/tests/data/fail/lib_version.pyi +6 -0
  781. numpy/typing/tests/data/fail/linalg.pyi +52 -0
  782. numpy/typing/tests/data/fail/ma.pyi +155 -0
  783. numpy/typing/tests/data/fail/memmap.pyi +5 -0
  784. numpy/typing/tests/data/fail/modules.pyi +17 -0
  785. numpy/typing/tests/data/fail/multiarray.pyi +52 -0
  786. numpy/typing/tests/data/fail/ndarray.pyi +11 -0
  787. numpy/typing/tests/data/fail/ndarray_misc.pyi +49 -0
  788. numpy/typing/tests/data/fail/nditer.pyi +8 -0
  789. numpy/typing/tests/data/fail/nested_sequence.pyi +17 -0
  790. numpy/typing/tests/data/fail/npyio.pyi +24 -0
  791. numpy/typing/tests/data/fail/numerictypes.pyi +5 -0
  792. numpy/typing/tests/data/fail/random.pyi +62 -0
  793. numpy/typing/tests/data/fail/rec.pyi +17 -0
  794. numpy/typing/tests/data/fail/scalars.pyi +86 -0
  795. numpy/typing/tests/data/fail/shape.pyi +7 -0
  796. numpy/typing/tests/data/fail/shape_base.pyi +8 -0
  797. numpy/typing/tests/data/fail/stride_tricks.pyi +9 -0
  798. numpy/typing/tests/data/fail/strings.pyi +52 -0
  799. numpy/typing/tests/data/fail/testing.pyi +28 -0
  800. numpy/typing/tests/data/fail/twodim_base.pyi +39 -0
  801. numpy/typing/tests/data/fail/type_check.pyi +12 -0
  802. numpy/typing/tests/data/fail/ufunc_config.pyi +21 -0
  803. numpy/typing/tests/data/fail/ufunclike.pyi +21 -0
  804. numpy/typing/tests/data/fail/ufuncs.pyi +17 -0
  805. numpy/typing/tests/data/fail/warnings_and_errors.pyi +5 -0
  806. numpy/typing/tests/data/misc/extended_precision.pyi +9 -0
  807. numpy/typing/tests/data/mypy.ini +8 -0
  808. numpy/typing/tests/data/pass/arithmetic.py +614 -0
  809. numpy/typing/tests/data/pass/array_constructors.py +138 -0
  810. numpy/typing/tests/data/pass/array_like.py +43 -0
  811. numpy/typing/tests/data/pass/arrayprint.py +37 -0
  812. numpy/typing/tests/data/pass/arrayterator.py +28 -0
  813. numpy/typing/tests/data/pass/bitwise_ops.py +131 -0
  814. numpy/typing/tests/data/pass/comparisons.py +316 -0
  815. numpy/typing/tests/data/pass/dtype.py +57 -0
  816. numpy/typing/tests/data/pass/einsumfunc.py +36 -0
  817. numpy/typing/tests/data/pass/flatiter.py +26 -0
  818. numpy/typing/tests/data/pass/fromnumeric.py +272 -0
  819. numpy/typing/tests/data/pass/index_tricks.py +62 -0
  820. numpy/typing/tests/data/pass/lib_user_array.py +22 -0
  821. numpy/typing/tests/data/pass/lib_utils.py +19 -0
  822. numpy/typing/tests/data/pass/lib_version.py +18 -0
  823. numpy/typing/tests/data/pass/literal.py +52 -0
  824. numpy/typing/tests/data/pass/ma.py +199 -0
  825. numpy/typing/tests/data/pass/mod.py +149 -0
  826. numpy/typing/tests/data/pass/modules.py +45 -0
  827. numpy/typing/tests/data/pass/multiarray.py +77 -0
  828. numpy/typing/tests/data/pass/ndarray_conversion.py +81 -0
  829. numpy/typing/tests/data/pass/ndarray_misc.py +199 -0
  830. numpy/typing/tests/data/pass/ndarray_shape_manipulation.py +47 -0
  831. numpy/typing/tests/data/pass/nditer.py +4 -0
  832. numpy/typing/tests/data/pass/numeric.py +90 -0
  833. numpy/typing/tests/data/pass/numerictypes.py +17 -0
  834. numpy/typing/tests/data/pass/random.py +1498 -0
  835. numpy/typing/tests/data/pass/recfunctions.py +164 -0
  836. numpy/typing/tests/data/pass/scalars.py +249 -0
  837. numpy/typing/tests/data/pass/shape.py +19 -0
  838. numpy/typing/tests/data/pass/simple.py +170 -0
  839. numpy/typing/tests/data/pass/ufunc_config.py +64 -0
  840. numpy/typing/tests/data/pass/ufunclike.py +52 -0
  841. numpy/typing/tests/data/pass/ufuncs.py +16 -0
  842. numpy/typing/tests/data/pass/warnings_and_errors.py +6 -0
  843. numpy/typing/tests/data/reveal/arithmetic.pyi +719 -0
  844. numpy/typing/tests/data/reveal/array_api_info.pyi +70 -0
  845. numpy/typing/tests/data/reveal/array_constructors.pyi +277 -0
  846. numpy/typing/tests/data/reveal/arraypad.pyi +27 -0
  847. numpy/typing/tests/data/reveal/arrayprint.pyi +25 -0
  848. numpy/typing/tests/data/reveal/arraysetops.pyi +74 -0
  849. numpy/typing/tests/data/reveal/arrayterator.pyi +27 -0
  850. numpy/typing/tests/data/reveal/bitwise_ops.pyi +166 -0
  851. numpy/typing/tests/data/reveal/char.pyi +225 -0
  852. numpy/typing/tests/data/reveal/chararray.pyi +138 -0
  853. numpy/typing/tests/data/reveal/comparisons.pyi +264 -0
  854. numpy/typing/tests/data/reveal/constants.pyi +14 -0
  855. numpy/typing/tests/data/reveal/ctypeslib.pyi +81 -0
  856. numpy/typing/tests/data/reveal/datasource.pyi +23 -0
  857. numpy/typing/tests/data/reveal/dtype.pyi +132 -0
  858. numpy/typing/tests/data/reveal/einsumfunc.pyi +39 -0
  859. numpy/typing/tests/data/reveal/emath.pyi +54 -0
  860. numpy/typing/tests/data/reveal/fft.pyi +37 -0
  861. numpy/typing/tests/data/reveal/flatiter.pyi +86 -0
  862. numpy/typing/tests/data/reveal/fromnumeric.pyi +347 -0
  863. numpy/typing/tests/data/reveal/getlimits.pyi +53 -0
  864. numpy/typing/tests/data/reveal/histograms.pyi +25 -0
  865. numpy/typing/tests/data/reveal/index_tricks.pyi +70 -0
  866. numpy/typing/tests/data/reveal/lib_function_base.pyi +409 -0
  867. numpy/typing/tests/data/reveal/lib_polynomial.pyi +147 -0
  868. numpy/typing/tests/data/reveal/lib_utils.pyi +17 -0
  869. numpy/typing/tests/data/reveal/lib_version.pyi +20 -0
  870. numpy/typing/tests/data/reveal/linalg.pyi +154 -0
  871. numpy/typing/tests/data/reveal/ma.pyi +1098 -0
  872. numpy/typing/tests/data/reveal/matrix.pyi +73 -0
  873. numpy/typing/tests/data/reveal/memmap.pyi +19 -0
  874. numpy/typing/tests/data/reveal/mod.pyi +178 -0
  875. numpy/typing/tests/data/reveal/modules.pyi +51 -0
  876. numpy/typing/tests/data/reveal/multiarray.pyi +197 -0
  877. numpy/typing/tests/data/reveal/nbit_base_example.pyi +20 -0
  878. numpy/typing/tests/data/reveal/ndarray_assignability.pyi +82 -0
  879. numpy/typing/tests/data/reveal/ndarray_conversion.pyi +83 -0
  880. numpy/typing/tests/data/reveal/ndarray_misc.pyi +246 -0
  881. numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +47 -0
  882. numpy/typing/tests/data/reveal/nditer.pyi +49 -0
  883. numpy/typing/tests/data/reveal/nested_sequence.pyi +25 -0
  884. numpy/typing/tests/data/reveal/npyio.pyi +83 -0
  885. numpy/typing/tests/data/reveal/numeric.pyi +170 -0
  886. numpy/typing/tests/data/reveal/numerictypes.pyi +16 -0
  887. numpy/typing/tests/data/reveal/polynomial_polybase.pyi +217 -0
  888. numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +218 -0
  889. numpy/typing/tests/data/reveal/polynomial_series.pyi +138 -0
  890. numpy/typing/tests/data/reveal/random.pyi +1546 -0
  891. numpy/typing/tests/data/reveal/rec.pyi +171 -0
  892. numpy/typing/tests/data/reveal/scalars.pyi +191 -0
  893. numpy/typing/tests/data/reveal/shape.pyi +13 -0
  894. numpy/typing/tests/data/reveal/shape_base.pyi +52 -0
  895. numpy/typing/tests/data/reveal/stride_tricks.pyi +27 -0
  896. numpy/typing/tests/data/reveal/strings.pyi +196 -0
  897. numpy/typing/tests/data/reveal/testing.pyi +198 -0
  898. numpy/typing/tests/data/reveal/twodim_base.pyi +225 -0
  899. numpy/typing/tests/data/reveal/type_check.pyi +67 -0
  900. numpy/typing/tests/data/reveal/ufunc_config.pyi +29 -0
  901. numpy/typing/tests/data/reveal/ufunclike.pyi +31 -0
  902. numpy/typing/tests/data/reveal/ufuncs.pyi +142 -0
  903. numpy/typing/tests/data/reveal/warnings_and_errors.pyi +11 -0
  904. numpy/typing/tests/test_isfile.py +38 -0
  905. numpy/typing/tests/test_runtime.py +110 -0
  906. numpy/typing/tests/test_typing.py +205 -0
  907. numpy/version.py +11 -0
  908. numpy/version.pyi +9 -0
  909. numpy-2.4.1.dist-info/DELVEWHEEL +2 -0
  910. numpy-2.4.1.dist-info/METADATA +139 -0
  911. numpy-2.4.1.dist-info/RECORD +932 -0
  912. numpy-2.4.1.dist-info/WHEEL +4 -0
  913. numpy-2.4.1.dist-info/entry_points.txt +13 -0
  914. numpy-2.4.1.dist-info/licenses/LICENSE.txt +914 -0
  915. numpy-2.4.1.dist-info/licenses/numpy/_core/include/numpy/libdivide/LICENSE.txt +21 -0
  916. numpy-2.4.1.dist-info/licenses/numpy/_core/src/common/pythoncapi-compat/COPYING +14 -0
  917. numpy-2.4.1.dist-info/licenses/numpy/_core/src/highway/LICENSE +371 -0
  918. numpy-2.4.1.dist-info/licenses/numpy/_core/src/multiarray/dragon4_LICENSE.txt +27 -0
  919. numpy-2.4.1.dist-info/licenses/numpy/_core/src/npysort/x86-simd-sort/LICENSE.md +28 -0
  920. numpy-2.4.1.dist-info/licenses/numpy/_core/src/umath/svml/LICENSE +30 -0
  921. numpy-2.4.1.dist-info/licenses/numpy/fft/pocketfft/LICENSE.md +25 -0
  922. numpy-2.4.1.dist-info/licenses/numpy/linalg/lapack_lite/LICENSE.txt +48 -0
  923. numpy-2.4.1.dist-info/licenses/numpy/ma/LICENSE +24 -0
  924. numpy-2.4.1.dist-info/licenses/numpy/random/LICENSE.md +71 -0
  925. numpy-2.4.1.dist-info/licenses/numpy/random/src/distributions/LICENSE.md +61 -0
  926. numpy-2.4.1.dist-info/licenses/numpy/random/src/mt19937/LICENSE.md +61 -0
  927. numpy-2.4.1.dist-info/licenses/numpy/random/src/pcg64/LICENSE.md +22 -0
  928. numpy-2.4.1.dist-info/licenses/numpy/random/src/philox/LICENSE.md +31 -0
  929. numpy-2.4.1.dist-info/licenses/numpy/random/src/sfc64/LICENSE.md +27 -0
  930. numpy-2.4.1.dist-info/licenses/numpy/random/src/splitmix64/LICENSE.md +9 -0
  931. numpy.libs/msvcp140-5f1c5dd31916990d94181e07bc3afb32.dll +0 -0
  932. numpy.libs/scipy_openblas-7b69cbfd2599e6035f1310f2a72d59a6.dll +0 -0
numpy/_core/include/numpy/random/libdivide.h
@@ -0,0 +1,2079 @@
+ // libdivide.h - Optimized integer division
+ // https://libdivide.com
+ //
+ // Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+ // Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+ //
+ // libdivide is dual-licensed under the Boost or zlib licenses.
+ // You may use libdivide under the terms of either of these.
+ // See LICENSE.txt for more details.
+
+ #ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+ #define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+
+ #define LIBDIVIDE_VERSION "3.0"
+ #define LIBDIVIDE_VERSION_MAJOR 3
+ #define LIBDIVIDE_VERSION_MINOR 0
+
+ #include <stdint.h>
+
+ #if defined(__cplusplus)
+ #include <cstdlib>
+ #include <cstdio>
+ #include <type_traits>
+ #else
+ #include <stdlib.h>
+ #include <stdio.h>
+ #endif
+
+ #if defined(LIBDIVIDE_AVX512)
+ #include <immintrin.h>
+ #elif defined(LIBDIVIDE_AVX2)
+ #include <immintrin.h>
+ #elif defined(LIBDIVIDE_SSE2)
+ #include <emmintrin.h>
+ #endif
+
+ #if defined(_MSC_VER)
+ #include <intrin.h>
+ // disable warning C4146: unary minus operator applied
+ // to unsigned type, result still unsigned
+ #pragma warning(disable: 4146)
+ #define LIBDIVIDE_VC
+ #endif
+
+ #if !defined(__has_builtin)
+ #define __has_builtin(x) 0
+ #endif
+
+ #if defined(__SIZEOF_INT128__)
+ #define HAS_INT128_T
+ // clang-cl on Windows does not yet support 128-bit division
+ #if !(defined(__clang__) && defined(LIBDIVIDE_VC))
+ #define HAS_INT128_DIV
+ #endif
+ #endif
+
+ #if defined(__x86_64__) || defined(_M_X64)
+ #define LIBDIVIDE_X86_64
+ #endif
+
+ #if defined(__i386__)
+ #define LIBDIVIDE_i386
+ #endif
+
+ #if defined(__GNUC__) || defined(__clang__)
+ #define LIBDIVIDE_GCC_STYLE_ASM
+ #endif
+
+ #if defined(__cplusplus) || defined(LIBDIVIDE_VC)
+ #define LIBDIVIDE_FUNCTION __FUNCTION__
+ #else
+ #define LIBDIVIDE_FUNCTION __func__
+ #endif
+
+ #define LIBDIVIDE_ERROR(msg) \
+     do { \
+         fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \
+             __LINE__, LIBDIVIDE_FUNCTION, msg); \
+         abort(); \
+     } while (0)
+
+ #if defined(LIBDIVIDE_ASSERTIONS_ON)
+ #define LIBDIVIDE_ASSERT(x) \
+     do { \
+         if (!(x)) { \
+             fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \
+                 __LINE__, LIBDIVIDE_FUNCTION, #x); \
+             abort(); \
+         } \
+     } while (0)
+ #else
+ #define LIBDIVIDE_ASSERT(x)
+ #endif
+
+ #ifdef __cplusplus
+ namespace libdivide {
+ #endif
+
+ // pack divider structs to prevent compilers from padding.
+ // This reduces memory usage by up to 43% when using a large
+ // array of libdivide dividers and improves performance
+ // by up to 10% because of reduced memory bandwidth.
+ #pragma pack(push, 1)
+
+ struct libdivide_u32_t {
+     uint32_t magic;
+     uint8_t more;
+ };
+
+ struct libdivide_s32_t {
+     int32_t magic;
+     uint8_t more;
+ };
+
+ struct libdivide_u64_t {
+     uint64_t magic;
+     uint8_t more;
+ };
+
+ struct libdivide_s64_t {
+     int64_t magic;
+     uint8_t more;
+ };
+
+ struct libdivide_u32_branchfree_t {
+     uint32_t magic;
+     uint8_t more;
+ };
+
+ struct libdivide_s32_branchfree_t {
+     int32_t magic;
+     uint8_t more;
+ };
+
+ struct libdivide_u64_branchfree_t {
+     uint64_t magic;
+     uint8_t more;
+ };
+
+ struct libdivide_s64_branchfree_t {
+     int64_t magic;
+     uint8_t more;
+ };
+
+ #pragma pack(pop)
+
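// Illustrative aside (not part of the original header): with the pack(1)
// above, each divider occupies exactly sizeof(magic) + 1 bytes; without it,
// 'more' would be padded out to the alignment of 'magic'. The "up to 43%"
// figure matches the 64-bit case: 9 bytes packed versus 16 bytes with
// default padding. A C11 compile-time check of this would be:
//
//     _Static_assert(sizeof(struct libdivide_u32_t) == 5, "4 + 1 bytes");
//     _Static_assert(sizeof(struct libdivide_u64_t) == 9, "8 + 1 bytes");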
+ // Explanation of the "more" field:
+ //
+ // * Bits 0-5 is the shift value (for shift path or mult path).
+ // * Bit 6 is the add indicator for mult path.
+ // * Bit 7 is set if the divisor is negative. We use bit 7 as the negative
+ //   divisor indicator so that we can efficiently use sign extension to
+ //   create a bitmask with all bits set to 1 (if the divisor is negative)
+ //   or 0 (if the divisor is positive).
+ //
+ // u32: [0-4] shift value
+ //      [5] ignored
+ //      [6] add indicator
+ //      magic number of 0 indicates shift path
+ //
+ // s32: [0-4] shift value
+ //      [5] ignored
+ //      [6] add indicator
+ //      [7] indicates negative divisor
+ //      magic number of 0 indicates shift path
+ //
+ // u64: [0-5] shift value
+ //      [6] add indicator
+ //      magic number of 0 indicates shift path
+ //
+ // s64: [0-5] shift value
+ //      [6] add indicator
+ //      [7] indicates negative divisor
+ //      magic number of 0 indicates shift path
+ //
+ // In s32 and s64 branchfull, the magic number is negated according to
+ // whether the divisor is negated. In branchfree strategy, it is not negated.
+
+ enum {
+     LIBDIVIDE_32_SHIFT_MASK = 0x1F,
+     LIBDIVIDE_64_SHIFT_MASK = 0x3F,
+     LIBDIVIDE_ADD_MARKER = 0x40,
+     LIBDIVIDE_NEGATIVE_DIVISOR = 0x80
+ };
+
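// Decoding sketch (illustrative, not part of the original header;
// 'example_decode_u32_more' is a hypothetical name): how the masks above
// pick apart a u32 'more' byte, following the layout comment.
static inline void example_decode_u32_more(uint8_t more) {
    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;    // bits 0-4: shift amount
    int add = (more & LIBDIVIDE_ADD_MARKER) != 0;      // bit 6: add indicator
    // For signed dividers, bit 7 (LIBDIVIDE_NEGATIVE_DIVISOR) sits in the
    // sign position so an arithmetic shift yields an all-ones mask for a
    // negative divisor and zero otherwise, as described above.
    (void)shift; (void)add;
}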
+ static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d);
+ static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d);
+ static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d);
+ static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d);
+
+ static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d);
+ static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d);
+ static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
+ static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
+
+ static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom);
+ static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom);
+ static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom);
+ static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom);
+
+ static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom);
+ static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom);
+ static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom);
+ static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom);
+
+ static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom);
+ static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom);
+ static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom);
+ static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom);
+
+ static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom);
+ static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom);
+ static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom);
+ static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom);
+
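// Usage sketch for the API declared above (illustrative, not part of the
// original header; 'divide_all_by' is a hypothetical helper): a divider is
// generated once per divisor, then each '_do' call replaces a hardware
// divide with a multiply/shift sequence.
static inline void divide_all_by(uint64_t *data, size_t n, uint64_t d) {
    struct libdivide_u64_t divider = libdivide_u64_gen(d);  // d must be nonzero
    for (size_t i = 0; i < n; i++) {
        data[i] = libdivide_u64_do(data[i], &divider);      // data[i] /= d
    }
    // libdivide_u64_recover(&divider) returns the original divisor d.
    // The *_branchfree_* variants avoid data-dependent branches in '_do';
    // note the unsigned branchfree generators reject a divisor of 1.
}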
216
+ //////// Internal Utility Functions
+
+ static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) {
+     uint64_t xl = x, yl = y;
+     uint64_t rl = xl * yl;
+     return (uint32_t)(rl >> 32);
+ }
+
+ static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) {
+     int64_t xl = x, yl = y;
+     int64_t rl = xl * yl;
+     // needs to be arithmetic shift
+     return (int32_t)(rl >> 32);
+ }
+
+ static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
+ #if defined(LIBDIVIDE_VC) && \
+     defined(LIBDIVIDE_X86_64)
+     return __umulh(x, y);
+ #elif defined(HAS_INT128_T)
+     __uint128_t xl = x, yl = y;
+     __uint128_t rl = xl * yl;
+     return (uint64_t)(rl >> 64);
+ #else
+     // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
+     uint32_t mask = 0xFFFFFFFF;
+     uint32_t x0 = (uint32_t)(x & mask);
+     uint32_t x1 = (uint32_t)(x >> 32);
+     uint32_t y0 = (uint32_t)(y & mask);
+     uint32_t y1 = (uint32_t)(y >> 32);
+     uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
+     uint64_t x0y1 = x0 * (uint64_t)y1;
+     uint64_t x1y0 = x1 * (uint64_t)y0;
+     uint64_t x1y1 = x1 * (uint64_t)y1;
+     uint64_t temp = x1y0 + x0y0_hi;
+     uint64_t temp_lo = temp & mask;
+     uint64_t temp_hi = temp >> 32;
+
+     return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32);
+ #endif
+ }
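+
+ // Editor's sketch (not upstream): a compiled-out spot check of the 32-bit
+ // decomposition used by the portable branch above. The high 64 bits of
+ // (2^32 + 3) * (2^32 + 5) = 2^64 + 8*2^32 + 15 are exactly 1.
+ #if 0
+ static void example_mullhi_u64_check(void) {
+     uint64_t hi = libdivide_mullhi_u64((1ULL << 32) + 3, (1ULL << 32) + 5);
+     LIBDIVIDE_ASSERT(hi == 1);
+ }
+ #endif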
+
+ static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) {
+ #if defined(LIBDIVIDE_VC) && \
+     defined(LIBDIVIDE_X86_64)
+     return __mulh(x, y);
+ #elif defined(HAS_INT128_T)
+     __int128_t xl = x, yl = y;
+     __int128_t rl = xl * yl;
+     return (int64_t)(rl >> 64);
+ #else
+     // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
+     uint32_t mask = 0xFFFFFFFF;
+     uint32_t x0 = (uint32_t)(x & mask);
+     uint32_t y0 = (uint32_t)(y & mask);
+     int32_t x1 = (int32_t)(x >> 32);
+     int32_t y1 = (int32_t)(y >> 32);
+     uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
+     int64_t t = x1 * (int64_t)y0 + x0y0_hi;
+     int64_t w1 = x0 * (int64_t)y1 + (t & mask);
+
+     return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32);
+ #endif
+ }
+
+ static inline int32_t libdivide_count_leading_zeros32(uint32_t val) {
+ #if defined(__GNUC__) || \
+     __has_builtin(__builtin_clz)
+     // Fast way to count leading zeros
+     return __builtin_clz(val);
+ #elif defined(LIBDIVIDE_VC)
+     unsigned long result;
+     if (_BitScanReverse(&result, val)) {
+         return 31 - result;
+     }
+     return 0;
+ #else
+     if (val == 0)
+         return 32;
+     int32_t result = 8;
+     uint32_t hi = 0xFFU << 24;
+     while ((val & hi) == 0) {
+         hi >>= 8;
+         result += 8;
+     }
+     while (val & hi) {
+         result -= 1;
+         hi <<= 1;
+     }
+     return result;
+ #endif
+ }
+
+ static inline int32_t libdivide_count_leading_zeros64(uint64_t val) {
+ #if defined(__GNUC__) || \
+     __has_builtin(__builtin_clzll)
+     // Fast way to count leading zeros
+     return __builtin_clzll(val);
+ #elif defined(LIBDIVIDE_VC) && defined(_WIN64)
+     unsigned long result;
+     if (_BitScanReverse64(&result, val)) {
+         return 63 - result;
+     }
+     return 0;
+ #else
+     uint32_t hi = val >> 32;
+     uint32_t lo = val & 0xFFFFFFFF;
+     if (hi != 0) return libdivide_count_leading_zeros32(hi);
+     return 32 + libdivide_count_leading_zeros32(lo);
+ #endif
+ }
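+
+ // Editor's sketch (not upstream): whichever branch is compiled, the two
+ // helpers above must agree on the values the generators below rely on.
+ // Compiled out; example_* is an illustrative name.
+ #if 0
+ static void example_clz_check(void) {
+     LIBDIVIDE_ASSERT(libdivide_count_leading_zeros32(1u) == 31);
+     LIBDIVIDE_ASSERT(libdivide_count_leading_zeros64(1ULL << 40) == 23);
+ }
+ #endif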
+
+ // libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit
+ // uint {v}. The result must fit in 32 bits.
+ // Returns the quotient directly and the remainder in *r
+ static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) {
+ #if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \
+     defined(LIBDIVIDE_GCC_STYLE_ASM)
+     uint32_t result;
+     __asm__("divl %[v]"
+             : "=a"(result), "=d"(*r)
+             : [v] "r"(v), "a"(u0), "d"(u1)
+             );
+     return result;
+ #else
+     uint64_t n = ((uint64_t)u1 << 32) | u0;
+     uint32_t result = (uint32_t)(n / v);
+     *r = (uint32_t)(n - result * (uint64_t)v);
+     return result;
+ #endif
+ }
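+
+ // Editor's sketch (not upstream): the contract stated above requires
+ // u1 < v so the quotient fits in 32 bits. A compiled-out check against
+ // plain 64-bit division:
+ #if 0
+ static void example_64_div_32(void) {
+     uint32_t r;
+     uint32_t q = libdivide_64_div_32_to_32(3, 7, 5, &r); // (3 * 2^32 + 7) / 5
+     LIBDIVIDE_ASSERT(q == (uint32_t)(((3ULL << 32) + 7) / 5));
+     LIBDIVIDE_ASSERT(r == (uint32_t)(((3ULL << 32) + 7) % 5));
+ }
+ #endif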
+
+ // libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit
+ // uint {v}. The result must fit in 64 bits.
+ // Returns the quotient directly and the remainder in *r
+ static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) {
+ #if defined(LIBDIVIDE_X86_64) && \
+     defined(LIBDIVIDE_GCC_STYLE_ASM)
+     uint64_t result;
+     __asm__("divq %[v]"
+             : "=a"(result), "=d"(*r)
+             : [v] "r"(v), "a"(u0), "d"(u1)
+             );
+     return result;
+ #elif defined(HAS_INT128_T) && \
+     defined(HAS_INT128_DIV)
+     __uint128_t n = ((__uint128_t)u1 << 64) | u0;
+     uint64_t result = (uint64_t)(n / v);
+     *r = (uint64_t)(n - result * (__uint128_t)v);
+     return result;
+ #else
+     // Code taken from Hacker's Delight:
+     // http://www.hackersdelight.org/HDcode/divlu.c.
+     // License permits inclusion here per:
+     // http://www.hackersdelight.org/permissions.htm
+
+     const uint64_t b = (1ULL << 32); // Number base (32 bits)
+     uint64_t un1, un0;               // Norm. dividend LSD's
+     uint64_t vn1, vn0;               // Norm. divisor digits
+     uint64_t q1, q0;                 // Quotient digits
+     uint64_t un64, un21, un10;       // Dividend digit pairs
+     uint64_t rhat;                   // A remainder
+     int32_t s;                       // Shift amount for norm
+
+     // If overflow, set rem. to an impossible value,
+     // and return the largest possible quotient
+     if (u1 >= v) {
+         *r = (uint64_t) -1;
+         return (uint64_t) -1;
+     }
+
+     // count leading zeros
+     s = libdivide_count_leading_zeros64(v);
+     if (s > 0) {
+         // Normalize divisor
+         v = v << s;
+         un64 = (u1 << s) | (u0 >> (64 - s));
+         un10 = u0 << s; // Shift dividend left
+     } else {
+         // Avoid undefined behavior of (u0 >> 64).
+         // The behavior is undefined if the right operand is
+         // negative, or greater than or equal to the length
+         // in bits of the promoted left operand.
+         un64 = u1;
+         un10 = u0;
+     }
+
+     // Break divisor up into two 32-bit digits
+     vn1 = v >> 32;
+     vn0 = v & 0xFFFFFFFF;
+
+     // Break right half of dividend into two digits
+     un1 = un10 >> 32;
+     un0 = un10 & 0xFFFFFFFF;
+
+     // Compute the first quotient digit, q1
+     q1 = un64 / vn1;
+     rhat = un64 - q1 * vn1;
+
+     while (q1 >= b || q1 * vn0 > b * rhat + un1) {
+         q1 = q1 - 1;
+         rhat = rhat + vn1;
+         if (rhat >= b)
+             break;
+     }
+
+     // Multiply and subtract
+     un21 = un64 * b + un1 - q1 * v;
+
+     // Compute the second quotient digit
+     q0 = un21 / vn1;
+     rhat = un21 - q0 * vn1;
+
+     while (q0 >= b || q0 * vn0 > b * rhat + un0) {
+         q0 = q0 - 1;
+         rhat = rhat + vn1;
+         if (rhat >= b)
+             break;
+     }
+
+     *r = (un21 * b + un0 - q0 * v) >> s;
+     return q1 * b + q0;
+ #endif
+ }
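+
+ // Editor's sketch (not upstream): the same contract one size up; u1 < v is
+ // required, otherwise the Hacker's Delight fallback reports overflow with an
+ // all-ones quotient and remainder. 2^64 / 3 exercises the normalization path.
+ #if 0
+ static void example_128_div_64(void) {
+     uint64_t r;
+     uint64_t q = libdivide_128_div_64_to_64(1, 0, 3, &r); // 2^64 / 3
+     LIBDIVIDE_ASSERT(q == 0x5555555555555555ULL && r == 1);
+ }
+ #endif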
+
+ // Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0)
+ static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) {
+     if (signed_shift > 0) {
+         uint32_t shift = signed_shift;
+         *u1 <<= shift;
+         *u1 |= *u0 >> (64 - shift);
+         *u0 <<= shift;
+     }
+     else if (signed_shift < 0) {
+         uint32_t shift = -signed_shift;
+         *u0 >>= shift;
+         *u0 |= *u1 << (64 - shift);
+         *u1 >>= shift;
+     }
+ }
+
+ // Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder.
+ static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) {
+ #if defined(HAS_INT128_T) && \
+     defined(HAS_INT128_DIV)
+     __uint128_t ufull = u_hi;
+     __uint128_t vfull = v_hi;
+     ufull = (ufull << 64) | u_lo;
+     vfull = (vfull << 64) | v_lo;
+     uint64_t res = (uint64_t)(ufull / vfull);
+     __uint128_t remainder = ufull - (vfull * res);
+     *r_lo = (uint64_t)remainder;
+     *r_hi = (uint64_t)(remainder >> 64);
+     return res;
+ #else
+     // Adapted from "Unsigned Doubleword Division" in Hacker's Delight
+     // We want to compute u / v
+     typedef struct { uint64_t hi; uint64_t lo; } u128_t;
+     u128_t u = {u_hi, u_lo};
+     u128_t v = {v_hi, v_lo};
+
+     if (v.hi == 0) {
+         // divisor v is a 64 bit value, so we just need one 128/64 division
+         // Note that we are simpler than Hacker's Delight here, because we know
+         // the quotient fits in 64 bits whereas Hacker's Delight demands a full
+         // 128 bit quotient
+         *r_hi = 0;
+         return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo);
+     }
+     // Here v >= 2**64
+     // We know that v.hi != 0, so count leading zeros is OK
+     // We have 0 <= n <= 63
+     uint32_t n = libdivide_count_leading_zeros64(v.hi);
+
+     // Normalize the divisor so its MSB is 1
+     u128_t v1t = v;
+     libdivide_u128_shift(&v1t.hi, &v1t.lo, n);
+     uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64
+
+     // To ensure no overflow
+     u128_t u1 = u;
+     libdivide_u128_shift(&u1.hi, &u1.lo, -1);
+
+     // Get quotient from divide unsigned insn.
+     uint64_t rem_ignored;
+     uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored);
+
+     // Undo normalization and division of u by 2.
+     u128_t q0 = {0, q1};
+     libdivide_u128_shift(&q0.hi, &q0.lo, n);
+     libdivide_u128_shift(&q0.hi, &q0.lo, -63);
+
+     // Make q0 correct or too small by 1
+     // Equivalent to `if (q0 != 0) q0 = q0 - 1;`
+     if (q0.hi != 0 || q0.lo != 0) {
+         q0.hi -= (q0.lo == 0); // borrow
+         q0.lo -= 1;
+     }
+
+     // Now q0 is correct.
+     // Compute q0 * v as q0v
+     // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo)
+     // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) +
+     //   (q0.lo * v.hi << 64) + (q0.lo * v.lo)
+     // Each term is 128 bit
+     // High half of full product (upper 128 bits!) are dropped
+     u128_t q0v = {0, 0};
+     q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo);
+     q0v.lo = q0.lo*v.lo;
+
+     // Compute u - q0v as u_q0v
+     // This is the remainder
+     u128_t u_q0v = u;
+     u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow
+     u_q0v.lo -= q0v.lo;
+
+     // Check if u_q0v >= v
+     // This checks if our remainder is larger than the divisor
+     if ((u_q0v.hi > v.hi) ||
+         (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) {
+         // Increment q0
+         q0.lo += 1;
+         q0.hi += (q0.lo == 0); // carry
+
+         // Subtract v from remainder
+         u_q0v.hi -= v.hi + (u_q0v.lo < v.lo);
+         u_q0v.lo -= v.lo;
+     }
+
+     *r_hi = u_q0v.hi;
+     *r_lo = u_q0v.lo;
+
+     LIBDIVIDE_ASSERT(q0.hi == 0);
+     return q0.lo;
+ #endif
+ }
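+
+ // Editor's sketch (not upstream): dividing 2^96 by 2^64 + 1 exercises the
+ // v.hi != 0 path above: q = 2^32 - 1 and the remainder 2^64 - 2^32 + 1
+ // still fits in the low word. Compiled out.
+ #if 0
+ static void example_128_div_128(void) {
+     uint64_t r_hi, r_lo;
+     uint64_t q = libdivide_128_div_128_to_64(1ULL << 32, 0, 1, 1, &r_hi, &r_lo);
+     LIBDIVIDE_ASSERT(q == 0xFFFFFFFFULL);
+     LIBDIVIDE_ASSERT(r_hi == 0 && r_lo == 0xFFFFFFFF00000001ULL);
+ }
+ #endif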
+
+ ////////// UINT32
+
+ static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) {
+     if (d == 0) {
+         LIBDIVIDE_ERROR("divider must be != 0");
+     }
+
+     struct libdivide_u32_t result;
+     uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d);
+
+     // Power of 2
+     if ((d & (d - 1)) == 0) {
+         // We need to subtract 1 from the shift value in case of an unsigned
+         // branchfree divider because there is a hardcoded right shift by 1
+         // in its division algorithm. Because of this we also need to add back
+         // 1 in its recovery algorithm.
+         result.magic = 0;
+         result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
+     } else {
+         uint8_t more;
+         uint32_t rem, proposed_m;
+         proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem);
+
+         LIBDIVIDE_ASSERT(rem > 0 && rem < d);
+         const uint32_t e = d - rem;
+
+         // This power works if e < 2**floor_log_2_d.
+         if (!branchfree && (e < (1U << floor_log_2_d))) {
+             // This power works
+             more = floor_log_2_d;
+         } else {
+             // We have to use the general 33-bit algorithm. We need to compute
+             // (2**power) / d. However, we already have (2**(power-1))/d and
+             // its remainder. By doubling both, and then correcting the
+             // remainder, we can compute the larger division.
+             // don't care about overflow here - in fact, we expect it
+             proposed_m += proposed_m;
+             const uint32_t twice_rem = rem + rem;
+             if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
+             more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+         }
+         result.magic = 1 + proposed_m;
+         result.more = more;
+         // result.more's shift should in general be ceil_log_2_d. But if we
+         // used the smaller power, we subtract one from the shift because we're
+         // using the smaller power. If we're using the larger power, we
+         // subtract one from the shift because it's taken care of by the add
+         // indicator. So floor_log_2_d happens to be correct in both cases.
+     }
+     return result;
+ }
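+
+ // Editor's worked example (not upstream): for d = 7, floor_log_2_d == 2 and
+ // e == 5 >= 2^2, so the generator above takes the 33-bit path: proposed_m
+ // doubles and wraps to 0x24924924, twice_rem == 4 adds nothing, and the
+ // stored magic is one more. Compiled out.
+ #if 0
+ static void example_u32_gen_7(void) {
+     struct libdivide_u32_t den = libdivide_internal_u32_gen(7, 0);
+     LIBDIVIDE_ASSERT(den.magic == 0x24924925u);
+     LIBDIVIDE_ASSERT(den.more == (2 | LIBDIVIDE_ADD_MARKER));
+ }
+ #endif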
+
+ struct libdivide_u32_t libdivide_u32_gen(uint32_t d) {
+     return libdivide_internal_u32_gen(d, 0);
+ }
+
+ struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) {
+     if (d == 1) {
+         LIBDIVIDE_ERROR("branchfree divider must be != 1");
+     }
+     struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1);
+     struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)};
+     return ret;
+ }
+
+ uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
+     uint8_t more = denom->more;
+     if (!denom->magic) {
+         return numer >> more;
+     }
+     else {
+         uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             uint32_t t = ((numer - q) >> 1) + q;
+             return t >> (more & LIBDIVIDE_32_SHIFT_MASK);
+         }
+         else {
+             // All upper bits are 0,
+             // don't need to mask them off.
+             return q >> more;
+         }
+     }
+ }
+
+ uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
+     uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+     uint32_t t = ((numer - q) >> 1) + q;
+     return t >> denom->more;
+ }
+
+ uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+     if (!denom->magic) {
+         return 1U << shift;
+     } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+         // We compute q = n/d = n*m / 2^(32 + shift)
+         // Therefore we have d = 2^(32 + shift) / m
+         // We need to ceil it.
+         // We know d is not a power of 2, so m is not a power of 2,
+         // so we can just add 1 to the floor
+         uint32_t hi_dividend = 1U << shift;
+         uint32_t rem_ignored;
+         return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored);
+     } else {
+         // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+         // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+         // Also note that shift may be as high as 31, so shift + 1 will
+         // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+         // then double the quotient and remainder.
+         uint64_t half_n = 1ULL << (32 + shift);
+         uint64_t d = (1ULL << 32) | denom->magic;
+         // Note that the quotient is guaranteed <= 32 bits, but the remainder
+         // may need 33!
+         uint32_t half_q = (uint32_t)(half_n / d);
+         uint64_t rem = half_n % d;
+         // We computed 2^(32+shift)/(m+2^32)
+         // Need to double it, and then add 1 to the quotient if doubling the
+         // remainder would increase the quotient.
+         // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+         uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+         // We rounded down in gen (hence +1)
+         return full_q + 1;
+     }
+ }
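+
+ // Editor's worked example (not upstream), continuing d = 7: here shift == 2,
+ // so half_n == 2^34 and d == m + 2^32 == 4908534053. half_q == 3 with
+ // remainder 2454267025; doubling that remainder stays below d, so
+ // full_q == 6 and the final +1 recovers 7. Compiled out.
+ #if 0
+ static void example_u32_recover_7(void) {
+     struct libdivide_u32_t den = libdivide_internal_u32_gen(7, 0);
+     LIBDIVIDE_ASSERT(libdivide_u32_recover(&den) == 7u);
+ }
+ #endif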
+
+ uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+     if (!denom->magic) {
+         return 1U << (shift + 1);
+     } else {
+         // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+         // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+         // Also note that shift may be as high as 31, so shift + 1 will
+         // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+         // then double the quotient and remainder.
+         uint64_t half_n = 1ULL << (32 + shift);
+         uint64_t d = (1ULL << 32) | denom->magic;
+         // Note that the quotient is guaranteed <= 32 bits, but the remainder
+         // may need 33!
+         uint32_t half_q = (uint32_t)(half_n / d);
+         uint64_t rem = half_n % d;
+         // We computed 2^(32+shift)/(m+2^32)
+         // Need to double it, and then add 1 to the quotient if doubling the
+         // remainder would increase the quotient.
+         // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+         uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+         // We rounded down in gen (hence +1)
+         return full_q + 1;
+     }
+ }
+
+ /////////// UINT64
+
+ static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) {
+     if (d == 0) {
+         LIBDIVIDE_ERROR("divider must be != 0");
+     }
+
+     struct libdivide_u64_t result;
+     uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d);
+
+     // Power of 2
+     if ((d & (d - 1)) == 0) {
+         // We need to subtract 1 from the shift value in case of an unsigned
+         // branchfree divider because there is a hardcoded right shift by 1
+         // in its division algorithm. Because of this we also need to add back
+         // 1 in its recovery algorithm.
+         result.magic = 0;
+         result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
+     } else {
+         uint64_t proposed_m, rem;
+         uint8_t more;
+         // (1 << (64 + floor_log_2_d)) / d
+         proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem);
+
+         LIBDIVIDE_ASSERT(rem > 0 && rem < d);
+         const uint64_t e = d - rem;
+
+         // This power works if e < 2**floor_log_2_d.
+         if (!branchfree && e < (1ULL << floor_log_2_d)) {
+             // This power works
+             more = floor_log_2_d;
+         } else {
+             // We have to use the general 65-bit algorithm. We need to compute
+             // (2**power) / d. However, we already have (2**(power-1))/d and
+             // its remainder. By doubling both, and then correcting the
+             // remainder, we can compute the larger division.
+             // don't care about overflow here - in fact, we expect it
+             proposed_m += proposed_m;
+             const uint64_t twice_rem = rem + rem;
+             if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
+             more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+         }
+         result.magic = 1 + proposed_m;
+         result.more = more;
+         // result.more's shift should in general be ceil_log_2_d. But if we
+         // used the smaller power, we subtract one from the shift because we're
+         // using the smaller power. If we're using the larger power, we
+         // subtract one from the shift because it's taken care of by the add
+         // indicator. So floor_log_2_d happens to be correct in both cases,
+         // which is why we do it outside of the if statement.
+     }
+     return result;
+ }
+
+ struct libdivide_u64_t libdivide_u64_gen(uint64_t d) {
+     return libdivide_internal_u64_gen(d, 0);
+ }
+
+ struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) {
+     if (d == 1) {
+         LIBDIVIDE_ERROR("branchfree divider must be != 1");
+     }
+     struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1);
+     struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)};
+     return ret;
+ }
+
+ uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
+     uint8_t more = denom->more;
+     if (!denom->magic) {
+         return numer >> more;
+     }
+     else {
+         uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             uint64_t t = ((numer - q) >> 1) + q;
+             return t >> (more & LIBDIVIDE_64_SHIFT_MASK);
+         }
+         else {
+             // All upper bits are 0,
+             // don't need to mask them off.
+             return q >> more;
+         }
+     }
+ }
+
+ uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) {
+     uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
+     uint64_t t = ((numer - q) >> 1) + q;
+     return t >> denom->more;
+ }
+
+ uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+     if (!denom->magic) {
+         return 1ULL << shift;
+     } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+         // We compute q = n/d = n*m / 2^(64 + shift)
+         // Therefore we have d = 2^(64 + shift) / m
+         // We need to ceil it.
+         // We know d is not a power of 2, so m is not a power of 2,
+         // so we can just add 1 to the floor
+         uint64_t hi_dividend = 1ULL << shift;
+         uint64_t rem_ignored;
+         return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored);
+     } else {
+         // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
+         // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
+         // libdivide_u32_recover for more on what we do here.
+         // TODO: do something better than 128 bit math
+
+         // Full n is a (potentially) 129 bit value
+         // half_n is a 128 bit value
+         // Compute the hi half of half_n. Low half is 0.
+         uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
+         // d is a 65 bit value. The high bit is always set to 1.
+         const uint64_t d_hi = 1, d_lo = denom->magic;
+         // Note that the quotient is guaranteed <= 64 bits,
+         // but the remainder may need 65!
+         uint64_t r_hi, r_lo;
+         uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
+         // We computed 2^(64+shift)/(m+2^64)
+         // Double the remainder ('dr') and check if that is larger than d
+         // Note that d is a 65 bit value, so r1 is small and so r1 + r1
+         // cannot overflow
+         uint64_t dr_lo = r_lo + r_lo;
+         uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
+         int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
+         uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
+         return full_q + 1;
+     }
+ }
+
+ uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+     if (!denom->magic) {
+         return 1ULL << (shift + 1);
+     } else {
+         // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
+         // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
+         // libdivide_u32_recover for more on what we do here.
+         // TODO: do something better than 128 bit math
+
+         // Full n is a (potentially) 129 bit value
+         // half_n is a 128 bit value
+         // Compute the hi half of half_n. Low half is 0.
+         uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
+         // d is a 65 bit value. The high bit is always set to 1.
+         const uint64_t d_hi = 1, d_lo = denom->magic;
+         // Note that the quotient is guaranteed <= 64 bits,
+         // but the remainder may need 65!
+         uint64_t r_hi, r_lo;
+         uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
+         // We computed 2^(64+shift)/(m+2^64)
+         // Double the remainder ('dr') and check if that is larger than d
+         // Note that d is a 65 bit value, so r1 is small and so r1 + r1
+         // cannot overflow
+         uint64_t dr_lo = r_lo + r_lo;
+         uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
+         int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
+         uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
+         return full_q + 1;
+     }
+ }
+
+
+ /////////// SINT32
+
+ static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) {
+     if (d == 0) {
+         LIBDIVIDE_ERROR("divider must be != 0");
+     }
+
+     struct libdivide_s32_t result;
+
+     // If d is a power of 2, or negative a power of 2, we have to use a shift.
+     // This is especially important because the magic algorithm fails for -1.
+     // To check if d is a power of 2 or its inverse, it suffices to check
+     // whether its absolute value has exactly one bit set. This works even for
+     // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+     // and is a power of 2.
+     uint32_t ud = (uint32_t)d;
+     uint32_t absD = (d < 0) ? -ud : ud;
+     uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD);
+     // check if exactly one bit is set,
+     // don't care if absD is 0 since that's divide by zero
+     if ((absD & (absD - 1)) == 0) {
+         // Branchfree and normal paths are exactly the same
+         result.magic = 0;
+         result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+     } else {
+         LIBDIVIDE_ASSERT(floor_log_2_d >= 1);
+
+         uint8_t more;
+         // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word
+         // is 0 and the high word is 2**(floor_log_2_d - 1)
+         uint32_t rem, proposed_m;
+         proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
+         const uint32_t e = absD - rem;
+
+         // We are going to start with a power of floor_log_2_d - 1.
+         // This works if e < 2**floor_log_2_d.
+         if (!branchfree && e < (1U << floor_log_2_d)) {
+             // This power works
+             more = floor_log_2_d - 1;
+         } else {
+             // We need to go one higher. This should not make proposed_m
+             // overflow, but it will make it negative when interpreted as an
+             // int32_t.
+             proposed_m += proposed_m;
+             const uint32_t twice_rem = rem + rem;
+             if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+             more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+         }
+
+         proposed_m += 1;
+         int32_t magic = (int32_t)proposed_m;
+
+         // Mark if we are negative. Note we only negate the magic number in the
+         // branchfull case.
+         if (d < 0) {
+             more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+             if (!branchfree) {
+                 magic = -magic;
+             }
+         }
+
+         result.more = more;
+         result.magic = magic;
+     }
+     return result;
+ }
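+
+ // Editor's sketch (not upstream): the power-of-2 escape hatch above,
+ // including the negative-divisor flag. |d| == 8 has one bit set, so the
+ // divider is a pure shift. Compiled out.
+ #if 0
+ static void example_s32_gen_pow2(void) {
+     struct libdivide_s32_t den = libdivide_internal_s32_gen(-8, 0);
+     LIBDIVIDE_ASSERT(den.magic == 0);
+     LIBDIVIDE_ASSERT(den.more == (3 | LIBDIVIDE_NEGATIVE_DIVISOR));
+ }
+ #endif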
+
+ struct libdivide_s32_t libdivide_s32_gen(int32_t d) {
+     return libdivide_internal_s32_gen(d, 0);
+ }
+
+ struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
+     struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1);
+     struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more};
+     return result;
+ }
+
+ int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+     if (!denom->magic) {
+         uint32_t sign = (int8_t)more >> 7;
+         uint32_t mask = (1U << shift) - 1;
+         uint32_t uq = numer + ((numer >> 31) & mask);
+         int32_t q = (int32_t)uq;
+         q >>= shift;
+         q = (q ^ sign) - sign;
+         return q;
+     } else {
+         uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift and then sign extend
+             int32_t sign = (int8_t)more >> 7;
+             // q += (more < 0 ? -numer : numer)
+             // cast required to avoid UB
+             uq += ((uint32_t)numer ^ sign) - sign;
+         }
+         int32_t q = (int32_t)uq;
+         q >>= shift;
+         q += (q < 0);
+         return q;
+     }
+ }
+
+ int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+     // must be arithmetic shift and then sign extend
+     int32_t sign = (int8_t)more >> 7;
+     int32_t magic = denom->magic;
+     int32_t q = libdivide_mullhi_s32(magic, numer);
+     q += numer;
+
+     // If q is non-negative, we have nothing to do
+     // If q is negative, we want to add either (2**shift)-1 if d is a power of
+     // 2, or (2**shift) if it is not a power of 2
+     uint32_t is_power_of_2 = (magic == 0);
+     uint32_t q_sign = (uint32_t)(q >> 31);
+     q += q_sign & ((1U << shift) - is_power_of_2);
+
+     // Now arithmetic right shift
+     q >>= shift;
+     // Negate if needed
+     q = (q ^ sign) - sign;
+
+     return q;
+ }
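+
+ // Editor's sketch (not upstream): the conditional add above replaces the
+ // usual "q += (q < 0)" branch, so the branchfree divider still truncates
+ // toward zero like C division. Compiled out.
+ #if 0
+ static void example_s32_branchfree(void) {
+     struct libdivide_s32_branchfree_t den = libdivide_s32_branchfree_gen(7);
+     LIBDIVIDE_ASSERT(libdivide_s32_branchfree_do(-100, &den) == -100 / 7); // -14
+ }
+ #endif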
+
+ int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+     if (!denom->magic) {
+         uint32_t absD = 1U << shift;
+         if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
+             absD = -absD;
+         }
+         return (int32_t)absD;
+     } else {
+         // Unsigned math is much easier
+         // We negate the magic number only in the branchfull case, and we don't
+         // know which case we're in. However we have enough information to
+         // determine the correct sign of the magic number. The divisor was
+         // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set,
+         // the magic number's sign is opposite that of the divisor.
+         // We want to compute the positive magic number.
+         int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
+         int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
+             ? denom->magic > 0 : denom->magic < 0;
+
+         // Handle the power of 2 case (including branchfree)
+         if (denom->magic == 0) {
+             int32_t result = 1U << shift;
+             return negative_divisor ? -result : result;
+         }
+
+         uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic);
+         uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30
+         uint32_t q = (uint32_t)(n / d);
+         int32_t result = (int32_t)q;
+         result += 1;
+         return negative_divisor ? -result : result;
+     }
+ }
+
+ int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) {
+     return libdivide_s32_recover((const struct libdivide_s32_t *)denom);
+ }
+
+ ///////////// SINT64
+
+ static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) {
+     if (d == 0) {
+         LIBDIVIDE_ERROR("divider must be != 0");
+     }
+
+     struct libdivide_s64_t result;
+
+     // If d is a power of 2, or negative a power of 2, we have to use a shift.
+     // This is especially important because the magic algorithm fails for -1.
+     // To check if d is a power of 2 or its inverse, it suffices to check
+     // whether its absolute value has exactly one bit set. This works even for
+     // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+     // and is a power of 2.
+     uint64_t ud = (uint64_t)d;
+     uint64_t absD = (d < 0) ? -ud : ud;
+     uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD);
+     // check if exactly one bit is set,
+     // don't care if absD is 0 since that's divide by zero
+     if ((absD & (absD - 1)) == 0) {
+         // Branchfree and non-branchfree cases are the same
+         result.magic = 0;
+         result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+     } else {
+         // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word
+         // is 0 and the high word is 2**(floor_log_2_d - 1)
+         uint8_t more;
+         uint64_t rem, proposed_m;
+         proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
+         const uint64_t e = absD - rem;
+
+         // We are going to start with a power of floor_log_2_d - 1.
+         // This works if e < 2**floor_log_2_d.
+         if (!branchfree && e < (1ULL << floor_log_2_d)) {
+             // This power works
+             more = floor_log_2_d - 1;
+         } else {
+             // We need to go one higher. This should not make proposed_m
+             // overflow, but it will make it negative when interpreted as an
+             // int64_t.
+             proposed_m += proposed_m;
+             const uint64_t twice_rem = rem + rem;
+             if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+             // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we
+             // also set ADD_MARKER; this is an annoying optimization that
+             // enables algorithm #4 to avoid the mask. However we always set it
+             // in the branchfree case
+             more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+         }
+         proposed_m += 1;
+         int64_t magic = (int64_t)proposed_m;
+
+         // Mark if we are negative
+         if (d < 0) {
+             more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+             if (!branchfree) {
+                 magic = -magic;
+             }
+         }
+
+         result.more = more;
+         result.magic = magic;
+     }
+     return result;
+ }
+
+ struct libdivide_s64_t libdivide_s64_gen(int64_t d) {
+     return libdivide_internal_s64_gen(d, 0);
+ }
+
+ struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
+     struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1);
+     struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more};
+     return ret;
+ }
+
+ int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+     if (!denom->magic) { // shift path
+         uint64_t mask = (1ULL << shift) - 1;
+         uint64_t uq = numer + ((numer >> 63) & mask);
+         int64_t q = (int64_t)uq;
+         q >>= shift;
+         // must be arithmetic shift and then sign-extend
+         int64_t sign = (int8_t)more >> 7;
+         q = (q ^ sign) - sign;
+         return q;
+     } else {
+         uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift and then sign extend
+             int64_t sign = (int8_t)more >> 7;
+             // q += (more < 0 ? -numer : numer)
+             // cast required to avoid UB
+             uq += ((uint64_t)numer ^ sign) - sign;
+         }
+         int64_t q = (int64_t)uq;
+         q >>= shift;
+         q += (q < 0);
+         return q;
+     }
+ }
+
+ int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+     // must be arithmetic shift and then sign extend
+     int64_t sign = (int8_t)more >> 7;
+     int64_t magic = denom->magic;
+     int64_t q = libdivide_mullhi_s64(magic, numer);
+     q += numer;
+
+     // If q is non-negative, we have nothing to do.
+     // If q is negative, we want to add either (2**shift)-1 if d is a power of
+     // 2, or (2**shift) if it is not a power of 2.
+     uint64_t is_power_of_2 = (magic == 0);
+     uint64_t q_sign = (uint64_t)(q >> 63);
+     q += q_sign & ((1ULL << shift) - is_power_of_2);
+
+     // Arithmetic right shift
+     q >>= shift;
+     // Negate if needed
+     q = (q ^ sign) - sign;
+
+     return q;
+ }
+
+ int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) {
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+     if (denom->magic == 0) { // shift path
+         uint64_t absD = 1ULL << shift;
+         if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
+             absD = -absD;
+         }
+         return (int64_t)absD;
+     } else {
+         // Unsigned math is much easier
+         int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
+         int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
+             ? denom->magic > 0 : denom->magic < 0;
+
+         uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic);
+         uint64_t n_hi = 1ULL << shift, n_lo = 0;
+         uint64_t rem_ignored;
+         uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored);
+         int64_t result = (int64_t)(q + 1);
+         if (negative_divisor) {
+             result = -result;
+         }
+         return result;
+     }
+ }
+
+ int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) {
+     return libdivide_s64_recover((const struct libdivide_s64_t *)denom);
+ }
+
+ #if defined(LIBDIVIDE_AVX512)
+
+ static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom);
+ static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom);
+ static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom);
+ static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom);
+
+ static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom);
+ static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom);
+ static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom);
+ static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom);
+
+ //////// Internal Utility Functions
+
+ static inline __m512i libdivide_s64_signbits(__m512i v) {
+     return _mm512_srai_epi64(v, 63);
+ }
+
+ static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) {
+     return _mm512_srai_epi64(v, amt);
+ }
+
+ // Here, b is assumed to contain one 32-bit value repeated.
+ static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) {
+     __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32);
+     __m512i a1X3X = _mm512_srli_epi64(a, 32);
+     __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+     __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask);
+     return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+ }
+
+ // b is one 32-bit value repeated.
+ static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) {
+     __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32);
+     __m512i a1X3X = _mm512_srli_epi64(a, 32);
+     __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+     __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask);
+     return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+ }
+
+ // Here, y is assumed to contain one 64-bit value repeated.
+ // https://stackoverflow.com/a/28827013
+ static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) {
+     __m512i lomask = _mm512_set1_epi64(0xffffffff);
+     __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1);
+     __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1);
+     __m512i w0 = _mm512_mul_epu32(x, y);
+     __m512i w1 = _mm512_mul_epu32(x, yh);
+     __m512i w2 = _mm512_mul_epu32(xh, y);
+     __m512i w3 = _mm512_mul_epu32(xh, yh);
+     __m512i w0h = _mm512_srli_epi64(w0, 32);
+     __m512i s1 = _mm512_add_epi64(w1, w0h);
+     __m512i s1l = _mm512_and_si512(s1, lomask);
+     __m512i s1h = _mm512_srli_epi64(s1, 32);
+     __m512i s2 = _mm512_add_epi64(w2, s1l);
+     __m512i s2h = _mm512_srli_epi64(s2, 32);
+     __m512i hi = _mm512_add_epi64(w3, s1h);
+     hi = _mm512_add_epi64(hi, s2h);
+
+     return hi;
+ }
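+
+ // Editor's sketch (not upstream): the vector routine above is the same
+ // schoolbook 32x32 decomposition as the scalar fallback in
+ // libdivide_mullhi_u64, so each lane must match the scalar result.
+ // Compiled out; assumes an x86-64 target for _mm_cvtsi128_si64.
+ #if 0
+ static void example_mullhi_u64_vector_check(void) {
+     __m512i x = _mm512_set1_epi64((int64_t)((1ULL << 32) + 3));
+     __m512i y = _mm512_set1_epi64((int64_t)((1ULL << 32) + 5));
+     __m512i hi = libdivide_mullhi_u64_vector(x, y);
+     uint64_t lane0 = (uint64_t)_mm_cvtsi128_si64(_mm512_castsi512_si128(hi));
+     LIBDIVIDE_ASSERT(lane0 == libdivide_mullhi_u64((1ULL << 32) + 3, (1ULL << 32) + 5));
+ }
+ #endif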
+
+ // y is one 64-bit value repeated.
+ static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) {
+     __m512i p = libdivide_mullhi_u64_vector(x, y);
+     __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y);
+     __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x);
+     p = _mm512_sub_epi64(p, t1);
+     p = _mm512_sub_epi64(p, t2);
+     return p;
+ }
+
+ ////////// UINT32
+
+ __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) {
+     uint8_t more = denom->more;
+     if (!denom->magic) {
+         return _mm512_srli_epi32(numers, more);
+     }
+     else {
+         __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // uint32_t t = ((numer - q) >> 1) + q;
+             // return t >> denom->shift;
+             uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+             __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+             return _mm512_srli_epi32(t, shift);
+         }
+         else {
+             return _mm512_srli_epi32(q, more);
+         }
+     }
+ }
+
+ __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) {
+     __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+     __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+     return _mm512_srli_epi32(t, denom->more);
+ }
+
+ ////////// UINT64
+
+ __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) {
+     uint8_t more = denom->more;
+     if (!denom->magic) {
+         return _mm512_srli_epi64(numers, more);
+     }
+     else {
+         __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // uint64_t t = ((numer - q) >> 1) + q;
+             // return t >> denom->shift;
+             uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+             __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+             return _mm512_srli_epi64(t, shift);
+         }
+         else {
+             return _mm512_srli_epi64(q, more);
+         }
+     }
+ }
+
+ __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) {
+     __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+     __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+     return _mm512_srli_epi64(t, denom->more);
+ }
+
+ ////////// SINT32
+
+ __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) {
+     uint8_t more = denom->more;
+     if (!denom->magic) {
+         uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+         uint32_t mask = (1U << shift) - 1;
+         __m512i roundToZeroTweak = _mm512_set1_epi32(mask);
+         // q = numer + ((numer >> 31) & roundToZeroTweak);
+         __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak));
+         q = _mm512_srai_epi32(q, shift);
+         __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+         // q = (q ^ sign) - sign;
+         q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign);
+         return q;
+     }
+     else {
+         __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift
+             __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+             // q += ((numer ^ sign) - sign);
+             q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign));
+         }
+         // q >>= shift
+         q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+         q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0)
+         return q;
+     }
+ }
+
+ __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) {
+     int32_t magic = denom->magic;
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+     // must be arithmetic shift
+     __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+     __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic));
+     q = _mm512_add_epi32(q, numers); // q += numers
+
+     // If q is non-negative, we have nothing to do
+     // If q is negative, we want to add either (2**shift)-1 if d is
+     // a power of 2, or (2**shift) if it is not a power of 2
+     uint32_t is_power_of_2 = (magic == 0);
+     __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31
+     __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2);
+     q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
+     q = _mm512_srai_epi32(q, shift); // q >>= shift
+     q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
+     return q;
+ }
+
+ ////////// SINT64
+
+ __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) {
+     uint8_t more = denom->more;
+     int64_t magic = denom->magic;
+     if (magic == 0) { // shift path
+         uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+         uint64_t mask = (1ULL << shift) - 1;
+         __m512i roundToZeroTweak = _mm512_set1_epi64(mask);
+         // q = numer + ((numer >> 63) & roundToZeroTweak);
+         __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak));
+         q = libdivide_s64_shift_right_vector(q, shift);
+         __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+         // q = (q ^ sign) - sign;
+         q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign);
+         return q;
+     }
+     else {
+         __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift
+             __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+             // q += ((numer ^ sign) - sign);
+             q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign));
+         }
+         // q >>= denom->mult_path.shift
+         q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+         q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0)
+         return q;
+     }
+ }
+
+ __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) {
+     int64_t magic = denom->magic;
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+     // must be arithmetic shift
+     __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+
+     // libdivide_mullhi_s64(numers, magic);
+     __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
+     q = _mm512_add_epi64(q, numers); // q += numers
+
+     // If q is non-negative, we have nothing to do.
+     // If q is negative, we want to add either (2**shift)-1 if d is
+     // a power of 2, or (2**shift) if it is not a power of 2.
+     uint32_t is_power_of_2 = (magic == 0);
+     __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+     __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2);
+     q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
+     q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+     q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
+     return q;
+ }
+
+ #elif defined(LIBDIVIDE_AVX2)
+
+ static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom);
+ static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom);
+ static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom);
+ static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom);
+
+ static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom);
+ static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom);
+ static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom);
+ static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom);
+
+ //////// Internal Utility Functions
+
+ // Implementation of _mm256_srai_epi64(v, 63) (from AVX512).
+ static inline __m256i libdivide_s64_signbits(__m256i v) {
+     __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+     __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31);
+     return signBits;
+ }
+
+ // Implementation of _mm256_srai_epi64 (from AVX512).
+ static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) {
+     const int b = 64 - amt;
+     __m256i m = _mm256_set1_epi64x(1ULL << (b - 1));
+     __m256i x = _mm256_srli_epi64(v, amt);
+     __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m);
+     return result;
+ }
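+
+ // Editor's note (not upstream): the (x ^ m) - m trick above sign-extends a
+ // logical shift. m has only bit (63 - amt) set, exactly where the shifted
+ // copy of the sign bit lands; XOR flips it and the subtract propagates a
+ // borrow through all higher bits when the sign bit was set. A compiled-out
+ // scalar rendering of the same identity:
+ #if 0
+ static int64_t example_srai64_emulation(int64_t v, int amt) {
+     uint64_t m = 1ULL << (63 - amt);
+     uint64_t x = (uint64_t)v >> amt;
+     return (int64_t)((x ^ m) - m); // equals v >> amt with arithmetic shift
+ }
+ #endif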
+
+ // Here, b is assumed to contain one 32-bit value repeated.
+ static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) {
+     __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32);
+     __m256i a1X3X = _mm256_srli_epi64(a, 32);
+     __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
+     __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask);
+     return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
+ }
+
+ // b is one 32-bit value repeated.
+ static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) {
+     __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32);
+     __m256i a1X3X = _mm256_srli_epi64(a, 32);
+     __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
+     __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask);
+     return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
+ }
+
+ // Here, y is assumed to contain one 64-bit value repeated.
+ // https://stackoverflow.com/a/28827013
+ static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) {
+     __m256i lomask = _mm256_set1_epi64x(0xffffffff);
+     __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h
+     __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h
+     __m256i w0 = _mm256_mul_epu32(x, y);        // x0l*y0l, x1l*y1l
+     __m256i w1 = _mm256_mul_epu32(x, yh);       // x0l*y0h, x1l*y1h
+     __m256i w2 = _mm256_mul_epu32(xh, y);       // x0h*y0l, x1h*y1l
+     __m256i w3 = _mm256_mul_epu32(xh, yh);      // x0h*y0h, x1h*y1h
+     __m256i w0h = _mm256_srli_epi64(w0, 32);
+     __m256i s1 = _mm256_add_epi64(w1, w0h);
+     __m256i s1l = _mm256_and_si256(s1, lomask);
+     __m256i s1h = _mm256_srli_epi64(s1, 32);
+     __m256i s2 = _mm256_add_epi64(w2, s1l);
+     __m256i s2h = _mm256_srli_epi64(s2, 32);
+     __m256i hi = _mm256_add_epi64(w3, s1h);
+     hi = _mm256_add_epi64(hi, s2h);
+
+     return hi;
+ }
+
+ // y is one 64-bit value repeated.
+ static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) {
+     __m256i p = libdivide_mullhi_u64_vector(x, y);
+     __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y);
+     __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x);
+     p = _mm256_sub_epi64(p, t1);
+     p = _mm256_sub_epi64(p, t2);
+     return p;
+ }
+
1524
+ ////////// UINT32
1525
+
1526
+ __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) {
1527
+ uint8_t more = denom->more;
1528
+ if (!denom->magic) {
1529
+ return _mm256_srli_epi32(numers, more);
1530
+ }
1531
+ else {
1532
+ __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
1533
+ if (more & LIBDIVIDE_ADD_MARKER) {
1534
+ // uint32_t t = ((numer - q) >> 1) + q;
1535
+ // return t >> denom->shift;
1536
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1537
+ __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
1538
+ return _mm256_srli_epi32(t, shift);
1539
+ }
1540
+ else {
1541
+ return _mm256_srli_epi32(q, more);
1542
+ }
1543
+ }
1544
+ }
1545
+
1546
+ __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) {
1547
+ __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
1548
+ __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
1549
+ return _mm256_srli_epi32(t, denom->more);
1550
+ }
1551
+
1552
+ ////////// UINT64
1553
+
1554
+ __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) {
1555
+ uint8_t more = denom->more;
1556
+ if (!denom->magic) {
1557
+ return _mm256_srli_epi64(numers, more);
1558
+ }
1559
+ else {
1560
+ __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
1561
+ if (more & LIBDIVIDE_ADD_MARKER) {
1562
+ // uint32_t t = ((numer - q) >> 1) + q;
1563
+ // return t >> denom->shift;
1564
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
1565
+ __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
1566
+ return _mm256_srli_epi64(t, shift);
1567
+ }
1568
+ else {
1569
+ return _mm256_srli_epi64(q, more);
1570
+ }
1571
+ }
1572
+ }
1573
+
1574
+ __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) {
1575
+ __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
1576
+ __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
1577
+ return _mm256_srli_epi64(t, denom->more);
1578
+ }
1579
+
1580
+ ////////// SINT32
1581
+
1582
+ __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) {
1583
+ uint8_t more = denom->more;
1584
+ if (!denom->magic) {
1585
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1586
+ uint32_t mask = (1U << shift) - 1;
1587
+ __m256i roundToZeroTweak = _mm256_set1_epi32(mask);
1588
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
1589
+ __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak));
1590
+ q = _mm256_srai_epi32(q, shift);
1591
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1592
+ // q = (q ^ sign) - sign;
1593
+ q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign);
1594
+ return q;
1595
+ }
1596
+ else {
1597
+ __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic));
1598
+ if (more & LIBDIVIDE_ADD_MARKER) {
1599
+ // must be arithmetic shift
1600
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1601
+ // q += ((numer ^ sign) - sign);
1602
+ q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign));
1603
+ }
1604
+ // q >>= shift
1605
+ q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
1606
+ q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0)
1607
+ return q;
1608
+ }
1609
+ }
1610
+
1611
+ __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) {
1612
+ int32_t magic = denom->magic;
1613
+ uint8_t more = denom->more;
1614
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
1615
+ // must be arithmetic shift
1616
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
1617
+ __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic));
1618
+ q = _mm256_add_epi32(q, numers); // q += numers
1619
+
1620
+ // If q is non-negative, we have nothing to do
1621
+ // If q is negative, we want to add either (2**shift)-1 if d is
1622
+ // a power of 2, or (2**shift) if it is not a power of 2
1623
+ uint32_t is_power_of_2 = (magic == 0);
1624
+ __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31
1625
+ __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2);
1626
+ q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
1627
+ q = _mm256_srai_epi32(q, shift); // q >>= shift
1628
+ q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
1629
+ return q;
1630
+ }
+
+ ////////// SINT64
+
+ __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) {
+     uint8_t more = denom->more;
+     int64_t magic = denom->magic;
+     if (magic == 0) { // shift path
+         uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+         uint64_t mask = (1ULL << shift) - 1;
+         __m256i roundToZeroTweak = _mm256_set1_epi64x(mask);
+         // q = numer + ((numer >> 63) & roundToZeroTweak);
+         __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak));
+         q = libdivide_s64_shift_right_vector(q, shift);
+         __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+         // q = (q ^ sign) - sign;
+         q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign);
+         return q;
+     }
+     else {
+         __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift
+             __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+             // q += ((numer ^ sign) - sign);
+             q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign));
+         }
+         // q >>= shift
+         q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+         q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0)
+         return q;
+     }
+ }
+
+ __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) {
+     int64_t magic = denom->magic;
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+     // must be arithmetic shift
+     __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+
+     // libdivide_mullhi_s64(numers, magic);
+     __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
+     q = _mm256_add_epi64(q, numers); // q += numers
+
+     // If q is non-negative, we have nothing to do.
+     // If q is negative, we want to add either (2**shift)-1 if d is
+     // a power of 2, or (2**shift) if it is not a power of 2.
+     uint32_t is_power_of_2 = (magic == 0);
+     __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+     __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2);
+     q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
+     q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+     q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
+     return q;
+ }
+
+ #elif defined(LIBDIVIDE_SSE2)
+
+ static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom);
+ static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom);
+ static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom);
+ static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom);
+
+ static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom);
+ static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom);
+ static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom);
+ static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom);
+
+ //////// Internal Utility Functions
+
+ // Implementation of _mm_srai_epi64(v, 63) (from AVX512).
+ static inline __m128i libdivide_s64_signbits(__m128i v) {
+     __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+     __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31);
+     return signBits;
+ }
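+
+ // Note: _MM_SHUFFLE(3, 3, 1, 1) copies the high 32-bit half of each
+ // 64-bit lane into both halves, so the 32-bit arithmetic shift leaves
+ // each lane all-zeros (non-negative) or all-ones (negative), i.e. the
+ // sign bit broadcast across the whole lane. For v = {-5, 7} the result
+ // is {-1, 0}.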
+
+ // Implementation of _mm_srai_epi64 (from AVX512).
+ static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) {
+     const int b = 64 - amt;
+     __m128i m = _mm_set1_epi64x(1ULL << (b - 1));
+     __m128i x = _mm_srli_epi64(v, amt);
+     __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m);
+     return result;
+ }
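+
+ // A scalar sketch of the trick above (illustrative only): after the
+ // logical shift the original sign bit sits at position 63 - amt, and
+ // (x ^ m) - m with m = 1 << (63 - amt) sign-extends it:
+ //
+ //     uint64_t m = 1ULL << (63 - amt);
+ //     uint64_t x = v >> amt;    // logical shift
+ //     int64_t  r = (x ^ m) - m; // equals (int64_t)v >> amt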
+
+ // Here, b is assumed to contain one 32-bit value repeated.
+ static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) {
+     __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32);
+     __m128i a1X3X = _mm_srli_epi64(a, 32);
+     __m128i mask = _mm_set_epi32(-1, 0, -1, 0);
+     __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask);
+     return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3);
+ }
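+
+ // Per 32-bit lane, the above computes the scalar expression
+ //
+ //     (uint32_t)(((uint64_t)a * b) >> 32)
+ //
+ // _mm_mul_epu32 only multiplies the even lanes (0 and 2), so the odd
+ // lanes are shifted down, multiplied separately, and the two sets of
+ // high halves are masked and merged back together.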
+
+ // SSE2 does not have a signed multiplication instruction, but we can convert
+ // unsigned to signed pretty efficiently. Again, b is just a 32-bit value
+ // repeated four times.
+ static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) {
+     __m128i p = libdivide_mullhi_u32_vector(a, b);
+     // t1 = (a >> 31) & b, arithmetic shift
+     __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b);
+     __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a);
+     p = _mm_sub_epi32(p, t1);
+     p = _mm_sub_epi32(p, t2);
+     return p;
+ }
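+
+ // The signed high word follows from the unsigned one via the identity
+ //
+ //     mullhi_s32(a, b) = mullhi_u32(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
+ //
+ // e.g. a = -1, b = 3: the unsigned high word of 0xFFFFFFFF * 3 is 2;
+ // subtracting b (since a < 0) gives -1, the high word of -3.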
+
+ // Here, y is assumed to contain one 64-bit value repeated.
+ // https://stackoverflow.com/a/28827013
+ static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) {
+     __m128i lomask = _mm_set1_epi64x(0xffffffff);
+     __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h
+     __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h
+     __m128i w0 = _mm_mul_epu32(x, y);        // x0l*y0l, x1l*y1l
+     __m128i w1 = _mm_mul_epu32(x, yh);       // x0l*y0h, x1l*y1h
+     __m128i w2 = _mm_mul_epu32(xh, y);       // x0h*y0l, x1h*y1l
+     __m128i w3 = _mm_mul_epu32(xh, yh);      // x0h*y0h, x1h*y1h
+     __m128i w0h = _mm_srli_epi64(w0, 32);
+     __m128i s1 = _mm_add_epi64(w1, w0h);
+     __m128i s1l = _mm_and_si128(s1, lomask);
+     __m128i s1h = _mm_srli_epi64(s1, 32);
+     __m128i s2 = _mm_add_epi64(w2, s1l);
+     __m128i s2h = _mm_srli_epi64(s2, 32);
+     __m128i hi = _mm_add_epi64(w3, s1h);
+     hi = _mm_add_epi64(hi, s2h);
+
+     return hi;
+ }
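+
+ // A scalar reference for the schoolbook decomposition above
+ // (illustrative only; xl/xh denote the 32-bit halves of x):
+ //
+ //     uint64_t mullhi_u64(uint64_t x, uint64_t y) {
+ //         uint64_t xl = x & 0xffffffff, xh = x >> 32;
+ //         uint64_t yl = y & 0xffffffff, yh = y >> 32;
+ //         uint64_t w0 = xl * yl, w1 = xl * yh;
+ //         uint64_t w2 = xh * yl, w3 = xh * yh;
+ //         uint64_t s1 = w1 + (w0 >> 32);        // cannot overflow
+ //         uint64_t s2 = w2 + (s1 & 0xffffffff);
+ //         return w3 + (s1 >> 32) + (s2 >> 32);
+ //     }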
+
+ // y is one 64-bit value repeated.
+ static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) {
+     __m128i p = libdivide_mullhi_u64_vector(x, y);
+     __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y);
+     __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x);
+     p = _mm_sub_epi64(p, t1);
+     p = _mm_sub_epi64(p, t2);
+     return p;
+ }
+
+ ////////// UINT32
+
+ __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) {
+     uint8_t more = denom->more;
+     if (!denom->magic) {
+         return _mm_srli_epi32(numers, more);
+     }
+     else {
+         __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // uint32_t t = ((numer - q) >> 1) + q;
+             // return t >> denom->shift;
+             uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+             __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+             return _mm_srli_epi32(t, shift);
+         }
+         else {
+             return _mm_srli_epi32(q, more);
+         }
+     }
+ }
+
+ __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) {
+     __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+     __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+     return _mm_srli_epi32(t, denom->more);
+ }
+
+ ////////// UINT64
+
+ __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) {
+     uint8_t more = denom->more;
+     if (!denom->magic) {
+         return _mm_srli_epi64(numers, more);
+     }
+     else {
+         __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // uint64_t t = ((numer - q) >> 1) + q;
+             // return t >> denom->shift;
+             uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+             __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+             return _mm_srli_epi64(t, shift);
+         }
+         else {
+             return _mm_srli_epi64(q, more);
+         }
+     }
+ }
+
+ __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) {
+     __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+     __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+     return _mm_srli_epi64(t, denom->more);
+ }
+
+ ////////// SINT32
+
+ __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) {
+     uint8_t more = denom->more;
+     if (!denom->magic) {
+         uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+         uint32_t mask = (1U << shift) - 1;
+         __m128i roundToZeroTweak = _mm_set1_epi32(mask);
+         // q = numer + ((numer >> 31) & roundToZeroTweak);
+         __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
+         q = _mm_srai_epi32(q, shift);
+         __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+         // q = (q ^ sign) - sign;
+         q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign);
+         return q;
+     }
+     else {
+         __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift
+             __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+             // q += ((numer ^ sign) - sign);
+             q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign));
+         }
+         // q >>= shift
+         q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+         q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0)
+         return q;
+     }
+ }
+
+ __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) {
+     int32_t magic = denom->magic;
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+     // must be arithmetic shift
+     __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+     __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic));
+     q = _mm_add_epi32(q, numers); // q += numers
+
+     // If q is non-negative, we have nothing to do.
+     // If q is negative, we want to add either (2**shift)-1 if d is
+     // a power of 2, or (2**shift) if it is not a power of 2.
+     uint32_t is_power_of_2 = (magic == 0);
+     __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31
+     __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2);
+     q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+     q = _mm_srai_epi32(q, shift); // q >>= shift
+     q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+     return q;
+ }
+
+ ////////// SINT64
+
+ __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) {
+     uint8_t more = denom->more;
+     int64_t magic = denom->magic;
+     if (magic == 0) { // shift path
+         uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+         uint64_t mask = (1ULL << shift) - 1;
+         __m128i roundToZeroTweak = _mm_set1_epi64x(mask);
+         // q = numer + ((numer >> 63) & roundToZeroTweak);
+         __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak));
+         q = libdivide_s64_shift_right_vector(q, shift);
+         __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+         // q = (q ^ sign) - sign;
+         q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign);
+         return q;
+     }
+     else {
+         __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
+         if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift
+             __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+             // q += ((numer ^ sign) - sign);
+             q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign));
+         }
+         // q >>= shift
+         q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+         q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0)
+         return q;
+     }
+ }
+
+ __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) {
+     int64_t magic = denom->magic;
+     uint8_t more = denom->more;
+     uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+     // must be arithmetic shift
+     __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+
+     // libdivide_mullhi_s64(numers, magic);
+     __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
+     q = _mm_add_epi64(q, numers); // q += numers
+
+     // If q is non-negative, we have nothing to do.
+     // If q is negative, we want to add either (2**shift)-1 if d is
+     // a power of 2, or (2**shift) if it is not a power of 2.
+     uint32_t is_power_of_2 = (magic == 0);
+     __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+     __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
+     q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+     q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+     q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+     return q;
+ }
+
+ #endif
+
+ /////////// C++ stuff
+
+ #ifdef __cplusplus
+
+ // The C++ divider class is templated on both an integer type
+ // (like uint64_t) and an algorithm type.
+ // * BRANCHFULL is the default algorithm type.
+ // * BRANCHFREE is the branchfree algorithm type.
+ enum {
+     BRANCHFULL,
+     BRANCHFREE
+ };
+
+ #if defined(LIBDIVIDE_AVX512)
+     #define LIBDIVIDE_VECTOR_TYPE __m512i
+ #elif defined(LIBDIVIDE_AVX2)
+     #define LIBDIVIDE_VECTOR_TYPE __m256i
+ #elif defined(LIBDIVIDE_SSE2)
+     #define LIBDIVIDE_VECTOR_TYPE __m128i
+ #endif
+
+ #if !defined(LIBDIVIDE_VECTOR_TYPE)
+     #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+ #else
+     #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+         LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+             return libdivide_##ALGO##_do_vector(n, &denom); \
+         }
+ #endif
+
+ // The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+ // and algorithm types) that redirect to libdivide's C API.
+ #define DISPATCHER_GEN(T, ALGO) \
+     libdivide_##ALGO##_t denom; \
+     dispatcher() { } \
+     dispatcher(T d) \
+         : denom(libdivide_##ALGO##_gen(d)) \
+     { } \
+     T divide(T n) const { \
+         return libdivide_##ALGO##_do(n, &denom); \
+     } \
+     LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+     T recover() const { \
+         return libdivide_##ALGO##_recover(&denom); \
+     }
+
+ // The dispatcher selects a specific division algorithm for a given
+ // type and ALGO using partial template specialization.
+ template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+ template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+ template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+ template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+ template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+ template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+ template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+ template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+ template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+ // This is the main divider class for use by the user (C++ API).
+ // The actual division algorithm is selected using the dispatcher struct
+ // based on the integer and algorithm template parameters.
+ template<typename T, int ALGO = BRANCHFULL>
+ class divider {
+ public:
+     // We leave the default constructor empty so that creating
+     // an array of dividers and then initializing them
+     // later doesn't slow us down.
+     divider() { }
+
+     // Constructor that takes the divisor as a parameter
+     divider(T d) : div(d) { }
+
+     // Divides n by the divisor
+     T divide(T n) const {
+         return div.divide(n);
+     }
+
+     // Recovers the divisor: returns the value that was
+     // used to initialize this divider object.
+     T recover() const {
+         return div.recover();
+     }
+
+     bool operator==(const divider<T, ALGO>& other) const {
+         return div.denom.magic == other.div.denom.magic &&
+                div.denom.more == other.div.denom.more;
+     }
+
+     bool operator!=(const divider<T, ALGO>& other) const {
+         return !(*this == other);
+     }
+
+ #if defined(LIBDIVIDE_VECTOR_TYPE)
+     // Treats the vector as packed integer values with the same type as
+     // the divider (e.g. s32, u32, s64, u64) and divides each of
+     // them by the divider, returning the packed quotients.
+     LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+         return div.divide(n);
+     }
+ #endif
+
+ private:
+     // Storage for the actual divisor
+     dispatcher<std::is_integral<T>::value,
+                std::is_signed<T>::value, sizeof(T), ALGO> div;
+ };
+
+ // Overload of operator / for scalar division
+ template<typename T, int ALGO>
+ T operator/(T n, const divider<T, ALGO>& div) {
+     return div.divide(n);
+ }
+
+ // Overload of operator /= for scalar division
+ template<typename T, int ALGO>
+ T& operator/=(T& n, const divider<T, ALGO>& div) {
+     n = div.divide(n);
+     return n;
+ }
+
+ #if defined(LIBDIVIDE_VECTOR_TYPE)
+ // Overload of operator / for vector division
+ template<typename T, int ALGO>
+ LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+     return div.divide(n);
+ }
+ // Overload of operator /= for vector division
+ template<typename T, int ALGO>
+ LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+     n = div.divide(n);
+     return n;
+ }
+ #endif
+
+ // libdivide::branchfree_divider<T>
+ template <typename T>
+ using branchfree_divider = divider<T, BRANCHFREE>;
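+
+ // A minimal usage sketch of this C++ API (the divisors 7 and 9 below are
+ // arbitrary example values):
+ //
+ //     libdivide::divider<uint32_t> d(7);             // precomputes magic + shift
+ //     uint32_t n = 100;
+ //     uint32_t q1 = d.divide(n);                     // q1 == 14
+ //     uint32_t q2 = n / d;                           // same, via operator/
+ //     libdivide::branchfree_divider<uint64_t> bf(9); // BRANCHFREE variant
+ //     uint64_t q3 = uint64_t(90) / bf;               // q3 == 10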
+
+ } // namespace libdivide
+
+ #endif // __cplusplus
+
+ #endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_