scipy-1.16.2-cp313-cp313-win_arm64.whl

This diff shows the contents of publicly available package versions released to a supported registry. The information is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
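The listing below enumerates every file contained in the wheel. As a point of reference, a wheel is a standard zip archive, so a comparable file listing can be reproduced locally with Python's standard-library zipfile module. The following is a minimal sketch, assuming the wheel has been downloaded to the working directory under its canonical filename (an assumed local path, not part of this diff):

import zipfile

WHEEL = "scipy-1.16.2-cp313-cp313-win_arm64.whl"  # assumed local path

with zipfile.ZipFile(WHEEL) as wheel:
    for info in wheel.infolist():
        # infolist() yields one ZipInfo per archive member; file_size is the
        # uncompressed size in bytes. The registry diff instead reports
        # added/removed line counts per file.
        print(f"{info.filename}  {info.file_size} bytes")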
Files changed (1530)
  1. scipy/__config__.py +161 -0
  2. scipy/__init__.py +150 -0
  3. scipy/_cyutility.cp313-win_arm64.lib +0 -0
  4. scipy/_cyutility.cp313-win_arm64.pyd +0 -0
  5. scipy/_distributor_init.py +18 -0
  6. scipy/_lib/__init__.py +14 -0
  7. scipy/_lib/_array_api.py +931 -0
  8. scipy/_lib/_array_api_compat_vendor.py +9 -0
  9. scipy/_lib/_array_api_no_0d.py +103 -0
  10. scipy/_lib/_bunch.py +229 -0
  11. scipy/_lib/_ccallback.py +251 -0
  12. scipy/_lib/_ccallback_c.cp313-win_arm64.lib +0 -0
  13. scipy/_lib/_ccallback_c.cp313-win_arm64.pyd +0 -0
  14. scipy/_lib/_disjoint_set.py +254 -0
  15. scipy/_lib/_docscrape.py +761 -0
  16. scipy/_lib/_elementwise_iterative_method.py +346 -0
  17. scipy/_lib/_fpumode.cp313-win_arm64.lib +0 -0
  18. scipy/_lib/_fpumode.cp313-win_arm64.pyd +0 -0
  19. scipy/_lib/_gcutils.py +105 -0
  20. scipy/_lib/_pep440.py +487 -0
  21. scipy/_lib/_sparse.py +41 -0
  22. scipy/_lib/_test_ccallback.cp313-win_arm64.lib +0 -0
  23. scipy/_lib/_test_ccallback.cp313-win_arm64.pyd +0 -0
  24. scipy/_lib/_test_deprecation_call.cp313-win_arm64.lib +0 -0
  25. scipy/_lib/_test_deprecation_call.cp313-win_arm64.pyd +0 -0
  26. scipy/_lib/_test_deprecation_def.cp313-win_arm64.lib +0 -0
  27. scipy/_lib/_test_deprecation_def.cp313-win_arm64.pyd +0 -0
  28. scipy/_lib/_testutils.py +373 -0
  29. scipy/_lib/_threadsafety.py +58 -0
  30. scipy/_lib/_tmpdirs.py +86 -0
  31. scipy/_lib/_uarray/LICENSE +29 -0
  32. scipy/_lib/_uarray/__init__.py +116 -0
  33. scipy/_lib/_uarray/_backend.py +707 -0
  34. scipy/_lib/_uarray/_uarray.cp313-win_arm64.lib +0 -0
  35. scipy/_lib/_uarray/_uarray.cp313-win_arm64.pyd +0 -0
  36. scipy/_lib/_util.py +1283 -0
  37. scipy/_lib/array_api_compat/__init__.py +22 -0
  38. scipy/_lib/array_api_compat/_internal.py +59 -0
  39. scipy/_lib/array_api_compat/common/__init__.py +1 -0
  40. scipy/_lib/array_api_compat/common/_aliases.py +727 -0
  41. scipy/_lib/array_api_compat/common/_fft.py +213 -0
  42. scipy/_lib/array_api_compat/common/_helpers.py +1058 -0
  43. scipy/_lib/array_api_compat/common/_linalg.py +232 -0
  44. scipy/_lib/array_api_compat/common/_typing.py +192 -0
  45. scipy/_lib/array_api_compat/cupy/__init__.py +13 -0
  46. scipy/_lib/array_api_compat/cupy/_aliases.py +156 -0
  47. scipy/_lib/array_api_compat/cupy/_info.py +336 -0
  48. scipy/_lib/array_api_compat/cupy/_typing.py +31 -0
  49. scipy/_lib/array_api_compat/cupy/fft.py +36 -0
  50. scipy/_lib/array_api_compat/cupy/linalg.py +49 -0
  51. scipy/_lib/array_api_compat/dask/__init__.py +0 -0
  52. scipy/_lib/array_api_compat/dask/array/__init__.py +12 -0
  53. scipy/_lib/array_api_compat/dask/array/_aliases.py +376 -0
  54. scipy/_lib/array_api_compat/dask/array/_info.py +416 -0
  55. scipy/_lib/array_api_compat/dask/array/fft.py +21 -0
  56. scipy/_lib/array_api_compat/dask/array/linalg.py +72 -0
  57. scipy/_lib/array_api_compat/numpy/__init__.py +28 -0
  58. scipy/_lib/array_api_compat/numpy/_aliases.py +190 -0
  59. scipy/_lib/array_api_compat/numpy/_info.py +366 -0
  60. scipy/_lib/array_api_compat/numpy/_typing.py +30 -0
  61. scipy/_lib/array_api_compat/numpy/fft.py +35 -0
  62. scipy/_lib/array_api_compat/numpy/linalg.py +143 -0
  63. scipy/_lib/array_api_compat/torch/__init__.py +22 -0
  64. scipy/_lib/array_api_compat/torch/_aliases.py +855 -0
  65. scipy/_lib/array_api_compat/torch/_info.py +369 -0
  66. scipy/_lib/array_api_compat/torch/_typing.py +3 -0
  67. scipy/_lib/array_api_compat/torch/fft.py +85 -0
  68. scipy/_lib/array_api_compat/torch/linalg.py +121 -0
  69. scipy/_lib/array_api_extra/__init__.py +38 -0
  70. scipy/_lib/array_api_extra/_delegation.py +171 -0
  71. scipy/_lib/array_api_extra/_lib/__init__.py +1 -0
  72. scipy/_lib/array_api_extra/_lib/_at.py +463 -0
  73. scipy/_lib/array_api_extra/_lib/_backends.py +46 -0
  74. scipy/_lib/array_api_extra/_lib/_funcs.py +937 -0
  75. scipy/_lib/array_api_extra/_lib/_lazy.py +357 -0
  76. scipy/_lib/array_api_extra/_lib/_testing.py +278 -0
  77. scipy/_lib/array_api_extra/_lib/_utils/__init__.py +1 -0
  78. scipy/_lib/array_api_extra/_lib/_utils/_compat.py +74 -0
  79. scipy/_lib/array_api_extra/_lib/_utils/_compat.pyi +45 -0
  80. scipy/_lib/array_api_extra/_lib/_utils/_helpers.py +559 -0
  81. scipy/_lib/array_api_extra/_lib/_utils/_typing.py +10 -0
  82. scipy/_lib/array_api_extra/_lib/_utils/_typing.pyi +105 -0
  83. scipy/_lib/array_api_extra/testing.py +359 -0
  84. scipy/_lib/cobyqa/__init__.py +20 -0
  85. scipy/_lib/cobyqa/framework.py +1240 -0
  86. scipy/_lib/cobyqa/main.py +1506 -0
  87. scipy/_lib/cobyqa/models.py +1529 -0
  88. scipy/_lib/cobyqa/problem.py +1296 -0
  89. scipy/_lib/cobyqa/settings.py +132 -0
  90. scipy/_lib/cobyqa/subsolvers/__init__.py +14 -0
  91. scipy/_lib/cobyqa/subsolvers/geometry.py +387 -0
  92. scipy/_lib/cobyqa/subsolvers/optim.py +1203 -0
  93. scipy/_lib/cobyqa/utils/__init__.py +18 -0
  94. scipy/_lib/cobyqa/utils/exceptions.py +22 -0
  95. scipy/_lib/cobyqa/utils/math.py +77 -0
  96. scipy/_lib/cobyqa/utils/versions.py +67 -0
  97. scipy/_lib/decorator.py +399 -0
  98. scipy/_lib/deprecation.py +274 -0
  99. scipy/_lib/doccer.py +366 -0
  100. scipy/_lib/messagestream.cp313-win_arm64.lib +0 -0
  101. scipy/_lib/messagestream.cp313-win_arm64.pyd +0 -0
  102. scipy/_lib/pyprima/__init__.py +212 -0
  103. scipy/_lib/pyprima/cobyla/__init__.py +0 -0
  104. scipy/_lib/pyprima/cobyla/cobyla.py +559 -0
  105. scipy/_lib/pyprima/cobyla/cobylb.py +714 -0
  106. scipy/_lib/pyprima/cobyla/geometry.py +226 -0
  107. scipy/_lib/pyprima/cobyla/initialize.py +215 -0
  108. scipy/_lib/pyprima/cobyla/trustregion.py +492 -0
  109. scipy/_lib/pyprima/cobyla/update.py +289 -0
  110. scipy/_lib/pyprima/common/__init__.py +0 -0
  111. scipy/_lib/pyprima/common/_bounds.py +34 -0
  112. scipy/_lib/pyprima/common/_linear_constraints.py +46 -0
  113. scipy/_lib/pyprima/common/_nonlinear_constraints.py +54 -0
  114. scipy/_lib/pyprima/common/_project.py +173 -0
  115. scipy/_lib/pyprima/common/checkbreak.py +93 -0
  116. scipy/_lib/pyprima/common/consts.py +47 -0
  117. scipy/_lib/pyprima/common/evaluate.py +99 -0
  118. scipy/_lib/pyprima/common/history.py +38 -0
  119. scipy/_lib/pyprima/common/infos.py +30 -0
  120. scipy/_lib/pyprima/common/linalg.py +435 -0
  121. scipy/_lib/pyprima/common/message.py +290 -0
  122. scipy/_lib/pyprima/common/powalg.py +131 -0
  123. scipy/_lib/pyprima/common/preproc.py +277 -0
  124. scipy/_lib/pyprima/common/present.py +5 -0
  125. scipy/_lib/pyprima/common/ratio.py +54 -0
  126. scipy/_lib/pyprima/common/redrho.py +47 -0
  127. scipy/_lib/pyprima/common/selectx.py +296 -0
  128. scipy/_lib/tests/__init__.py +0 -0
  129. scipy/_lib/tests/test__gcutils.py +110 -0
  130. scipy/_lib/tests/test__pep440.py +67 -0
  131. scipy/_lib/tests/test__testutils.py +32 -0
  132. scipy/_lib/tests/test__threadsafety.py +51 -0
  133. scipy/_lib/tests/test__util.py +641 -0
  134. scipy/_lib/tests/test_array_api.py +322 -0
  135. scipy/_lib/tests/test_bunch.py +169 -0
  136. scipy/_lib/tests/test_ccallback.py +196 -0
  137. scipy/_lib/tests/test_config.py +45 -0
  138. scipy/_lib/tests/test_deprecation.py +10 -0
  139. scipy/_lib/tests/test_doccer.py +143 -0
  140. scipy/_lib/tests/test_import_cycles.py +18 -0
  141. scipy/_lib/tests/test_public_api.py +482 -0
  142. scipy/_lib/tests/test_scipy_version.py +28 -0
  143. scipy/_lib/tests/test_tmpdirs.py +48 -0
  144. scipy/_lib/tests/test_warnings.py +137 -0
  145. scipy/_lib/uarray.py +31 -0
  146. scipy/cluster/__init__.py +31 -0
  147. scipy/cluster/_hierarchy.cp313-win_arm64.lib +0 -0
  148. scipy/cluster/_hierarchy.cp313-win_arm64.pyd +0 -0
  149. scipy/cluster/_optimal_leaf_ordering.cp313-win_arm64.lib +0 -0
  150. scipy/cluster/_optimal_leaf_ordering.cp313-win_arm64.pyd +0 -0
  151. scipy/cluster/_vq.cp313-win_arm64.lib +0 -0
  152. scipy/cluster/_vq.cp313-win_arm64.pyd +0 -0
  153. scipy/cluster/hierarchy.py +4348 -0
  154. scipy/cluster/tests/__init__.py +0 -0
  155. scipy/cluster/tests/hierarchy_test_data.py +145 -0
  156. scipy/cluster/tests/test_disjoint_set.py +202 -0
  157. scipy/cluster/tests/test_hierarchy.py +1238 -0
  158. scipy/cluster/tests/test_vq.py +434 -0
  159. scipy/cluster/vq.py +832 -0
  160. scipy/conftest.py +683 -0
  161. scipy/constants/__init__.py +358 -0
  162. scipy/constants/_codata.py +2266 -0
  163. scipy/constants/_constants.py +369 -0
  164. scipy/constants/codata.py +21 -0
  165. scipy/constants/constants.py +53 -0
  166. scipy/constants/tests/__init__.py +0 -0
  167. scipy/constants/tests/test_codata.py +78 -0
  168. scipy/constants/tests/test_constants.py +83 -0
  169. scipy/datasets/__init__.py +90 -0
  170. scipy/datasets/_download_all.py +71 -0
  171. scipy/datasets/_fetchers.py +225 -0
  172. scipy/datasets/_registry.py +26 -0
  173. scipy/datasets/_utils.py +81 -0
  174. scipy/datasets/tests/__init__.py +0 -0
  175. scipy/datasets/tests/test_data.py +128 -0
  176. scipy/differentiate/__init__.py +27 -0
  177. scipy/differentiate/_differentiate.py +1129 -0
  178. scipy/differentiate/tests/__init__.py +0 -0
  179. scipy/differentiate/tests/test_differentiate.py +694 -0
  180. scipy/fft/__init__.py +114 -0
  181. scipy/fft/_backend.py +196 -0
  182. scipy/fft/_basic.py +1650 -0
  183. scipy/fft/_basic_backend.py +197 -0
  184. scipy/fft/_debug_backends.py +22 -0
  185. scipy/fft/_fftlog.py +223 -0
  186. scipy/fft/_fftlog_backend.py +200 -0
  187. scipy/fft/_helper.py +348 -0
  188. scipy/fft/_pocketfft/LICENSE.md +25 -0
  189. scipy/fft/_pocketfft/__init__.py +9 -0
  190. scipy/fft/_pocketfft/basic.py +251 -0
  191. scipy/fft/_pocketfft/helper.py +249 -0
  192. scipy/fft/_pocketfft/pypocketfft.cp313-win_arm64.lib +0 -0
  193. scipy/fft/_pocketfft/pypocketfft.cp313-win_arm64.pyd +0 -0
  194. scipy/fft/_pocketfft/realtransforms.py +109 -0
  195. scipy/fft/_pocketfft/tests/__init__.py +0 -0
  196. scipy/fft/_pocketfft/tests/test_basic.py +1011 -0
  197. scipy/fft/_pocketfft/tests/test_real_transforms.py +505 -0
  198. scipy/fft/_realtransforms.py +706 -0
  199. scipy/fft/_realtransforms_backend.py +63 -0
  200. scipy/fft/tests/__init__.py +0 -0
  201. scipy/fft/tests/mock_backend.py +96 -0
  202. scipy/fft/tests/test_backend.py +98 -0
  203. scipy/fft/tests/test_basic.py +504 -0
  204. scipy/fft/tests/test_fftlog.py +215 -0
  205. scipy/fft/tests/test_helper.py +558 -0
  206. scipy/fft/tests/test_multithreading.py +84 -0
  207. scipy/fft/tests/test_real_transforms.py +247 -0
  208. scipy/fftpack/__init__.py +103 -0
  209. scipy/fftpack/_basic.py +428 -0
  210. scipy/fftpack/_helper.py +115 -0
  211. scipy/fftpack/_pseudo_diffs.py +554 -0
  212. scipy/fftpack/_realtransforms.py +598 -0
  213. scipy/fftpack/basic.py +20 -0
  214. scipy/fftpack/convolve.cp313-win_arm64.lib +0 -0
  215. scipy/fftpack/convolve.cp313-win_arm64.pyd +0 -0
  216. scipy/fftpack/helper.py +19 -0
  217. scipy/fftpack/pseudo_diffs.py +22 -0
  218. scipy/fftpack/realtransforms.py +19 -0
  219. scipy/fftpack/tests/__init__.py +0 -0
  220. scipy/fftpack/tests/fftw_double_ref.npz +0 -0
  221. scipy/fftpack/tests/fftw_longdouble_ref.npz +0 -0
  222. scipy/fftpack/tests/fftw_single_ref.npz +0 -0
  223. scipy/fftpack/tests/test.npz +0 -0
  224. scipy/fftpack/tests/test_basic.py +877 -0
  225. scipy/fftpack/tests/test_helper.py +54 -0
  226. scipy/fftpack/tests/test_import.py +33 -0
  227. scipy/fftpack/tests/test_pseudo_diffs.py +388 -0
  228. scipy/fftpack/tests/test_real_transforms.py +836 -0
  229. scipy/integrate/__init__.py +122 -0
  230. scipy/integrate/_bvp.py +1160 -0
  231. scipy/integrate/_cubature.py +729 -0
  232. scipy/integrate/_dop.cp313-win_arm64.lib +0 -0
  233. scipy/integrate/_dop.cp313-win_arm64.pyd +0 -0
  234. scipy/integrate/_ivp/__init__.py +8 -0
  235. scipy/integrate/_ivp/base.py +290 -0
  236. scipy/integrate/_ivp/bdf.py +478 -0
  237. scipy/integrate/_ivp/common.py +451 -0
  238. scipy/integrate/_ivp/dop853_coefficients.py +193 -0
  239. scipy/integrate/_ivp/ivp.py +755 -0
  240. scipy/integrate/_ivp/lsoda.py +224 -0
  241. scipy/integrate/_ivp/radau.py +572 -0
  242. scipy/integrate/_ivp/rk.py +601 -0
  243. scipy/integrate/_ivp/tests/__init__.py +0 -0
  244. scipy/integrate/_ivp/tests/test_ivp.py +1287 -0
  245. scipy/integrate/_ivp/tests/test_rk.py +37 -0
  246. scipy/integrate/_lebedev.py +5450 -0
  247. scipy/integrate/_lsoda.cp313-win_arm64.lib +0 -0
  248. scipy/integrate/_lsoda.cp313-win_arm64.pyd +0 -0
  249. scipy/integrate/_ode.py +1395 -0
  250. scipy/integrate/_odepack.cp313-win_arm64.lib +0 -0
  251. scipy/integrate/_odepack.cp313-win_arm64.pyd +0 -0
  252. scipy/integrate/_odepack_py.py +273 -0
  253. scipy/integrate/_quad_vec.py +674 -0
  254. scipy/integrate/_quadpack.cp313-win_arm64.lib +0 -0
  255. scipy/integrate/_quadpack.cp313-win_arm64.pyd +0 -0
  256. scipy/integrate/_quadpack_py.py +1283 -0
  257. scipy/integrate/_quadrature.py +1336 -0
  258. scipy/integrate/_rules/__init__.py +12 -0
  259. scipy/integrate/_rules/_base.py +518 -0
  260. scipy/integrate/_rules/_gauss_kronrod.py +202 -0
  261. scipy/integrate/_rules/_gauss_legendre.py +62 -0
  262. scipy/integrate/_rules/_genz_malik.py +210 -0
  263. scipy/integrate/_tanhsinh.py +1385 -0
  264. scipy/integrate/_test_multivariate.cp313-win_arm64.lib +0 -0
  265. scipy/integrate/_test_multivariate.cp313-win_arm64.pyd +0 -0
  266. scipy/integrate/_test_odeint_banded.cp313-win_arm64.lib +0 -0
  267. scipy/integrate/_test_odeint_banded.cp313-win_arm64.pyd +0 -0
  268. scipy/integrate/_vode.cp313-win_arm64.lib +0 -0
  269. scipy/integrate/_vode.cp313-win_arm64.pyd +0 -0
  270. scipy/integrate/dop.py +15 -0
  271. scipy/integrate/lsoda.py +15 -0
  272. scipy/integrate/odepack.py +17 -0
  273. scipy/integrate/quadpack.py +23 -0
  274. scipy/integrate/tests/__init__.py +0 -0
  275. scipy/integrate/tests/test__quad_vec.py +211 -0
  276. scipy/integrate/tests/test_banded_ode_solvers.py +305 -0
  277. scipy/integrate/tests/test_bvp.py +714 -0
  278. scipy/integrate/tests/test_cubature.py +1375 -0
  279. scipy/integrate/tests/test_integrate.py +840 -0
  280. scipy/integrate/tests/test_odeint_jac.py +74 -0
  281. scipy/integrate/tests/test_quadpack.py +680 -0
  282. scipy/integrate/tests/test_quadrature.py +730 -0
  283. scipy/integrate/tests/test_tanhsinh.py +1171 -0
  284. scipy/integrate/vode.py +15 -0
  285. scipy/interpolate/__init__.py +228 -0
  286. scipy/interpolate/_bary_rational.py +715 -0
  287. scipy/interpolate/_bsplines.py +2469 -0
  288. scipy/interpolate/_cubic.py +973 -0
  289. scipy/interpolate/_dfitpack.cp313-win_arm64.lib +0 -0
  290. scipy/interpolate/_dfitpack.cp313-win_arm64.pyd +0 -0
  291. scipy/interpolate/_dierckx.cp313-win_arm64.lib +0 -0
  292. scipy/interpolate/_dierckx.cp313-win_arm64.pyd +0 -0
  293. scipy/interpolate/_fitpack.cp313-win_arm64.lib +0 -0
  294. scipy/interpolate/_fitpack.cp313-win_arm64.pyd +0 -0
  295. scipy/interpolate/_fitpack2.py +2397 -0
  296. scipy/interpolate/_fitpack_impl.py +811 -0
  297. scipy/interpolate/_fitpack_py.py +898 -0
  298. scipy/interpolate/_fitpack_repro.py +996 -0
  299. scipy/interpolate/_interpnd.cp313-win_arm64.lib +0 -0
  300. scipy/interpolate/_interpnd.cp313-win_arm64.pyd +0 -0
  301. scipy/interpolate/_interpolate.py +2266 -0
  302. scipy/interpolate/_ndbspline.py +415 -0
  303. scipy/interpolate/_ndgriddata.py +329 -0
  304. scipy/interpolate/_pade.py +67 -0
  305. scipy/interpolate/_polyint.py +1025 -0
  306. scipy/interpolate/_ppoly.cp313-win_arm64.lib +0 -0
  307. scipy/interpolate/_ppoly.cp313-win_arm64.pyd +0 -0
  308. scipy/interpolate/_rbf.py +290 -0
  309. scipy/interpolate/_rbfinterp.py +550 -0
  310. scipy/interpolate/_rbfinterp_pythran.cp313-win_arm64.lib +0 -0
  311. scipy/interpolate/_rbfinterp_pythran.cp313-win_arm64.pyd +0 -0
  312. scipy/interpolate/_rgi.py +764 -0
  313. scipy/interpolate/_rgi_cython.cp313-win_arm64.lib +0 -0
  314. scipy/interpolate/_rgi_cython.cp313-win_arm64.pyd +0 -0
  315. scipy/interpolate/dfitpack.py +24 -0
  316. scipy/interpolate/fitpack.py +31 -0
  317. scipy/interpolate/fitpack2.py +29 -0
  318. scipy/interpolate/interpnd.py +24 -0
  319. scipy/interpolate/interpolate.py +30 -0
  320. scipy/interpolate/ndgriddata.py +23 -0
  321. scipy/interpolate/polyint.py +24 -0
  322. scipy/interpolate/rbf.py +18 -0
  323. scipy/interpolate/tests/__init__.py +0 -0
  324. scipy/interpolate/tests/data/bug-1310.npz +0 -0
  325. scipy/interpolate/tests/data/estimate_gradients_hang.npy +0 -0
  326. scipy/interpolate/tests/data/gcvspl.npz +0 -0
  327. scipy/interpolate/tests/test_bary_rational.py +368 -0
  328. scipy/interpolate/tests/test_bsplines.py +3754 -0
  329. scipy/interpolate/tests/test_fitpack.py +519 -0
  330. scipy/interpolate/tests/test_fitpack2.py +1431 -0
  331. scipy/interpolate/tests/test_gil.py +64 -0
  332. scipy/interpolate/tests/test_interpnd.py +452 -0
  333. scipy/interpolate/tests/test_interpolate.py +2630 -0
  334. scipy/interpolate/tests/test_ndgriddata.py +308 -0
  335. scipy/interpolate/tests/test_pade.py +107 -0
  336. scipy/interpolate/tests/test_polyint.py +972 -0
  337. scipy/interpolate/tests/test_rbf.py +246 -0
  338. scipy/interpolate/tests/test_rbfinterp.py +534 -0
  339. scipy/interpolate/tests/test_rgi.py +1151 -0
  340. scipy/io/__init__.py +116 -0
  341. scipy/io/_fast_matrix_market/__init__.py +600 -0
  342. scipy/io/_fast_matrix_market/_fmm_core.cp313-win_arm64.lib +0 -0
  343. scipy/io/_fast_matrix_market/_fmm_core.cp313-win_arm64.pyd +0 -0
  344. scipy/io/_fortran.py +354 -0
  345. scipy/io/_harwell_boeing/__init__.py +7 -0
  346. scipy/io/_harwell_boeing/_fortran_format_parser.py +316 -0
  347. scipy/io/_harwell_boeing/hb.py +571 -0
  348. scipy/io/_harwell_boeing/tests/__init__.py +0 -0
  349. scipy/io/_harwell_boeing/tests/test_fortran_format.py +74 -0
  350. scipy/io/_harwell_boeing/tests/test_hb.py +70 -0
  351. scipy/io/_idl.py +917 -0
  352. scipy/io/_mmio.py +968 -0
  353. scipy/io/_netcdf.py +1104 -0
  354. scipy/io/_test_fortran.cp313-win_arm64.lib +0 -0
  355. scipy/io/_test_fortran.cp313-win_arm64.pyd +0 -0
  356. scipy/io/arff/__init__.py +28 -0
  357. scipy/io/arff/_arffread.py +873 -0
  358. scipy/io/arff/arffread.py +19 -0
  359. scipy/io/arff/tests/__init__.py +0 -0
  360. scipy/io/arff/tests/data/iris.arff +225 -0
  361. scipy/io/arff/tests/data/missing.arff +8 -0
  362. scipy/io/arff/tests/data/nodata.arff +11 -0
  363. scipy/io/arff/tests/data/quoted_nominal.arff +13 -0
  364. scipy/io/arff/tests/data/quoted_nominal_spaces.arff +13 -0
  365. scipy/io/arff/tests/data/test1.arff +10 -0
  366. scipy/io/arff/tests/data/test10.arff +8 -0
  367. scipy/io/arff/tests/data/test11.arff +11 -0
  368. scipy/io/arff/tests/data/test2.arff +15 -0
  369. scipy/io/arff/tests/data/test3.arff +6 -0
  370. scipy/io/arff/tests/data/test4.arff +11 -0
  371. scipy/io/arff/tests/data/test5.arff +26 -0
  372. scipy/io/arff/tests/data/test6.arff +12 -0
  373. scipy/io/arff/tests/data/test7.arff +15 -0
  374. scipy/io/arff/tests/data/test8.arff +12 -0
  375. scipy/io/arff/tests/data/test9.arff +14 -0
  376. scipy/io/arff/tests/test_arffread.py +421 -0
  377. scipy/io/harwell_boeing.py +17 -0
  378. scipy/io/idl.py +17 -0
  379. scipy/io/matlab/__init__.py +66 -0
  380. scipy/io/matlab/_byteordercodes.py +75 -0
  381. scipy/io/matlab/_mio.py +375 -0
  382. scipy/io/matlab/_mio4.py +632 -0
  383. scipy/io/matlab/_mio5.py +901 -0
  384. scipy/io/matlab/_mio5_params.py +281 -0
  385. scipy/io/matlab/_mio5_utils.cp313-win_arm64.lib +0 -0
  386. scipy/io/matlab/_mio5_utils.cp313-win_arm64.pyd +0 -0
  387. scipy/io/matlab/_mio_utils.cp313-win_arm64.lib +0 -0
  388. scipy/io/matlab/_mio_utils.cp313-win_arm64.pyd +0 -0
  389. scipy/io/matlab/_miobase.py +435 -0
  390. scipy/io/matlab/_streams.cp313-win_arm64.lib +0 -0
  391. scipy/io/matlab/_streams.cp313-win_arm64.pyd +0 -0
  392. scipy/io/matlab/byteordercodes.py +17 -0
  393. scipy/io/matlab/mio.py +16 -0
  394. scipy/io/matlab/mio4.py +17 -0
  395. scipy/io/matlab/mio5.py +19 -0
  396. scipy/io/matlab/mio5_params.py +18 -0
  397. scipy/io/matlab/mio5_utils.py +17 -0
  398. scipy/io/matlab/mio_utils.py +17 -0
  399. scipy/io/matlab/miobase.py +16 -0
  400. scipy/io/matlab/streams.py +16 -0
  401. scipy/io/matlab/tests/__init__.py +0 -0
  402. scipy/io/matlab/tests/data/bad_miuint32.mat +0 -0
  403. scipy/io/matlab/tests/data/bad_miutf8_array_name.mat +0 -0
  404. scipy/io/matlab/tests/data/big_endian.mat +0 -0
  405. scipy/io/matlab/tests/data/broken_utf8.mat +0 -0
  406. scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat +0 -0
  407. scipy/io/matlab/tests/data/corrupted_zlib_data.mat +0 -0
  408. scipy/io/matlab/tests/data/debigged_m4.mat +0 -0
  409. scipy/io/matlab/tests/data/japanese_utf8.txt +5 -0
  410. scipy/io/matlab/tests/data/little_endian.mat +0 -0
  411. scipy/io/matlab/tests/data/logical_sparse.mat +0 -0
  412. scipy/io/matlab/tests/data/malformed1.mat +0 -0
  413. scipy/io/matlab/tests/data/miuint32_for_miint32.mat +0 -0
  414. scipy/io/matlab/tests/data/miutf8_array_name.mat +0 -0
  415. scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat +0 -0
  416. scipy/io/matlab/tests/data/one_by_zero_char.mat +0 -0
  417. scipy/io/matlab/tests/data/parabola.mat +0 -0
  418. scipy/io/matlab/tests/data/single_empty_string.mat +0 -0
  419. scipy/io/matlab/tests/data/some_functions.mat +0 -0
  420. scipy/io/matlab/tests/data/sqr.mat +0 -0
  421. scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat +0 -0
  422. scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat +0 -0
  423. scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat +0 -0
  424. scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat +0 -0
  425. scipy/io/matlab/tests/data/test_empty_struct.mat +0 -0
  426. scipy/io/matlab/tests/data/test_mat4_le_floats.mat +0 -0
  427. scipy/io/matlab/tests/data/test_skip_variable.mat +0 -0
  428. scipy/io/matlab/tests/data/testbool_8_WIN64.mat +0 -0
  429. scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat +0 -0
  430. scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat +0 -0
  431. scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat +0 -0
  432. scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat +0 -0
  433. scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat +0 -0
  434. scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat +0 -0
  435. scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat +0 -0
  436. scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat +0 -0
  437. scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat +0 -0
  438. scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat +0 -0
  439. scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat +0 -0
  440. scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat +0 -0
  441. scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat +0 -0
  442. scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat +0 -0
  443. scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat +0 -0
  444. scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat +0 -0
  445. scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat +0 -0
  446. scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat +0 -0
  447. scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat +0 -0
  448. scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat +0 -0
  449. scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat +0 -0
  450. scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat +0 -0
  451. scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat +0 -0
  452. scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat +0 -0
  453. scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat +0 -0
  454. scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat +0 -0
  455. scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat +0 -0
  456. scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat +0 -0
  457. scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat +0 -0
  458. scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat +0 -0
  459. scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat +0 -0
  460. scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat +0 -0
  461. scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat +0 -0
  462. scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat +0 -0
  463. scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat +0 -0
  464. scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat +0 -0
  465. scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat +0 -0
  466. scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat +0 -0
  467. scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat +0 -0
  468. scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat +0 -0
  469. scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat +0 -0
  470. scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat +0 -0
  471. scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat +0 -0
  472. scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat +0 -0
  473. scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat +0 -0
  474. scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat +0 -0
  475. scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat +0 -0
  476. scipy/io/matlab/tests/data/testsimplecell.mat +0 -0
  477. scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat +0 -0
  478. scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat +0 -0
  479. scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat +0 -0
  480. scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat +0 -0
  481. scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat +0 -0
  482. scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat +0 -0
  483. scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat +0 -0
  484. scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat +0 -0
  485. scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat +0 -0
  486. scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat +0 -0
  487. scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat +0 -0
  488. scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat +0 -0
  489. scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat +0 -0
  490. scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat +0 -0
  491. scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat +0 -0
  492. scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat +0 -0
  493. scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat +0 -0
  494. scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat +0 -0
  495. scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat +0 -0
  496. scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat +0 -0
  497. scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat +0 -0
  498. scipy/io/matlab/tests/data/teststruct_6.1_SOL2.mat +0 -0
  499. scipy/io/matlab/tests/data/teststruct_6.5.1_GLNX86.mat +0 -0
  500. scipy/io/matlab/tests/data/teststruct_7.1_GLNX86.mat +0 -0
  501. scipy/io/matlab/tests/data/teststruct_7.4_GLNX86.mat +0 -0
  502. scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat +0 -0
  503. scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat +0 -0
  504. scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat +0 -0
  505. scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat +0 -0
  506. scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat +0 -0
  507. scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat +0 -0
  508. scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat +0 -0
  509. scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat +0 -0
  510. scipy/io/matlab/tests/data/testunicode_7.1_GLNX86.mat +0 -0
  511. scipy/io/matlab/tests/data/testunicode_7.4_GLNX86.mat +0 -0
  512. scipy/io/matlab/tests/data/testvec_4_GLNX86.mat +0 -0
  513. scipy/io/matlab/tests/test_byteordercodes.py +29 -0
  514. scipy/io/matlab/tests/test_mio.py +1399 -0
  515. scipy/io/matlab/tests/test_mio5_utils.py +179 -0
  516. scipy/io/matlab/tests/test_mio_funcs.py +51 -0
  517. scipy/io/matlab/tests/test_mio_utils.py +45 -0
  518. scipy/io/matlab/tests/test_miobase.py +32 -0
  519. scipy/io/matlab/tests/test_pathological.py +33 -0
  520. scipy/io/matlab/tests/test_streams.py +241 -0
  521. scipy/io/mmio.py +17 -0
  522. scipy/io/netcdf.py +17 -0
  523. scipy/io/tests/__init__.py +0 -0
  524. scipy/io/tests/data/Transparent Busy.ani +0 -0
  525. scipy/io/tests/data/array_float32_1d.sav +0 -0
  526. scipy/io/tests/data/array_float32_2d.sav +0 -0
  527. scipy/io/tests/data/array_float32_3d.sav +0 -0
  528. scipy/io/tests/data/array_float32_4d.sav +0 -0
  529. scipy/io/tests/data/array_float32_5d.sav +0 -0
  530. scipy/io/tests/data/array_float32_6d.sav +0 -0
  531. scipy/io/tests/data/array_float32_7d.sav +0 -0
  532. scipy/io/tests/data/array_float32_8d.sav +0 -0
  533. scipy/io/tests/data/array_float32_pointer_1d.sav +0 -0
  534. scipy/io/tests/data/array_float32_pointer_2d.sav +0 -0
  535. scipy/io/tests/data/array_float32_pointer_3d.sav +0 -0
  536. scipy/io/tests/data/array_float32_pointer_4d.sav +0 -0
  537. scipy/io/tests/data/array_float32_pointer_5d.sav +0 -0
  538. scipy/io/tests/data/array_float32_pointer_6d.sav +0 -0
  539. scipy/io/tests/data/array_float32_pointer_7d.sav +0 -0
  540. scipy/io/tests/data/array_float32_pointer_8d.sav +0 -0
  541. scipy/io/tests/data/example_1.nc +0 -0
  542. scipy/io/tests/data/example_2.nc +0 -0
  543. scipy/io/tests/data/example_3_maskedvals.nc +0 -0
  544. scipy/io/tests/data/fortran-3x3d-2i.dat +0 -0
  545. scipy/io/tests/data/fortran-mixed.dat +0 -0
  546. scipy/io/tests/data/fortran-sf8-11x1x10.dat +0 -0
  547. scipy/io/tests/data/fortran-sf8-15x10x22.dat +0 -0
  548. scipy/io/tests/data/fortran-sf8-1x1x1.dat +0 -0
  549. scipy/io/tests/data/fortran-sf8-1x1x5.dat +0 -0
  550. scipy/io/tests/data/fortran-sf8-1x1x7.dat +0 -0
  551. scipy/io/tests/data/fortran-sf8-1x3x5.dat +0 -0
  552. scipy/io/tests/data/fortran-si4-11x1x10.dat +0 -0
  553. scipy/io/tests/data/fortran-si4-15x10x22.dat +0 -0
  554. scipy/io/tests/data/fortran-si4-1x1x1.dat +0 -0
  555. scipy/io/tests/data/fortran-si4-1x1x5.dat +0 -0
  556. scipy/io/tests/data/fortran-si4-1x1x7.dat +0 -0
  557. scipy/io/tests/data/fortran-si4-1x3x5.dat +0 -0
  558. scipy/io/tests/data/invalid_pointer.sav +0 -0
  559. scipy/io/tests/data/null_pointer.sav +0 -0
  560. scipy/io/tests/data/scalar_byte.sav +0 -0
  561. scipy/io/tests/data/scalar_byte_descr.sav +0 -0
  562. scipy/io/tests/data/scalar_complex32.sav +0 -0
  563. scipy/io/tests/data/scalar_complex64.sav +0 -0
  564. scipy/io/tests/data/scalar_float32.sav +0 -0
  565. scipy/io/tests/data/scalar_float64.sav +0 -0
  566. scipy/io/tests/data/scalar_heap_pointer.sav +0 -0
  567. scipy/io/tests/data/scalar_int16.sav +0 -0
  568. scipy/io/tests/data/scalar_int32.sav +0 -0
  569. scipy/io/tests/data/scalar_int64.sav +0 -0
  570. scipy/io/tests/data/scalar_string.sav +0 -0
  571. scipy/io/tests/data/scalar_uint16.sav +0 -0
  572. scipy/io/tests/data/scalar_uint32.sav +0 -0
  573. scipy/io/tests/data/scalar_uint64.sav +0 -0
  574. scipy/io/tests/data/struct_arrays.sav +0 -0
  575. scipy/io/tests/data/struct_arrays_byte_idl80.sav +0 -0
  576. scipy/io/tests/data/struct_arrays_replicated.sav +0 -0
  577. scipy/io/tests/data/struct_arrays_replicated_3d.sav +0 -0
  578. scipy/io/tests/data/struct_inherit.sav +0 -0
  579. scipy/io/tests/data/struct_pointer_arrays.sav +0 -0
  580. scipy/io/tests/data/struct_pointer_arrays_replicated.sav +0 -0
  581. scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav +0 -0
  582. scipy/io/tests/data/struct_pointers.sav +0 -0
  583. scipy/io/tests/data/struct_pointers_replicated.sav +0 -0
  584. scipy/io/tests/data/struct_pointers_replicated_3d.sav +0 -0
  585. scipy/io/tests/data/struct_scalars.sav +0 -0
  586. scipy/io/tests/data/struct_scalars_replicated.sav +0 -0
  587. scipy/io/tests/data/struct_scalars_replicated_3d.sav +0 -0
  588. scipy/io/tests/data/test-1234Hz-le-1ch-10S-20bit-extra.wav +0 -0
  589. scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav +0 -0
  590. scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav +0 -0
  591. scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav +0 -0
  592. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav +0 -0
  593. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav +0 -0
  594. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav +0 -0
  595. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-rf64.wav +0 -0
  596. scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav +0 -0
  597. scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav +0 -0
  598. scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav +0 -0
  599. scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav +0 -0
  600. scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav +0 -0
  601. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav +0 -0
  602. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-rf64.wav +0 -0
  603. scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav +0 -0
  604. scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav +0 -0
  605. scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav +0 -0
  606. scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav +0 -0
  607. scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav +0 -0
  608. scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav +0 -0
  609. scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav +0 -0
  610. scipy/io/tests/data/various_compressed.sav +0 -0
  611. scipy/io/tests/test_fortran.py +264 -0
  612. scipy/io/tests/test_idl.py +483 -0
  613. scipy/io/tests/test_mmio.py +831 -0
  614. scipy/io/tests/test_netcdf.py +550 -0
  615. scipy/io/tests/test_paths.py +93 -0
  616. scipy/io/tests/test_wavfile.py +501 -0
  617. scipy/io/wavfile.py +938 -0
  618. scipy/linalg/__init__.pxd +1 -0
  619. scipy/linalg/__init__.py +236 -0
  620. scipy/linalg/_basic.py +2146 -0
  621. scipy/linalg/_blas_subroutines.h +164 -0
  622. scipy/linalg/_cythonized_array_utils.cp313-win_arm64.lib +0 -0
  623. scipy/linalg/_cythonized_array_utils.cp313-win_arm64.pyd +0 -0
  624. scipy/linalg/_cythonized_array_utils.pxd +40 -0
  625. scipy/linalg/_cythonized_array_utils.pyi +16 -0
  626. scipy/linalg/_decomp.py +1645 -0
  627. scipy/linalg/_decomp_cholesky.py +413 -0
  628. scipy/linalg/_decomp_cossin.py +236 -0
  629. scipy/linalg/_decomp_interpolative.cp313-win_arm64.lib +0 -0
  630. scipy/linalg/_decomp_interpolative.cp313-win_arm64.pyd +0 -0
  631. scipy/linalg/_decomp_ldl.py +356 -0
  632. scipy/linalg/_decomp_lu.py +401 -0
  633. scipy/linalg/_decomp_lu_cython.cp313-win_arm64.lib +0 -0
  634. scipy/linalg/_decomp_lu_cython.cp313-win_arm64.pyd +0 -0
  635. scipy/linalg/_decomp_lu_cython.pyi +6 -0
  636. scipy/linalg/_decomp_polar.py +113 -0
  637. scipy/linalg/_decomp_qr.py +494 -0
  638. scipy/linalg/_decomp_qz.py +452 -0
  639. scipy/linalg/_decomp_schur.py +336 -0
  640. scipy/linalg/_decomp_svd.py +545 -0
  641. scipy/linalg/_decomp_update.cp313-win_arm64.lib +0 -0
  642. scipy/linalg/_decomp_update.cp313-win_arm64.pyd +0 -0
  643. scipy/linalg/_expm_frechet.py +417 -0
  644. scipy/linalg/_fblas.cp313-win_arm64.lib +0 -0
  645. scipy/linalg/_fblas.cp313-win_arm64.pyd +0 -0
  646. scipy/linalg/_flapack.cp313-win_arm64.lib +0 -0
  647. scipy/linalg/_flapack.cp313-win_arm64.pyd +0 -0
  648. scipy/linalg/_lapack_subroutines.h +1521 -0
  649. scipy/linalg/_linalg_pythran.cp313-win_arm64.lib +0 -0
  650. scipy/linalg/_linalg_pythran.cp313-win_arm64.pyd +0 -0
  651. scipy/linalg/_matfuncs.py +1050 -0
  652. scipy/linalg/_matfuncs_expm.cp313-win_arm64.lib +0 -0
  653. scipy/linalg/_matfuncs_expm.cp313-win_arm64.pyd +0 -0
  654. scipy/linalg/_matfuncs_expm.pyi +6 -0
  655. scipy/linalg/_matfuncs_inv_ssq.py +886 -0
  656. scipy/linalg/_matfuncs_schur_sqrtm.cp313-win_arm64.lib +0 -0
  657. scipy/linalg/_matfuncs_schur_sqrtm.cp313-win_arm64.pyd +0 -0
  658. scipy/linalg/_matfuncs_sqrtm.py +107 -0
  659. scipy/linalg/_matfuncs_sqrtm_triu.cp313-win_arm64.lib +0 -0
  660. scipy/linalg/_matfuncs_sqrtm_triu.cp313-win_arm64.pyd +0 -0
  661. scipy/linalg/_misc.py +191 -0
  662. scipy/linalg/_procrustes.py +113 -0
  663. scipy/linalg/_sketches.py +189 -0
  664. scipy/linalg/_solve_toeplitz.cp313-win_arm64.lib +0 -0
  665. scipy/linalg/_solve_toeplitz.cp313-win_arm64.pyd +0 -0
  666. scipy/linalg/_solvers.py +862 -0
  667. scipy/linalg/_special_matrices.py +1322 -0
  668. scipy/linalg/_testutils.py +65 -0
  669. scipy/linalg/basic.py +23 -0
  670. scipy/linalg/blas.py +495 -0
  671. scipy/linalg/cython_blas.cp313-win_arm64.lib +0 -0
  672. scipy/linalg/cython_blas.cp313-win_arm64.pyd +0 -0
  673. scipy/linalg/cython_blas.pxd +169 -0
  674. scipy/linalg/cython_blas.pyx +1432 -0
  675. scipy/linalg/cython_lapack.cp313-win_arm64.lib +0 -0
  676. scipy/linalg/cython_lapack.cp313-win_arm64.pyd +0 -0
  677. scipy/linalg/cython_lapack.pxd +1528 -0
  678. scipy/linalg/cython_lapack.pyx +12045 -0
  679. scipy/linalg/decomp.py +23 -0
  680. scipy/linalg/decomp_cholesky.py +21 -0
  681. scipy/linalg/decomp_lu.py +21 -0
  682. scipy/linalg/decomp_qr.py +20 -0
  683. scipy/linalg/decomp_schur.py +21 -0
  684. scipy/linalg/decomp_svd.py +21 -0
  685. scipy/linalg/interpolative.py +989 -0
  686. scipy/linalg/lapack.py +1081 -0
  687. scipy/linalg/matfuncs.py +23 -0
  688. scipy/linalg/misc.py +21 -0
  689. scipy/linalg/special_matrices.py +22 -0
  690. scipy/linalg/tests/__init__.py +0 -0
  691. scipy/linalg/tests/_cython_examples/extending.pyx +23 -0
  692. scipy/linalg/tests/_cython_examples/meson.build +34 -0
  693. scipy/linalg/tests/data/carex_15_data.npz +0 -0
  694. scipy/linalg/tests/data/carex_18_data.npz +0 -0
  695. scipy/linalg/tests/data/carex_19_data.npz +0 -0
  696. scipy/linalg/tests/data/carex_20_data.npz +0 -0
  697. scipy/linalg/tests/data/carex_6_data.npz +0 -0
  698. scipy/linalg/tests/data/gendare_20170120_data.npz +0 -0
  699. scipy/linalg/tests/test_basic.py +2074 -0
  700. scipy/linalg/tests/test_batch.py +588 -0
  701. scipy/linalg/tests/test_blas.py +1127 -0
  702. scipy/linalg/tests/test_cython_blas.py +118 -0
  703. scipy/linalg/tests/test_cython_lapack.py +22 -0
  704. scipy/linalg/tests/test_cythonized_array_utils.py +130 -0
  705. scipy/linalg/tests/test_decomp.py +3189 -0
  706. scipy/linalg/tests/test_decomp_cholesky.py +268 -0
  707. scipy/linalg/tests/test_decomp_cossin.py +314 -0
  708. scipy/linalg/tests/test_decomp_ldl.py +137 -0
  709. scipy/linalg/tests/test_decomp_lu.py +308 -0
  710. scipy/linalg/tests/test_decomp_polar.py +110 -0
  711. scipy/linalg/tests/test_decomp_update.py +1701 -0
  712. scipy/linalg/tests/test_extending.py +46 -0
  713. scipy/linalg/tests/test_fblas.py +607 -0
  714. scipy/linalg/tests/test_interpolative.py +232 -0
  715. scipy/linalg/tests/test_lapack.py +3620 -0
  716. scipy/linalg/tests/test_matfuncs.py +1125 -0
  717. scipy/linalg/tests/test_matmul_toeplitz.py +136 -0
  718. scipy/linalg/tests/test_procrustes.py +214 -0
  719. scipy/linalg/tests/test_sketches.py +118 -0
  720. scipy/linalg/tests/test_solve_toeplitz.py +150 -0
  721. scipy/linalg/tests/test_solvers.py +844 -0
  722. scipy/linalg/tests/test_special_matrices.py +636 -0
  723. scipy/misc/__init__.py +6 -0
  724. scipy/misc/common.py +6 -0
  725. scipy/misc/doccer.py +6 -0
  726. scipy/ndimage/__init__.py +174 -0
  727. scipy/ndimage/_ctest.cp313-win_arm64.lib +0 -0
  728. scipy/ndimage/_ctest.cp313-win_arm64.pyd +0 -0
  729. scipy/ndimage/_cytest.cp313-win_arm64.lib +0 -0
  730. scipy/ndimage/_cytest.cp313-win_arm64.pyd +0 -0
  731. scipy/ndimage/_delegators.py +303 -0
  732. scipy/ndimage/_filters.py +2422 -0
  733. scipy/ndimage/_fourier.py +306 -0
  734. scipy/ndimage/_interpolation.py +1033 -0
  735. scipy/ndimage/_measurements.py +1689 -0
  736. scipy/ndimage/_morphology.py +2634 -0
  737. scipy/ndimage/_nd_image.cp313-win_arm64.lib +0 -0
  738. scipy/ndimage/_nd_image.cp313-win_arm64.pyd +0 -0
  739. scipy/ndimage/_ndimage_api.py +16 -0
  740. scipy/ndimage/_ni_docstrings.py +214 -0
  741. scipy/ndimage/_ni_label.cp313-win_arm64.lib +0 -0
  742. scipy/ndimage/_ni_label.cp313-win_arm64.pyd +0 -0
  743. scipy/ndimage/_ni_support.py +139 -0
  744. scipy/ndimage/_rank_filter_1d.cp313-win_arm64.lib +0 -0
  745. scipy/ndimage/_rank_filter_1d.cp313-win_arm64.pyd +0 -0
  746. scipy/ndimage/_support_alternative_backends.py +84 -0
  747. scipy/ndimage/filters.py +27 -0
  748. scipy/ndimage/fourier.py +21 -0
  749. scipy/ndimage/interpolation.py +22 -0
  750. scipy/ndimage/measurements.py +24 -0
  751. scipy/ndimage/morphology.py +27 -0
  752. scipy/ndimage/tests/__init__.py +12 -0
  753. scipy/ndimage/tests/data/label_inputs.txt +21 -0
  754. scipy/ndimage/tests/data/label_results.txt +294 -0
  755. scipy/ndimage/tests/data/label_strels.txt +42 -0
  756. scipy/ndimage/tests/dots.png +0 -0
  757. scipy/ndimage/tests/test_c_api.py +102 -0
  758. scipy/ndimage/tests/test_datatypes.py +67 -0
  759. scipy/ndimage/tests/test_filters.py +3083 -0
  760. scipy/ndimage/tests/test_fourier.py +187 -0
  761. scipy/ndimage/tests/test_interpolation.py +1491 -0
  762. scipy/ndimage/tests/test_measurements.py +1592 -0
  763. scipy/ndimage/tests/test_morphology.py +2950 -0
  764. scipy/ndimage/tests/test_ni_support.py +78 -0
  765. scipy/ndimage/tests/test_splines.py +70 -0
  766. scipy/odr/__init__.py +131 -0
  767. scipy/odr/__odrpack.cp313-win_arm64.lib +0 -0
  768. scipy/odr/__odrpack.cp313-win_arm64.pyd +0 -0
  769. scipy/odr/_add_newdocs.py +34 -0
  770. scipy/odr/_models.py +315 -0
  771. scipy/odr/_odrpack.py +1154 -0
  772. scipy/odr/models.py +20 -0
  773. scipy/odr/odrpack.py +21 -0
  774. scipy/odr/tests/__init__.py +0 -0
  775. scipy/odr/tests/test_odr.py +607 -0
  776. scipy/optimize/__init__.pxd +1 -0
  777. scipy/optimize/__init__.py +460 -0
  778. scipy/optimize/_basinhopping.py +741 -0
  779. scipy/optimize/_bglu_dense.cp313-win_arm64.lib +0 -0
  780. scipy/optimize/_bglu_dense.cp313-win_arm64.pyd +0 -0
  781. scipy/optimize/_bracket.py +706 -0
  782. scipy/optimize/_chandrupatla.py +551 -0
  783. scipy/optimize/_cobyla_py.py +297 -0
  784. scipy/optimize/_cobyqa_py.py +72 -0
  785. scipy/optimize/_constraints.py +598 -0
  786. scipy/optimize/_dcsrch.py +728 -0
  787. scipy/optimize/_differentiable_functions.py +835 -0
  788. scipy/optimize/_differentialevolution.py +1970 -0
  789. scipy/optimize/_direct.cp313-win_arm64.lib +0 -0
  790. scipy/optimize/_direct.cp313-win_arm64.pyd +0 -0
  791. scipy/optimize/_direct_py.py +280 -0
  792. scipy/optimize/_dual_annealing.py +732 -0
  793. scipy/optimize/_elementwise.py +798 -0
  794. scipy/optimize/_group_columns.cp313-win_arm64.lib +0 -0
  795. scipy/optimize/_group_columns.cp313-win_arm64.pyd +0 -0
  796. scipy/optimize/_hessian_update_strategy.py +479 -0
  797. scipy/optimize/_highspy/__init__.py +0 -0
  798. scipy/optimize/_highspy/_core.cp313-win_arm64.lib +0 -0
  799. scipy/optimize/_highspy/_core.cp313-win_arm64.pyd +0 -0
  800. scipy/optimize/_highspy/_highs_options.cp313-win_arm64.lib +0 -0
  801. scipy/optimize/_highspy/_highs_options.cp313-win_arm64.pyd +0 -0
  802. scipy/optimize/_highspy/_highs_wrapper.py +338 -0
  803. scipy/optimize/_isotonic.py +157 -0
  804. scipy/optimize/_lbfgsb.cp313-win_arm64.lib +0 -0
  805. scipy/optimize/_lbfgsb.cp313-win_arm64.pyd +0 -0
  806. scipy/optimize/_lbfgsb_py.py +634 -0
  807. scipy/optimize/_linesearch.py +896 -0
  808. scipy/optimize/_linprog.py +733 -0
  809. scipy/optimize/_linprog_doc.py +1434 -0
  810. scipy/optimize/_linprog_highs.py +422 -0
  811. scipy/optimize/_linprog_ip.py +1141 -0
  812. scipy/optimize/_linprog_rs.py +572 -0
  813. scipy/optimize/_linprog_simplex.py +663 -0
  814. scipy/optimize/_linprog_util.py +1521 -0
  815. scipy/optimize/_lsap.cp313-win_arm64.lib +0 -0
  816. scipy/optimize/_lsap.cp313-win_arm64.pyd +0 -0
  817. scipy/optimize/_lsq/__init__.py +5 -0
  818. scipy/optimize/_lsq/bvls.py +183 -0
  819. scipy/optimize/_lsq/common.py +731 -0
  820. scipy/optimize/_lsq/dogbox.py +345 -0
  821. scipy/optimize/_lsq/givens_elimination.cp313-win_arm64.lib +0 -0
  822. scipy/optimize/_lsq/givens_elimination.cp313-win_arm64.pyd +0 -0
  823. scipy/optimize/_lsq/least_squares.py +1044 -0
  824. scipy/optimize/_lsq/lsq_linear.py +361 -0
  825. scipy/optimize/_lsq/trf.py +587 -0
  826. scipy/optimize/_lsq/trf_linear.py +249 -0
  827. scipy/optimize/_milp.py +394 -0
  828. scipy/optimize/_minimize.py +1199 -0
  829. scipy/optimize/_minpack.cp313-win_arm64.lib +0 -0
  830. scipy/optimize/_minpack.cp313-win_arm64.pyd +0 -0
  831. scipy/optimize/_minpack_py.py +1178 -0
  832. scipy/optimize/_moduleTNC.cp313-win_arm64.lib +0 -0
  833. scipy/optimize/_moduleTNC.cp313-win_arm64.pyd +0 -0
  834. scipy/optimize/_nnls.py +96 -0
  835. scipy/optimize/_nonlin.py +1634 -0
  836. scipy/optimize/_numdiff.py +963 -0
  837. scipy/optimize/_optimize.py +4169 -0
  838. scipy/optimize/_pava_pybind.cp313-win_arm64.lib +0 -0
  839. scipy/optimize/_pava_pybind.cp313-win_arm64.pyd +0 -0
  840. scipy/optimize/_qap.py +760 -0
  841. scipy/optimize/_remove_redundancy.py +522 -0
  842. scipy/optimize/_root.py +732 -0
  843. scipy/optimize/_root_scalar.py +538 -0
  844. scipy/optimize/_shgo.py +1606 -0
  845. scipy/optimize/_shgo_lib/__init__.py +0 -0
  846. scipy/optimize/_shgo_lib/_complex.py +1225 -0
  847. scipy/optimize/_shgo_lib/_vertex.py +460 -0
  848. scipy/optimize/_slsqp_py.py +603 -0
  849. scipy/optimize/_slsqplib.cp313-win_arm64.lib +0 -0
  850. scipy/optimize/_slsqplib.cp313-win_arm64.pyd +0 -0
  851. scipy/optimize/_spectral.py +260 -0
  852. scipy/optimize/_tnc.py +438 -0
  853. scipy/optimize/_trlib/__init__.py +12 -0
  854. scipy/optimize/_trlib/_trlib.cp313-win_arm64.lib +0 -0
  855. scipy/optimize/_trlib/_trlib.cp313-win_arm64.pyd +0 -0
  856. scipy/optimize/_trustregion.py +318 -0
  857. scipy/optimize/_trustregion_constr/__init__.py +6 -0
  858. scipy/optimize/_trustregion_constr/canonical_constraint.py +390 -0
  859. scipy/optimize/_trustregion_constr/equality_constrained_sqp.py +231 -0
  860. scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py +584 -0
  861. scipy/optimize/_trustregion_constr/projections.py +411 -0
  862. scipy/optimize/_trustregion_constr/qp_subproblem.py +637 -0
  863. scipy/optimize/_trustregion_constr/report.py +49 -0
  864. scipy/optimize/_trustregion_constr/tests/__init__.py +0 -0
  865. scipy/optimize/_trustregion_constr/tests/test_canonical_constraint.py +296 -0
  866. scipy/optimize/_trustregion_constr/tests/test_nested_minimize.py +39 -0
  867. scipy/optimize/_trustregion_constr/tests/test_projections.py +214 -0
  868. scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py +645 -0
  869. scipy/optimize/_trustregion_constr/tests/test_report.py +34 -0
  870. scipy/optimize/_trustregion_constr/tr_interior_point.py +361 -0
  871. scipy/optimize/_trustregion_dogleg.py +122 -0
  872. scipy/optimize/_trustregion_exact.py +437 -0
  873. scipy/optimize/_trustregion_krylov.py +65 -0
  874. scipy/optimize/_trustregion_ncg.py +126 -0
  875. scipy/optimize/_tstutils.py +972 -0
  876. scipy/optimize/_zeros.cp313-win_arm64.lib +0 -0
  877. scipy/optimize/_zeros.cp313-win_arm64.pyd +0 -0
  878. scipy/optimize/_zeros_py.py +1475 -0
  879. scipy/optimize/cobyla.py +19 -0
  880. scipy/optimize/cython_optimize/__init__.py +133 -0
  881. scipy/optimize/cython_optimize/_zeros.cp313-win_arm64.lib +0 -0
  882. scipy/optimize/cython_optimize/_zeros.cp313-win_arm64.pyd +0 -0
  883. scipy/optimize/cython_optimize/_zeros.pxd +33 -0
  884. scipy/optimize/cython_optimize/c_zeros.pxd +26 -0
  885. scipy/optimize/cython_optimize.pxd +11 -0
  886. scipy/optimize/elementwise.py +38 -0
  887. scipy/optimize/lbfgsb.py +23 -0
  888. scipy/optimize/linesearch.py +18 -0
  889. scipy/optimize/minpack.py +27 -0
  890. scipy/optimize/minpack2.py +17 -0
  891. scipy/optimize/moduleTNC.py +19 -0
  892. scipy/optimize/nonlin.py +29 -0
  893. scipy/optimize/optimize.py +40 -0
  894. scipy/optimize/slsqp.py +22 -0
  895. scipy/optimize/tests/__init__.py +0 -0
  896. scipy/optimize/tests/_cython_examples/extending.pyx +43 -0
  897. scipy/optimize/tests/_cython_examples/meson.build +32 -0
  898. scipy/optimize/tests/test__basinhopping.py +535 -0
  899. scipy/optimize/tests/test__differential_evolution.py +1703 -0
  900. scipy/optimize/tests/test__dual_annealing.py +416 -0
  901. scipy/optimize/tests/test__linprog_clean_inputs.py +312 -0
  902. scipy/optimize/tests/test__numdiff.py +885 -0
  903. scipy/optimize/tests/test__remove_redundancy.py +228 -0
  904. scipy/optimize/tests/test__root.py +124 -0
  905. scipy/optimize/tests/test__shgo.py +1164 -0
  906. scipy/optimize/tests/test__spectral.py +226 -0
  907. scipy/optimize/tests/test_bracket.py +896 -0
  908. scipy/optimize/tests/test_chandrupatla.py +982 -0
  909. scipy/optimize/tests/test_cobyla.py +195 -0
  910. scipy/optimize/tests/test_cobyqa.py +252 -0
  911. scipy/optimize/tests/test_constraint_conversion.py +286 -0
  912. scipy/optimize/tests/test_constraints.py +255 -0
  913. scipy/optimize/tests/test_cython_optimize.py +92 -0
  914. scipy/optimize/tests/test_differentiable_functions.py +1025 -0
  915. scipy/optimize/tests/test_direct.py +321 -0
  916. scipy/optimize/tests/test_extending.py +28 -0
  917. scipy/optimize/tests/test_hessian_update_strategy.py +300 -0
  918. scipy/optimize/tests/test_isotonic_regression.py +167 -0
  919. scipy/optimize/tests/test_lbfgsb_hessinv.py +65 -0
  920. scipy/optimize/tests/test_lbfgsb_setulb.py +122 -0
  921. scipy/optimize/tests/test_least_squares.py +986 -0
  922. scipy/optimize/tests/test_linear_assignment.py +116 -0
  923. scipy/optimize/tests/test_linesearch.py +328 -0
  924. scipy/optimize/tests/test_linprog.py +2577 -0
  925. scipy/optimize/tests/test_lsq_common.py +297 -0
  926. scipy/optimize/tests/test_lsq_linear.py +287 -0
  927. scipy/optimize/tests/test_milp.py +459 -0
  928. scipy/optimize/tests/test_minimize_constrained.py +845 -0
  929. scipy/optimize/tests/test_minpack.py +1194 -0
  930. scipy/optimize/tests/test_nnls.py +469 -0
  931. scipy/optimize/tests/test_nonlin.py +572 -0
  932. scipy/optimize/tests/test_optimize.py +3344 -0
  933. scipy/optimize/tests/test_quadratic_assignment.py +455 -0
  934. scipy/optimize/tests/test_regression.py +40 -0
  935. scipy/optimize/tests/test_slsqp.py +645 -0
  936. scipy/optimize/tests/test_tnc.py +345 -0
  937. scipy/optimize/tests/test_trustregion.py +110 -0
  938. scipy/optimize/tests/test_trustregion_exact.py +351 -0
  939. scipy/optimize/tests/test_trustregion_krylov.py +170 -0
  940. scipy/optimize/tests/test_zeros.py +998 -0
  941. scipy/optimize/tnc.py +22 -0
  942. scipy/optimize/zeros.py +26 -0
  943. scipy/signal/__init__.py +316 -0
  944. scipy/signal/_arraytools.py +264 -0
  945. scipy/signal/_czt.py +575 -0
  946. scipy/signal/_delegators.py +568 -0
  947. scipy/signal/_filter_design.py +5893 -0
  948. scipy/signal/_fir_filter_design.py +1458 -0
  949. scipy/signal/_lti_conversion.py +534 -0
  950. scipy/signal/_ltisys.py +3546 -0
  951. scipy/signal/_max_len_seq.py +139 -0
  952. scipy/signal/_max_len_seq_inner.cp313-win_arm64.lib +0 -0
  953. scipy/signal/_max_len_seq_inner.cp313-win_arm64.pyd +0 -0
  954. scipy/signal/_peak_finding.py +1310 -0
  955. scipy/signal/_peak_finding_utils.cp313-win_arm64.lib +0 -0
  956. scipy/signal/_peak_finding_utils.cp313-win_arm64.pyd +0 -0
  957. scipy/signal/_polyutils.py +172 -0
  958. scipy/signal/_savitzky_golay.py +357 -0
  959. scipy/signal/_short_time_fft.py +2228 -0
  960. scipy/signal/_signal_api.py +30 -0
  961. scipy/signal/_signaltools.py +5309 -0
  962. scipy/signal/_sigtools.cp313-win_arm64.lib +0 -0
  963. scipy/signal/_sigtools.cp313-win_arm64.pyd +0 -0
  964. scipy/signal/_sosfilt.cp313-win_arm64.lib +0 -0
  965. scipy/signal/_sosfilt.cp313-win_arm64.pyd +0 -0
  966. scipy/signal/_spectral_py.py +2471 -0
  967. scipy/signal/_spline.cp313-win_arm64.lib +0 -0
  968. scipy/signal/_spline.cp313-win_arm64.pyd +0 -0
  969. scipy/signal/_spline.pyi +34 -0
  970. scipy/signal/_spline_filters.py +848 -0
  971. scipy/signal/_support_alternative_backends.py +73 -0
  972. scipy/signal/_upfirdn.py +219 -0
  973. scipy/signal/_upfirdn_apply.cp313-win_arm64.lib +0 -0
  974. scipy/signal/_upfirdn_apply.cp313-win_arm64.pyd +0 -0
  975. scipy/signal/_waveforms.py +687 -0
  976. scipy/signal/_wavelets.py +29 -0
  977. scipy/signal/bsplines.py +21 -0
  978. scipy/signal/filter_design.py +28 -0
  979. scipy/signal/fir_filter_design.py +21 -0
  980. scipy/signal/lti_conversion.py +20 -0
  981. scipy/signal/ltisys.py +25 -0
  982. scipy/signal/signaltools.py +27 -0
  983. scipy/signal/spectral.py +21 -0
  984. scipy/signal/spline.py +18 -0
  985. scipy/signal/tests/__init__.py +0 -0
  986. scipy/signal/tests/_scipy_spectral_test_shim.py +311 -0
  987. scipy/signal/tests/mpsig.py +122 -0
  988. scipy/signal/tests/test_array_tools.py +111 -0
  989. scipy/signal/tests/test_bsplines.py +365 -0
  990. scipy/signal/tests/test_cont2discrete.py +424 -0
  991. scipy/signal/tests/test_czt.py +221 -0
  992. scipy/signal/tests/test_dltisys.py +599 -0
  993. scipy/signal/tests/test_filter_design.py +4744 -0
  994. scipy/signal/tests/test_fir_filter_design.py +851 -0
  995. scipy/signal/tests/test_ltisys.py +1225 -0
  996. scipy/signal/tests/test_max_len_seq.py +71 -0
  997. scipy/signal/tests/test_peak_finding.py +915 -0
  998. scipy/signal/tests/test_result_type.py +51 -0
  999. scipy/signal/tests/test_savitzky_golay.py +363 -0
  1000. scipy/signal/tests/test_short_time_fft.py +1107 -0
  1001. scipy/signal/tests/test_signaltools.py +4735 -0
  1002. scipy/signal/tests/test_spectral.py +2141 -0
  1003. scipy/signal/tests/test_splines.py +427 -0
  1004. scipy/signal/tests/test_upfirdn.py +322 -0
  1005. scipy/signal/tests/test_waveforms.py +400 -0
  1006. scipy/signal/tests/test_wavelets.py +59 -0
  1007. scipy/signal/tests/test_windows.py +987 -0
  1008. scipy/signal/waveforms.py +20 -0
  1009. scipy/signal/wavelets.py +17 -0
  1010. scipy/signal/windows/__init__.py +52 -0
  1011. scipy/signal/windows/_windows.py +2513 -0
  1012. scipy/signal/windows/windows.py +23 -0
  1013. scipy/sparse/__init__.py +350 -0
  1014. scipy/sparse/_base.py +1613 -0
  1015. scipy/sparse/_bsr.py +880 -0
  1016. scipy/sparse/_compressed.py +1328 -0
  1017. scipy/sparse/_construct.py +1454 -0
  1018. scipy/sparse/_coo.py +1581 -0
  1019. scipy/sparse/_csc.py +367 -0
  1020. scipy/sparse/_csparsetools.cp313-win_arm64.lib +0 -0
  1021. scipy/sparse/_csparsetools.cp313-win_arm64.pyd +0 -0
  1022. scipy/sparse/_csr.py +558 -0
  1023. scipy/sparse/_data.py +569 -0
  1024. scipy/sparse/_dia.py +677 -0
  1025. scipy/sparse/_dok.py +669 -0
  1026. scipy/sparse/_extract.py +178 -0
  1027. scipy/sparse/_index.py +444 -0
  1028. scipy/sparse/_lil.py +632 -0
  1029. scipy/sparse/_matrix.py +169 -0
  1030. scipy/sparse/_matrix_io.py +167 -0
  1031. scipy/sparse/_sparsetools.cp313-win_arm64.lib +0 -0
  1032. scipy/sparse/_sparsetools.cp313-win_arm64.pyd +0 -0
  1033. scipy/sparse/_spfuncs.py +76 -0
  1034. scipy/sparse/_sputils.py +632 -0
  1035. scipy/sparse/base.py +24 -0
  1036. scipy/sparse/bsr.py +22 -0
  1037. scipy/sparse/compressed.py +20 -0
  1038. scipy/sparse/construct.py +38 -0
  1039. scipy/sparse/coo.py +23 -0
  1040. scipy/sparse/csc.py +22 -0
  1041. scipy/sparse/csgraph/__init__.py +210 -0
  1042. scipy/sparse/csgraph/_flow.cp313-win_arm64.lib +0 -0
  1043. scipy/sparse/csgraph/_flow.cp313-win_arm64.pyd +0 -0
  1044. scipy/sparse/csgraph/_laplacian.py +563 -0
  1045. scipy/sparse/csgraph/_matching.cp313-win_arm64.lib +0 -0
  1046. scipy/sparse/csgraph/_matching.cp313-win_arm64.pyd +0 -0
  1047. scipy/sparse/csgraph/_min_spanning_tree.cp313-win_arm64.lib +0 -0
  1048. scipy/sparse/csgraph/_min_spanning_tree.cp313-win_arm64.pyd +0 -0
  1049. scipy/sparse/csgraph/_reordering.cp313-win_arm64.lib +0 -0
  1050. scipy/sparse/csgraph/_reordering.cp313-win_arm64.pyd +0 -0
  1051. scipy/sparse/csgraph/_shortest_path.cp313-win_arm64.lib +0 -0
  1052. scipy/sparse/csgraph/_shortest_path.cp313-win_arm64.pyd +0 -0
  1053. scipy/sparse/csgraph/_tools.cp313-win_arm64.lib +0 -0
  1054. scipy/sparse/csgraph/_tools.cp313-win_arm64.pyd +0 -0
  1055. scipy/sparse/csgraph/_traversal.cp313-win_arm64.lib +0 -0
  1056. scipy/sparse/csgraph/_traversal.cp313-win_arm64.pyd +0 -0
  1057. scipy/sparse/csgraph/_validation.py +66 -0
  1058. scipy/sparse/csgraph/tests/__init__.py +0 -0
  1059. scipy/sparse/csgraph/tests/test_connected_components.py +119 -0
  1060. scipy/sparse/csgraph/tests/test_conversions.py +61 -0
  1061. scipy/sparse/csgraph/tests/test_flow.py +209 -0
  1062. scipy/sparse/csgraph/tests/test_graph_laplacian.py +368 -0
  1063. scipy/sparse/csgraph/tests/test_matching.py +307 -0
  1064. scipy/sparse/csgraph/tests/test_pydata_sparse.py +197 -0
  1065. scipy/sparse/csgraph/tests/test_reordering.py +70 -0
  1066. scipy/sparse/csgraph/tests/test_shortest_path.py +540 -0
  1067. scipy/sparse/csgraph/tests/test_spanning_tree.py +66 -0
  1068. scipy/sparse/csgraph/tests/test_traversal.py +148 -0
  1069. scipy/sparse/csr.py +22 -0
  1070. scipy/sparse/data.py +18 -0
  1071. scipy/sparse/dia.py +22 -0
  1072. scipy/sparse/dok.py +22 -0
  1073. scipy/sparse/extract.py +23 -0
  1074. scipy/sparse/lil.py +22 -0
  1075. scipy/sparse/linalg/__init__.py +148 -0
  1076. scipy/sparse/linalg/_dsolve/__init__.py +71 -0
  1077. scipy/sparse/linalg/_dsolve/_add_newdocs.py +147 -0
  1078. scipy/sparse/linalg/_dsolve/_superlu.cp313-win_arm64.lib +0 -0
  1079. scipy/sparse/linalg/_dsolve/_superlu.cp313-win_arm64.pyd +0 -0
  1080. scipy/sparse/linalg/_dsolve/linsolve.py +882 -0
  1081. scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
  1082. scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +928 -0
  1083. scipy/sparse/linalg/_eigen/__init__.py +22 -0
  1084. scipy/sparse/linalg/_eigen/_svds.py +540 -0
  1085. scipy/sparse/linalg/_eigen/_svds_doc.py +382 -0
  1086. scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
  1087. scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
  1088. scipy/sparse/linalg/_eigen/arpack/_arpack.cp313-win_arm64.lib +0 -0
  1089. scipy/sparse/linalg/_eigen/arpack/_arpack.cp313-win_arm64.pyd +0 -0
  1090. scipy/sparse/linalg/_eigen/arpack/arpack.py +1706 -0
  1091. scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
  1092. scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +717 -0
  1093. scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
  1094. scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1110 -0
  1095. scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
  1096. scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +725 -0
  1097. scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
  1098. scipy/sparse/linalg/_eigen/tests/test_svds.py +886 -0
  1099. scipy/sparse/linalg/_expm_multiply.py +816 -0
  1100. scipy/sparse/linalg/_interface.py +920 -0
  1101. scipy/sparse/linalg/_isolve/__init__.py +20 -0
  1102. scipy/sparse/linalg/_isolve/_gcrotmk.py +503 -0
  1103. scipy/sparse/linalg/_isolve/iterative.py +1051 -0
  1104. scipy/sparse/linalg/_isolve/lgmres.py +230 -0
  1105. scipy/sparse/linalg/_isolve/lsmr.py +486 -0
  1106. scipy/sparse/linalg/_isolve/lsqr.py +589 -0
  1107. scipy/sparse/linalg/_isolve/minres.py +372 -0
  1108. scipy/sparse/linalg/_isolve/tests/__init__.py +0 -0
  1109. scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py +183 -0
  1110. scipy/sparse/linalg/_isolve/tests/test_iterative.py +809 -0
  1111. scipy/sparse/linalg/_isolve/tests/test_lgmres.py +225 -0
  1112. scipy/sparse/linalg/_isolve/tests/test_lsmr.py +185 -0
  1113. scipy/sparse/linalg/_isolve/tests/test_lsqr.py +120 -0
  1114. scipy/sparse/linalg/_isolve/tests/test_minres.py +97 -0
  1115. scipy/sparse/linalg/_isolve/tests/test_utils.py +9 -0
  1116. scipy/sparse/linalg/_isolve/tfqmr.py +179 -0
  1117. scipy/sparse/linalg/_isolve/utils.py +121 -0
  1118. scipy/sparse/linalg/_matfuncs.py +940 -0
  1119. scipy/sparse/linalg/_norm.py +195 -0
  1120. scipy/sparse/linalg/_onenormest.py +467 -0
  1121. scipy/sparse/linalg/_propack/_cpropack.cp313-win_arm64.lib +0 -0
  1122. scipy/sparse/linalg/_propack/_cpropack.cp313-win_arm64.pyd +0 -0
  1123. scipy/sparse/linalg/_propack/_dpropack.cp313-win_arm64.lib +0 -0
  1124. scipy/sparse/linalg/_propack/_dpropack.cp313-win_arm64.pyd +0 -0
  1125. scipy/sparse/linalg/_propack/_spropack.cp313-win_arm64.lib +0 -0
  1126. scipy/sparse/linalg/_propack/_spropack.cp313-win_arm64.pyd +0 -0
  1127. scipy/sparse/linalg/_propack/_zpropack.cp313-win_arm64.lib +0 -0
  1128. scipy/sparse/linalg/_propack/_zpropack.cp313-win_arm64.pyd +0 -0
  1129. scipy/sparse/linalg/_special_sparse_arrays.py +949 -0
  1130. scipy/sparse/linalg/_svdp.py +309 -0
  1131. scipy/sparse/linalg/dsolve.py +22 -0
  1132. scipy/sparse/linalg/eigen.py +21 -0
  1133. scipy/sparse/linalg/interface.py +20 -0
  1134. scipy/sparse/linalg/isolve.py +22 -0
  1135. scipy/sparse/linalg/matfuncs.py +18 -0
  1136. scipy/sparse/linalg/tests/__init__.py +0 -0
  1137. scipy/sparse/linalg/tests/propack_test_data.npz +0 -0
  1138. scipy/sparse/linalg/tests/test_expm_multiply.py +367 -0
  1139. scipy/sparse/linalg/tests/test_interface.py +561 -0
  1140. scipy/sparse/linalg/tests/test_matfuncs.py +592 -0
  1141. scipy/sparse/linalg/tests/test_norm.py +154 -0
  1142. scipy/sparse/linalg/tests/test_onenormest.py +252 -0
  1143. scipy/sparse/linalg/tests/test_propack.py +165 -0
  1144. scipy/sparse/linalg/tests/test_pydata_sparse.py +272 -0
  1145. scipy/sparse/linalg/tests/test_special_sparse_arrays.py +337 -0
  1146. scipy/sparse/sparsetools.py +17 -0
  1147. scipy/sparse/spfuncs.py +17 -0
  1148. scipy/sparse/sputils.py +17 -0
  1149. scipy/sparse/tests/__init__.py +0 -0
  1150. scipy/sparse/tests/data/csc_py2.npz +0 -0
  1151. scipy/sparse/tests/data/csc_py3.npz +0 -0
  1152. scipy/sparse/tests/test_arithmetic1d.py +341 -0
  1153. scipy/sparse/tests/test_array_api.py +561 -0
  1154. scipy/sparse/tests/test_base.py +5870 -0
  1155. scipy/sparse/tests/test_common1d.py +447 -0
  1156. scipy/sparse/tests/test_construct.py +872 -0
  1157. scipy/sparse/tests/test_coo.py +1119 -0
  1158. scipy/sparse/tests/test_csc.py +98 -0
  1159. scipy/sparse/tests/test_csr.py +214 -0
  1160. scipy/sparse/tests/test_dok.py +209 -0
  1161. scipy/sparse/tests/test_extract.py +51 -0
  1162. scipy/sparse/tests/test_indexing1d.py +603 -0
  1163. scipy/sparse/tests/test_matrix_io.py +109 -0
  1164. scipy/sparse/tests/test_minmax1d.py +128 -0
  1165. scipy/sparse/tests/test_sparsetools.py +344 -0
  1166. scipy/sparse/tests/test_spfuncs.py +97 -0
  1167. scipy/sparse/tests/test_sputils.py +424 -0
  1168. scipy/spatial/__init__.py +129 -0
  1169. scipy/spatial/_ckdtree.cp313-win_arm64.lib +0 -0
  1170. scipy/spatial/_ckdtree.cp313-win_arm64.pyd +0 -0
  1171. scipy/spatial/_distance_pybind.cp313-win_arm64.lib +0 -0
  1172. scipy/spatial/_distance_pybind.cp313-win_arm64.pyd +0 -0
  1173. scipy/spatial/_distance_wrap.cp313-win_arm64.lib +0 -0
  1174. scipy/spatial/_distance_wrap.cp313-win_arm64.pyd +0 -0
  1175. scipy/spatial/_geometric_slerp.py +238 -0
  1176. scipy/spatial/_hausdorff.cp313-win_arm64.lib +0 -0
  1177. scipy/spatial/_hausdorff.cp313-win_arm64.pyd +0 -0
  1178. scipy/spatial/_kdtree.py +920 -0
  1179. scipy/spatial/_plotutils.py +274 -0
  1180. scipy/spatial/_procrustes.py +132 -0
  1181. scipy/spatial/_qhull.cp313-win_arm64.lib +0 -0
  1182. scipy/spatial/_qhull.cp313-win_arm64.pyd +0 -0
  1183. scipy/spatial/_qhull.pyi +213 -0
  1184. scipy/spatial/_spherical_voronoi.py +341 -0
  1185. scipy/spatial/_voronoi.cp313-win_arm64.lib +0 -0
  1186. scipy/spatial/_voronoi.cp313-win_arm64.pyd +0 -0
  1187. scipy/spatial/_voronoi.pyi +4 -0
  1188. scipy/spatial/ckdtree.py +18 -0
  1189. scipy/spatial/distance.py +3147 -0
  1190. scipy/spatial/distance.pyi +210 -0
  1191. scipy/spatial/kdtree.py +25 -0
  1192. scipy/spatial/qhull.py +25 -0
  1193. scipy/spatial/qhull_src/COPYING_QHULL.txt +39 -0
  1194. scipy/spatial/tests/__init__.py +0 -0
  1195. scipy/spatial/tests/data/cdist-X1.txt +10 -0
  1196. scipy/spatial/tests/data/cdist-X2.txt +20 -0
  1197. scipy/spatial/tests/data/degenerate_pointset.npz +0 -0
  1198. scipy/spatial/tests/data/iris.txt +150 -0
  1199. scipy/spatial/tests/data/pdist-boolean-inp.txt +20 -0
  1200. scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt +1 -0
  1201. scipy/spatial/tests/data/pdist-chebyshev-ml.txt +1 -0
  1202. scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt +1 -0
  1203. scipy/spatial/tests/data/pdist-cityblock-ml.txt +1 -0
  1204. scipy/spatial/tests/data/pdist-correlation-ml-iris.txt +1 -0
  1205. scipy/spatial/tests/data/pdist-correlation-ml.txt +1 -0
  1206. scipy/spatial/tests/data/pdist-cosine-ml-iris.txt +1 -0
  1207. scipy/spatial/tests/data/pdist-cosine-ml.txt +1 -0
  1208. scipy/spatial/tests/data/pdist-double-inp.txt +20 -0
  1209. scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt +1 -0
  1210. scipy/spatial/tests/data/pdist-euclidean-ml.txt +1 -0
  1211. scipy/spatial/tests/data/pdist-hamming-ml.txt +1 -0
  1212. scipy/spatial/tests/data/pdist-jaccard-ml.txt +1 -0
  1213. scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt +1 -0
  1214. scipy/spatial/tests/data/pdist-jensenshannon-ml.txt +1 -0
  1215. scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt +1 -0
  1216. scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt +1 -0
  1217. scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt +1 -0
  1218. scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt +1 -0
  1219. scipy/spatial/tests/data/pdist-seuclidean-ml.txt +1 -0
  1220. scipy/spatial/tests/data/pdist-spearman-ml.txt +1 -0
  1221. scipy/spatial/tests/data/random-bool-data.txt +100 -0
  1222. scipy/spatial/tests/data/random-double-data.txt +100 -0
  1223. scipy/spatial/tests/data/random-int-data.txt +100 -0
  1224. scipy/spatial/tests/data/random-uint-data.txt +100 -0
  1225. scipy/spatial/tests/data/selfdual-4d-polytope.txt +27 -0
  1226. scipy/spatial/tests/test__plotutils.py +91 -0
  1227. scipy/spatial/tests/test__procrustes.py +116 -0
  1228. scipy/spatial/tests/test_distance.py +2389 -0
  1229. scipy/spatial/tests/test_hausdorff.py +199 -0
  1230. scipy/spatial/tests/test_kdtree.py +1536 -0
  1231. scipy/spatial/tests/test_qhull.py +1313 -0
  1232. scipy/spatial/tests/test_slerp.py +417 -0
  1233. scipy/spatial/tests/test_spherical_voronoi.py +358 -0
  1234. scipy/spatial/transform/__init__.py +31 -0
  1235. scipy/spatial/transform/_rigid_transform.cp313-win_arm64.lib +0 -0
  1236. scipy/spatial/transform/_rigid_transform.cp313-win_arm64.pyd +0 -0
  1237. scipy/spatial/transform/_rotation.cp313-win_arm64.lib +0 -0
  1238. scipy/spatial/transform/_rotation.cp313-win_arm64.pyd +0 -0
  1239. scipy/spatial/transform/_rotation_groups.py +140 -0
  1240. scipy/spatial/transform/_rotation_spline.py +460 -0
  1241. scipy/spatial/transform/rotation.py +21 -0
  1242. scipy/spatial/transform/tests/__init__.py +0 -0
  1243. scipy/spatial/transform/tests/test_rigid_transform.py +1221 -0
  1244. scipy/spatial/transform/tests/test_rotation.py +2569 -0
  1245. scipy/spatial/transform/tests/test_rotation_groups.py +169 -0
  1246. scipy/spatial/transform/tests/test_rotation_spline.py +183 -0
  1247. scipy/special/__init__.pxd +1 -0
  1248. scipy/special/__init__.py +841 -0
  1249. scipy/special/_add_newdocs.py +9961 -0
  1250. scipy/special/_basic.py +3576 -0
  1251. scipy/special/_comb.cp313-win_arm64.lib +0 -0
  1252. scipy/special/_comb.cp313-win_arm64.pyd +0 -0
  1253. scipy/special/_ellip_harm.py +214 -0
  1254. scipy/special/_ellip_harm_2.cp313-win_arm64.lib +0 -0
  1255. scipy/special/_ellip_harm_2.cp313-win_arm64.pyd +0 -0
  1256. scipy/special/_gufuncs.cp313-win_arm64.lib +0 -0
  1257. scipy/special/_gufuncs.cp313-win_arm64.pyd +0 -0
  1258. scipy/special/_input_validation.py +17 -0
  1259. scipy/special/_lambertw.py +149 -0
  1260. scipy/special/_logsumexp.py +426 -0
  1261. scipy/special/_mptestutils.py +453 -0
  1262. scipy/special/_multiufuncs.py +610 -0
  1263. scipy/special/_orthogonal.py +2592 -0
  1264. scipy/special/_orthogonal.pyi +330 -0
  1265. scipy/special/_precompute/__init__.py +0 -0
  1266. scipy/special/_precompute/cosine_cdf.py +17 -0
  1267. scipy/special/_precompute/expn_asy.py +54 -0
  1268. scipy/special/_precompute/gammainc_asy.py +116 -0
  1269. scipy/special/_precompute/gammainc_data.py +124 -0
  1270. scipy/special/_precompute/hyp2f1_data.py +484 -0
  1271. scipy/special/_precompute/lambertw.py +68 -0
  1272. scipy/special/_precompute/loggamma.py +43 -0
  1273. scipy/special/_precompute/struve_convergence.py +131 -0
  1274. scipy/special/_precompute/utils.py +38 -0
  1275. scipy/special/_precompute/wright_bessel.py +342 -0
  1276. scipy/special/_precompute/wright_bessel_data.py +152 -0
  1277. scipy/special/_precompute/wrightomega.py +41 -0
  1278. scipy/special/_precompute/zetac.py +27 -0
  1279. scipy/special/_sf_error.py +15 -0
  1280. scipy/special/_specfun.cp313-win_arm64.lib +0 -0
  1281. scipy/special/_specfun.cp313-win_arm64.pyd +0 -0
  1282. scipy/special/_special_ufuncs.cp313-win_arm64.lib +0 -0
  1283. scipy/special/_special_ufuncs.cp313-win_arm64.pyd +0 -0
  1284. scipy/special/_spfun_stats.py +106 -0
  1285. scipy/special/_spherical_bessel.py +397 -0
  1286. scipy/special/_support_alternative_backends.py +295 -0
  1287. scipy/special/_test_internal.cp313-win_arm64.lib +0 -0
  1288. scipy/special/_test_internal.cp313-win_arm64.pyd +0 -0
  1289. scipy/special/_test_internal.pyi +9 -0
  1290. scipy/special/_testutils.py +321 -0
  1291. scipy/special/_ufuncs.cp313-win_arm64.lib +0 -0
  1292. scipy/special/_ufuncs.cp313-win_arm64.pyd +0 -0
  1293. scipy/special/_ufuncs.pyi +522 -0
  1294. scipy/special/_ufuncs.pyx +13173 -0
  1295. scipy/special/_ufuncs_cxx.cp313-win_arm64.lib +0 -0
  1296. scipy/special/_ufuncs_cxx.cp313-win_arm64.pyd +0 -0
  1297. scipy/special/_ufuncs_cxx.pxd +142 -0
  1298. scipy/special/_ufuncs_cxx.pyx +427 -0
  1299. scipy/special/_ufuncs_cxx_defs.h +147 -0
  1300. scipy/special/_ufuncs_defs.h +57 -0
  1301. scipy/special/add_newdocs.py +15 -0
  1302. scipy/special/basic.py +87 -0
  1303. scipy/special/cython_special.cp313-win_arm64.lib +0 -0
  1304. scipy/special/cython_special.cp313-win_arm64.pyd +0 -0
  1305. scipy/special/cython_special.pxd +259 -0
  1306. scipy/special/cython_special.pyi +3 -0
  1307. scipy/special/orthogonal.py +45 -0
  1308. scipy/special/sf_error.py +20 -0
  1309. scipy/special/specfun.py +24 -0
  1310. scipy/special/spfun_stats.py +17 -0
  1311. scipy/special/tests/__init__.py +0 -0
  1312. scipy/special/tests/_cython_examples/extending.pyx +12 -0
  1313. scipy/special/tests/_cython_examples/meson.build +34 -0
  1314. scipy/special/tests/data/__init__.py +0 -0
  1315. scipy/special/tests/data/boost.npz +0 -0
  1316. scipy/special/tests/data/gsl.npz +0 -0
  1317. scipy/special/tests/data/local.npz +0 -0
  1318. scipy/special/tests/test_basic.py +4815 -0
  1319. scipy/special/tests/test_bdtr.py +112 -0
  1320. scipy/special/tests/test_boost_ufuncs.py +64 -0
  1321. scipy/special/tests/test_boxcox.py +125 -0
  1322. scipy/special/tests/test_cdflib.py +712 -0
  1323. scipy/special/tests/test_cdft_asymptotic.py +49 -0
  1324. scipy/special/tests/test_cephes_intp_cast.py +29 -0
  1325. scipy/special/tests/test_cosine_distr.py +83 -0
  1326. scipy/special/tests/test_cython_special.py +363 -0
  1327. scipy/special/tests/test_data.py +719 -0
  1328. scipy/special/tests/test_dd.py +42 -0
  1329. scipy/special/tests/test_digamma.py +45 -0
  1330. scipy/special/tests/test_ellip_harm.py +278 -0
  1331. scipy/special/tests/test_erfinv.py +89 -0
  1332. scipy/special/tests/test_exponential_integrals.py +118 -0
  1333. scipy/special/tests/test_extending.py +28 -0
  1334. scipy/special/tests/test_faddeeva.py +85 -0
  1335. scipy/special/tests/test_gamma.py +12 -0
  1336. scipy/special/tests/test_gammainc.py +152 -0
  1337. scipy/special/tests/test_hyp2f1.py +2566 -0
  1338. scipy/special/tests/test_hypergeometric.py +234 -0
  1339. scipy/special/tests/test_iv_ratio.py +249 -0
  1340. scipy/special/tests/test_kolmogorov.py +491 -0
  1341. scipy/special/tests/test_lambertw.py +109 -0
  1342. scipy/special/tests/test_legendre.py +1518 -0
  1343. scipy/special/tests/test_log1mexp.py +85 -0
  1344. scipy/special/tests/test_loggamma.py +70 -0
  1345. scipy/special/tests/test_logit.py +162 -0
  1346. scipy/special/tests/test_logsumexp.py +469 -0
  1347. scipy/special/tests/test_mpmath.py +2293 -0
  1348. scipy/special/tests/test_nan_inputs.py +65 -0
  1349. scipy/special/tests/test_ndtr.py +77 -0
  1350. scipy/special/tests/test_ndtri_exp.py +94 -0
  1351. scipy/special/tests/test_orthogonal.py +821 -0
  1352. scipy/special/tests/test_orthogonal_eval.py +275 -0
  1353. scipy/special/tests/test_owens_t.py +53 -0
  1354. scipy/special/tests/test_pcf.py +24 -0
  1355. scipy/special/tests/test_pdtr.py +48 -0
  1356. scipy/special/tests/test_powm1.py +65 -0
  1357. scipy/special/tests/test_precompute_expn_asy.py +24 -0
  1358. scipy/special/tests/test_precompute_gammainc.py +108 -0
  1359. scipy/special/tests/test_precompute_utils.py +36 -0
  1360. scipy/special/tests/test_round.py +18 -0
  1361. scipy/special/tests/test_sf_error.py +146 -0
  1362. scipy/special/tests/test_sici.py +36 -0
  1363. scipy/special/tests/test_specfun.py +48 -0
  1364. scipy/special/tests/test_spence.py +32 -0
  1365. scipy/special/tests/test_spfun_stats.py +61 -0
  1366. scipy/special/tests/test_sph_harm.py +85 -0
  1367. scipy/special/tests/test_spherical_bessel.py +400 -0
  1368. scipy/special/tests/test_support_alternative_backends.py +248 -0
  1369. scipy/special/tests/test_trig.py +72 -0
  1370. scipy/special/tests/test_ufunc_signatures.py +46 -0
  1371. scipy/special/tests/test_wright_bessel.py +205 -0
  1372. scipy/special/tests/test_wrightomega.py +117 -0
  1373. scipy/special/tests/test_zeta.py +301 -0
  1374. scipy/stats/__init__.py +670 -0
  1375. scipy/stats/_ansari_swilk_statistics.cp313-win_arm64.lib +0 -0
  1376. scipy/stats/_ansari_swilk_statistics.cp313-win_arm64.pyd +0 -0
  1377. scipy/stats/_axis_nan_policy.py +692 -0
  1378. scipy/stats/_biasedurn.cp313-win_arm64.lib +0 -0
  1379. scipy/stats/_biasedurn.cp313-win_arm64.pyd +0 -0
  1380. scipy/stats/_biasedurn.pxd +27 -0
  1381. scipy/stats/_binned_statistic.py +795 -0
  1382. scipy/stats/_binomtest.py +375 -0
  1383. scipy/stats/_bws_test.py +177 -0
  1384. scipy/stats/_censored_data.py +459 -0
  1385. scipy/stats/_common.py +5 -0
  1386. scipy/stats/_constants.py +42 -0
  1387. scipy/stats/_continued_fraction.py +387 -0
  1388. scipy/stats/_continuous_distns.py +12486 -0
  1389. scipy/stats/_correlation.py +210 -0
  1390. scipy/stats/_covariance.py +636 -0
  1391. scipy/stats/_crosstab.py +204 -0
  1392. scipy/stats/_discrete_distns.py +2098 -0
  1393. scipy/stats/_distn_infrastructure.py +4201 -0
  1394. scipy/stats/_distr_params.py +299 -0
  1395. scipy/stats/_distribution_infrastructure.py +5750 -0
  1396. scipy/stats/_entropy.py +428 -0
  1397. scipy/stats/_finite_differences.py +145 -0
  1398. scipy/stats/_fit.py +1351 -0
  1399. scipy/stats/_hypotests.py +2060 -0
  1400. scipy/stats/_kde.py +732 -0
  1401. scipy/stats/_ksstats.py +600 -0
  1402. scipy/stats/_levy_stable/__init__.py +1231 -0
  1403. scipy/stats/_levy_stable/levyst.cp313-win_arm64.lib +0 -0
  1404. scipy/stats/_levy_stable/levyst.cp313-win_arm64.pyd +0 -0
  1405. scipy/stats/_mannwhitneyu.py +492 -0
  1406. scipy/stats/_mgc.py +550 -0
  1407. scipy/stats/_morestats.py +4626 -0
  1408. scipy/stats/_mstats_basic.py +3658 -0
  1409. scipy/stats/_mstats_extras.py +521 -0
  1410. scipy/stats/_multicomp.py +449 -0
  1411. scipy/stats/_multivariate.py +7281 -0
  1412. scipy/stats/_new_distributions.py +452 -0
  1413. scipy/stats/_odds_ratio.py +466 -0
  1414. scipy/stats/_page_trend_test.py +486 -0
  1415. scipy/stats/_probability_distribution.py +1964 -0
  1416. scipy/stats/_qmc.py +2956 -0
  1417. scipy/stats/_qmc_cy.cp313-win_arm64.lib +0 -0
  1418. scipy/stats/_qmc_cy.cp313-win_arm64.pyd +0 -0
  1419. scipy/stats/_qmc_cy.pyi +54 -0
  1420. scipy/stats/_qmvnt.py +454 -0
  1421. scipy/stats/_qmvnt_cy.cp313-win_arm64.lib +0 -0
  1422. scipy/stats/_qmvnt_cy.cp313-win_arm64.pyd +0 -0
  1423. scipy/stats/_quantile.py +335 -0
  1424. scipy/stats/_rcont/__init__.py +4 -0
  1425. scipy/stats/_rcont/rcont.cp313-win_arm64.lib +0 -0
  1426. scipy/stats/_rcont/rcont.cp313-win_arm64.pyd +0 -0
  1427. scipy/stats/_relative_risk.py +263 -0
  1428. scipy/stats/_resampling.py +2352 -0
  1429. scipy/stats/_result_classes.py +40 -0
  1430. scipy/stats/_sampling.py +1314 -0
  1431. scipy/stats/_sensitivity_analysis.py +713 -0
  1432. scipy/stats/_sobol.cp313-win_arm64.lib +0 -0
  1433. scipy/stats/_sobol.cp313-win_arm64.pyd +0 -0
  1434. scipy/stats/_sobol.pyi +54 -0
  1435. scipy/stats/_sobol_direction_numbers.npz +0 -0
  1436. scipy/stats/_stats.cp313-win_arm64.lib +0 -0
  1437. scipy/stats/_stats.cp313-win_arm64.pyd +0 -0
  1438. scipy/stats/_stats.pxd +10 -0
  1439. scipy/stats/_stats_mstats_common.py +322 -0
  1440. scipy/stats/_stats_py.py +11089 -0
  1441. scipy/stats/_stats_pythran.cp313-win_arm64.lib +0 -0
  1442. scipy/stats/_stats_pythran.cp313-win_arm64.pyd +0 -0
  1443. scipy/stats/_survival.py +683 -0
  1444. scipy/stats/_tukeylambda_stats.py +199 -0
  1445. scipy/stats/_unuran/__init__.py +0 -0
  1446. scipy/stats/_unuran/unuran_wrapper.cp313-win_arm64.lib +0 -0
  1447. scipy/stats/_unuran/unuran_wrapper.cp313-win_arm64.pyd +0 -0
  1448. scipy/stats/_unuran/unuran_wrapper.pyi +179 -0
  1449. scipy/stats/_variation.py +126 -0
  1450. scipy/stats/_warnings_errors.py +38 -0
  1451. scipy/stats/_wilcoxon.py +265 -0
  1452. scipy/stats/biasedurn.py +16 -0
  1453. scipy/stats/contingency.py +521 -0
  1454. scipy/stats/distributions.py +24 -0
  1455. scipy/stats/kde.py +18 -0
  1456. scipy/stats/morestats.py +27 -0
  1457. scipy/stats/mstats.py +140 -0
  1458. scipy/stats/mstats_basic.py +42 -0
  1459. scipy/stats/mstats_extras.py +25 -0
  1460. scipy/stats/mvn.py +17 -0
  1461. scipy/stats/qmc.py +236 -0
  1462. scipy/stats/sampling.py +73 -0
  1463. scipy/stats/stats.py +41 -0
  1464. scipy/stats/tests/__init__.py +0 -0
  1465. scipy/stats/tests/common_tests.py +356 -0
  1466. scipy/stats/tests/data/_mvt.py +171 -0
  1467. scipy/stats/tests/data/fisher_exact_results_from_r.py +607 -0
  1468. scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy +0 -0
  1469. scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy +0 -0
  1470. scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy +0 -0
  1471. scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy +0 -0
  1472. scipy/stats/tests/data/nist_anova/AtmWtAg.dat +108 -0
  1473. scipy/stats/tests/data/nist_anova/SiRstv.dat +85 -0
  1474. scipy/stats/tests/data/nist_anova/SmLs01.dat +249 -0
  1475. scipy/stats/tests/data/nist_anova/SmLs02.dat +1869 -0
  1476. scipy/stats/tests/data/nist_anova/SmLs03.dat +18069 -0
  1477. scipy/stats/tests/data/nist_anova/SmLs04.dat +249 -0
  1478. scipy/stats/tests/data/nist_anova/SmLs05.dat +1869 -0
  1479. scipy/stats/tests/data/nist_anova/SmLs06.dat +18069 -0
  1480. scipy/stats/tests/data/nist_anova/SmLs07.dat +249 -0
  1481. scipy/stats/tests/data/nist_anova/SmLs08.dat +1869 -0
  1482. scipy/stats/tests/data/nist_anova/SmLs09.dat +18069 -0
  1483. scipy/stats/tests/data/nist_linregress/Norris.dat +97 -0
  1484. scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy +0 -0
  1485. scipy/stats/tests/data/studentized_range_mpmath_ref.json +1499 -0
  1486. scipy/stats/tests/test_axis_nan_policy.py +1388 -0
  1487. scipy/stats/tests/test_binned_statistic.py +568 -0
  1488. scipy/stats/tests/test_censored_data.py +152 -0
  1489. scipy/stats/tests/test_contingency.py +294 -0
  1490. scipy/stats/tests/test_continued_fraction.py +173 -0
  1491. scipy/stats/tests/test_continuous.py +2198 -0
  1492. scipy/stats/tests/test_continuous_basic.py +1053 -0
  1493. scipy/stats/tests/test_continuous_fit_censored.py +683 -0
  1494. scipy/stats/tests/test_correlation.py +80 -0
  1495. scipy/stats/tests/test_crosstab.py +115 -0
  1496. scipy/stats/tests/test_discrete_basic.py +580 -0
  1497. scipy/stats/tests/test_discrete_distns.py +700 -0
  1498. scipy/stats/tests/test_distributions.py +10413 -0
  1499. scipy/stats/tests/test_entropy.py +322 -0
  1500. scipy/stats/tests/test_fast_gen_inversion.py +435 -0
  1501. scipy/stats/tests/test_fit.py +1090 -0
  1502. scipy/stats/tests/test_hypotests.py +1991 -0
  1503. scipy/stats/tests/test_kdeoth.py +676 -0
  1504. scipy/stats/tests/test_marray.py +289 -0
  1505. scipy/stats/tests/test_mgc.py +217 -0
  1506. scipy/stats/tests/test_morestats.py +3259 -0
  1507. scipy/stats/tests/test_mstats_basic.py +2071 -0
  1508. scipy/stats/tests/test_mstats_extras.py +172 -0
  1509. scipy/stats/tests/test_multicomp.py +405 -0
  1510. scipy/stats/tests/test_multivariate.py +4381 -0
  1511. scipy/stats/tests/test_odds_ratio.py +148 -0
  1512. scipy/stats/tests/test_qmc.py +1492 -0
  1513. scipy/stats/tests/test_quantile.py +199 -0
  1514. scipy/stats/tests/test_rank.py +345 -0
  1515. scipy/stats/tests/test_relative_risk.py +95 -0
  1516. scipy/stats/tests/test_resampling.py +2000 -0
  1517. scipy/stats/tests/test_sampling.py +1450 -0
  1518. scipy/stats/tests/test_sensitivity_analysis.py +310 -0
  1519. scipy/stats/tests/test_stats.py +9707 -0
  1520. scipy/stats/tests/test_survival.py +466 -0
  1521. scipy/stats/tests/test_tukeylambda_stats.py +85 -0
  1522. scipy/stats/tests/test_variation.py +216 -0
  1523. scipy/version.py +12 -0
  1524. scipy-1.16.2.dist-info/DELVEWHEEL +2 -0
  1525. scipy-1.16.2.dist-info/LICENSE.txt +912 -0
  1526. scipy-1.16.2.dist-info/METADATA +1061 -0
  1527. scipy-1.16.2.dist-info/RECORD +1530 -0
  1528. scipy-1.16.2.dist-info/WHEEL +4 -0
  1529. scipy.libs/msvcp140-5f1c5dd31916990d94181e07bc3afb32.dll +0 -0
  1530. scipy.libs/scipy_openblas-f3ac85b1f412f7e86514c923dc4058d1.dll +0 -0
scipy/stats/_qmc.py ADDED
@@ -0,0 +1,2956 @@
1
+ """Quasi-Monte Carlo engines and helpers."""
2
+ import copy
3
+ import math
4
+ import numbers
5
+ import os
6
+ import warnings
7
+ from abc import ABC, abstractmethod
8
+ from functools import partial
9
+ from typing import (
10
+ ClassVar,
11
+ Literal,
12
+ overload,
13
+ TYPE_CHECKING,
14
+ )
15
+ from collections.abc import Callable
16
+
17
+ import numpy as np
18
+
19
+ from scipy._lib._util import DecimalNumber, GeneratorType, IntNumber, SeedType
20
+
21
+ if TYPE_CHECKING:
22
+ import numpy.typing as npt
23
+
24
+ import scipy.stats as stats
25
+ from scipy._lib._util import rng_integers, _rng_spawn, _transition_to_rng
26
+ from scipy.sparse.csgraph import minimum_spanning_tree
27
+ from scipy.spatial import distance, Voronoi
28
+ from scipy.special import gammainc
29
+ from ._sobol import (
30
+ _initialize_v, _cscramble, _fill_p_cumulative, _draw, _fast_forward,
31
+ _categorize, _MAXDIM
32
+ )
33
+ from ._qmc_cy import (
34
+ _cy_wrapper_centered_discrepancy,
35
+ _cy_wrapper_wrap_around_discrepancy,
36
+ _cy_wrapper_mixture_discrepancy,
37
+ _cy_wrapper_l2_star_discrepancy,
38
+ _cy_wrapper_update_discrepancy,
39
+ _cy_van_der_corput_scrambled,
40
+ _cy_van_der_corput,
41
+ )
42
+
43
+
44
+ __all__ = ['scale', 'discrepancy', 'geometric_discrepancy', 'update_discrepancy',
45
+ 'QMCEngine', 'Sobol', 'Halton', 'LatinHypercube', 'PoissonDisk',
46
+ 'MultinomialQMC', 'MultivariateNormalQMC']
47
+
48
+
49
+ @overload
50
+ def check_random_state(seed: IntNumber | None = ...) -> np.random.Generator:
51
+ ...
52
+
53
+
54
+ @overload
55
+ def check_random_state(seed: GeneratorType) -> GeneratorType:
56
+ ...
57
+
58
+
59
+ # Based on scipy._lib._util.check_random_state
60
+ # This is going to be removed at the end of the SPEC 7 transition,
61
+ # so I'll just leave the argument name `seed` alone
62
+ def check_random_state(seed=None):
63
+ """Turn `seed` into a `numpy.random.Generator` instance.
64
+
65
+ Parameters
66
+ ----------
67
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
68
+ If `seed` is an int or None, a new `numpy.random.Generator` is
69
+ created using ``np.random.default_rng(seed)``.
70
+ If `seed` is already a ``Generator`` or ``RandomState`` instance, then
71
+ the provided instance is used.
72
+
73
+ Returns
74
+ -------
75
+ seed : {`numpy.random.Generator`, `numpy.random.RandomState`}
76
+ Random number generator.
77
+
78
+ """
79
+ if seed is None or isinstance(seed, numbers.Integral | np.integer):
80
+ return np.random.default_rng(seed)
81
+ elif isinstance(seed, np.random.RandomState | np.random.Generator):
82
+ return seed
83
+ else:
84
+ raise ValueError(f'{seed!r} cannot be used to seed a'
85
+ ' numpy.random.Generator instance')
86
+
87
+
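A minimal sketch of how the seed-handling helper above behaves (not part of the upstream file; it imports the private module shown here):

>>> import numpy as np
>>> from scipy.stats._qmc import check_random_state
>>> gen = check_random_state(12345)        # int -> np.random.default_rng(12345)
>>> gen is check_random_state(gen)         # an existing Generator is returned as-is
True
>>> rs = np.random.RandomState(0)
>>> rs is check_random_state(rs)           # legacy RandomState is passed through too
True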
88
+ def scale(
89
+ sample: "npt.ArrayLike",
90
+ l_bounds: "npt.ArrayLike",
91
+ u_bounds: "npt.ArrayLike",
92
+ *,
93
+ reverse: bool = False
94
+ ) -> np.ndarray:
95
+ r"""Sample scaling from unit hypercube to different bounds.
96
+
97
+ To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
98
+ with :math:`a` the lower bounds and :math:`b` the upper bounds.
99
+ The following transformation is used:
100
+
101
+ .. math::
102
+
103
+ (b - a) \cdot \text{sample} + a
104
+
105
+ Parameters
106
+ ----------
107
+ sample : array_like (n, d)
108
+ Sample to scale.
109
+ l_bounds, u_bounds : array_like (d,)
110
+ Lower and upper bounds (resp. :math:`a`, :math:`b`) of transformed
111
+ data. If `reverse` is True, range of the original data to transform
112
+ to the unit hypercube.
113
+ reverse : bool, optional
114
+ Reverse the transformation from different bounds to the unit hypercube.
115
+ Default is False.
116
+
117
+ Returns
118
+ -------
119
+ sample : array_like (n, d)
120
+ Scaled sample.
121
+
122
+ Examples
123
+ --------
124
+ Transform 3 samples in the unit hypercube to bounds:
125
+
126
+ >>> from scipy.stats import qmc
127
+ >>> l_bounds = [-2, 0]
128
+ >>> u_bounds = [6, 5]
129
+ >>> sample = [[0.5 , 0.75],
130
+ ... [0.5 , 0.5],
131
+ ... [0.75, 0.25]]
132
+ >>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
133
+ >>> sample_scaled
134
+ array([[2. , 3.75],
135
+ [2. , 2.5 ],
136
+ [4. , 1.25]])
137
+
138
+ And convert back to the unit hypercube:
139
+
140
+ >>> sample_ = qmc.scale(sample_scaled, l_bounds, u_bounds, reverse=True)
141
+ >>> sample_
142
+ array([[0.5 , 0.75],
143
+ [0.5 , 0.5 ],
144
+ [0.75, 0.25]])
145
+
146
+ """
147
+ sample = np.asarray(sample)
148
+
149
+ # Checking bounds and sample
150
+ if not sample.ndim == 2:
151
+ raise ValueError('Sample is not a 2D array')
152
+
153
+ lower, upper = _validate_bounds(
154
+ l_bounds=l_bounds, u_bounds=u_bounds, d=sample.shape[1]
155
+ )
156
+
157
+ if not reverse:
158
+ # Checking that sample is within the hypercube
159
+ if (sample.max() > 1.) or (sample.min() < 0.):
160
+ raise ValueError('Sample is not in unit hypercube')
161
+
162
+ return sample * (upper - lower) + lower
163
+ else:
164
+ # Checking that sample is within the bounds
165
+ if not (np.all(sample >= lower) and np.all(sample <= upper)):
166
+ raise ValueError('Sample is out of bounds')
167
+
168
+ return (sample - lower) / (upper - lower)
169
+
170
+
171
+ def _ensure_in_unit_hypercube(sample: "npt.ArrayLike") -> np.ndarray:
172
+ """Ensure that sample is a 2D array and is within a unit hypercube
173
+
174
+ Parameters
175
+ ----------
176
+ sample : array_like (n, d)
177
+ A 2D array of points.
178
+
179
+ Returns
180
+ -------
181
+ np.ndarray
182
+ The array interpretation of the input sample
183
+
184
+ Raises
185
+ ------
186
+ ValueError
187
+ If the input is not a 2D array or contains points outside of
188
+ a unit hypercube.
189
+ """
190
+ sample = np.asarray(sample, dtype=np.float64, order="C")
191
+
192
+ if not sample.ndim == 2:
193
+ raise ValueError("Sample is not a 2D array")
194
+
195
+ if (sample.max() > 1.) or (sample.min() < 0.):
196
+ raise ValueError("Sample is not in unit hypercube")
197
+
198
+ return sample
199
+
200
+
201
+ def discrepancy(
202
+ sample: "npt.ArrayLike",
203
+ *,
204
+ iterative: bool = False,
205
+ method: Literal["CD", "WD", "MD", "L2-star"] = "CD",
206
+ workers: IntNumber = 1) -> float:
207
+ """Discrepancy of a given sample.
208
+
209
+ Parameters
210
+ ----------
211
+ sample : array_like (n, d)
212
+ The sample to compute the discrepancy from.
213
+ iterative : bool, optional
214
+ Must be False if not using it for updating the discrepancy.
215
+ Default is False. Refer to the notes for more details.
216
+ method : str, optional
217
+ Type of discrepancy, can be ``CD``, ``WD``, ``MD`` or ``L2-star``.
218
+ Refer to the notes for more details. Default is ``CD``.
219
+ workers : int, optional
220
+ Number of workers to use for parallel processing. If -1 is given all
221
+ CPU threads are used. Default is 1.
222
+
223
+ Returns
224
+ -------
225
+ discrepancy : float
226
+ Discrepancy.
227
+
228
+ See Also
229
+ --------
230
+ geometric_discrepancy
231
+
232
+ Notes
233
+ -----
234
+ The discrepancy is a uniformity criterion used to assess the space filling
235
+ of a number of samples in a hypercube. A discrepancy quantifies the
236
+ distance between the continuous uniform distribution on a hypercube and the
237
+ discrete uniform distribution on :math:`n` distinct sample points.
238
+
239
+ The lower the value is, the better the coverage of the parameter space is.
240
+
241
+ For a collection of subsets of the hypercube, the discrepancy is the
242
+ difference between the fraction of sample points in one of those
243
+ subsets and the volume of that subset. There are different definitions of
244
+ discrepancy corresponding to different collections of subsets. Some
245
+ versions take a root mean square difference over subsets instead of
246
+ a maximum.
247
+
248
+ A measure of uniformity is reasonable if it satisfies the following
249
+ criteria [1]_:
250
+
251
+ 1. It is invariant under permuting factors and/or runs.
252
+ 2. It is invariant under rotation of the coordinates.
253
+ 3. It can measure not only uniformity of the sample over the hypercube,
254
+ but also the projection uniformity of the sample over non-empty
255
+ subset of lower dimension hypercubes.
256
+ 4. There is some reasonable geometric meaning.
257
+ 5. It is easy to compute.
258
+ 6. It satisfies the Koksma-Hlawka-like inequality.
259
+ 7. It is consistent with other criteria in experimental design.
260
+
261
+ Four methods are available:
262
+
263
+ * ``CD``: Centered Discrepancy - subspace involves a corner of the
264
+ hypercube
265
+ * ``WD``: Wrap-around Discrepancy - subspace can wrap around bounds
266
+ * ``MD``: Mixture Discrepancy - mix between CD/WD covering more criteria
267
+ * ``L2-star``: L2-star discrepancy - like CD BUT variant to rotation
268
+
269
+ Methods ``CD``, ``WD``, and ``MD`` implement the right hand side of equations
270
+ 9, 10, and 18 of [2]_, respectively; the square root is not taken. On the
271
+ other hand, ``L2-star`` computes the quantity given by equation 10 of
272
+ [3]_ as implemented by subsequent equations; the square root is taken.
273
+
274
+ Lastly, using ``iterative=True``, it is possible to compute the
275
+ discrepancy as if we had :math:`n+1` samples. This is useful if we want
276
+ to add a point to a sampling and check the candidate which would give the
277
+ lowest discrepancy. Then you could just update the discrepancy with
278
+ each candidate using `update_discrepancy`. This method is faster than
279
+ computing the discrepancy for a large number of candidates.
280
+
281
+ References
282
+ ----------
283
+ .. [1] Fang et al. "Design and modeling for computer experiments".
284
+ Computer Science and Data Analysis Series, 2006.
285
+ .. [2] Zhou Y.-D. et al. "Mixture discrepancy for quasi-random point sets."
286
+ Journal of Complexity, 29 (3-4) , pp. 283-301, 2013.
287
+ .. [3] T. T. Warnock. "Computational investigations of low discrepancy
288
+ point sets." Applications of Number Theory to Numerical
289
+ Analysis, Academic Press, pp. 319-343, 1972.
290
+
291
+ Examples
292
+ --------
293
+ Calculate the quality of the sample using the discrepancy:
294
+
295
+ >>> import numpy as np
296
+ >>> from scipy.stats import qmc
297
+ >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
298
+ >>> l_bounds = [0.5, 0.5]
299
+ >>> u_bounds = [6.5, 6.5]
300
+ >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
301
+ >>> space
302
+ array([[0.08333333, 0.41666667],
303
+ [0.25 , 0.91666667],
304
+ [0.41666667, 0.25 ],
305
+ [0.58333333, 0.75 ],
306
+ [0.75 , 0.08333333],
307
+ [0.91666667, 0.58333333]])
308
+ >>> qmc.discrepancy(space)
309
+ 0.008142039609053464
310
+
311
+ We can also compute iteratively the ``CD`` discrepancy by using
312
+ ``iterative=True``.
313
+
314
+ >>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
315
+ >>> disc_init
316
+ 0.04769081147119336
317
+ >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
318
+ 0.008142039609053513
319
+
320
+ """
321
+ sample = _ensure_in_unit_hypercube(sample)
322
+
323
+ workers = _validate_workers(workers)
324
+
325
+ methods = {
326
+ "CD": _cy_wrapper_centered_discrepancy,
327
+ "WD": _cy_wrapper_wrap_around_discrepancy,
328
+ "MD": _cy_wrapper_mixture_discrepancy,
329
+ "L2-star": _cy_wrapper_l2_star_discrepancy,
330
+ }
331
+
332
+ if method in methods:
333
+ return methods[method](sample, iterative, workers=workers)
334
+ else:
335
+ raise ValueError(f"{method!r} is not a valid method. It must be one of"
336
+ f" {set(methods)!r}")
337
+
338
+
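The four methods described in the Notes above can be compared on the same sample; a small sketch (illustrative only, the numerical values are not reproduced here):

>>> import numpy as np
>>> from scipy.stats import qmc
>>> rng = np.random.default_rng(0)
>>> sample = rng.random((16, 2))           # 16 pseudo-random points in the unit square
>>> results = {m: qmc.discrepancy(sample, method=m)
...            for m in ("CD", "WD", "MD", "L2-star")}

For every method a lower value indicates more uniform coverage of the hypercube, so a QMC sample of the same size (e.g. a scrambled Sobol' set) would typically score lower than the pseudo-random sample used here.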
339
+ def geometric_discrepancy(
340
+ sample: "npt.ArrayLike",
341
+ method: Literal["mindist", "mst"] = "mindist",
342
+ metric: str = "euclidean") -> float:
343
+ """Discrepancy of a given sample based on its geometric properties.
344
+
345
+ Parameters
346
+ ----------
347
+ sample : array_like (n, d)
348
+ The sample to compute the discrepancy from.
349
+ method : {"mindist", "mst"}, optional
350
+ The method to use. One of ``mindist`` for minimum distance (default)
351
+ or ``mst`` for minimum spanning tree.
352
+ metric : str or callable, optional
353
+ The distance metric to use. See the documentation
354
+ for `scipy.spatial.distance.pdist` for the available metrics and
355
+ the default.
356
+
357
+ Returns
358
+ -------
359
+ discrepancy : float
360
+ Discrepancy (higher values correspond to greater sample uniformity).
361
+
362
+ See Also
363
+ --------
364
+ discrepancy
365
+
366
+ Notes
367
+ -----
368
+ The discrepancy can serve as a simple measure of quality of a random sample.
369
+ This measure is based on the geometric properties of the distribution of points
370
+ in the sample, such as the minimum distance between any pair of points, or
371
+ the mean edge length in a minimum spanning tree.
372
+
373
+ The higher the value is, the better the coverage of the parameter space is.
374
+ Note that this is different from `scipy.stats.qmc.discrepancy`, where lower
375
+ values correspond to higher quality of the sample.
376
+
377
+ Also note that when comparing different sampling strategies using this function,
378
+ the sample size must be kept constant.
379
+
380
+ It is possible to calculate two metrics from the minimum spanning tree:
381
+ the mean edge length and the standard deviation of edges lengths. Using
382
+ both metrics offers a better picture of uniformity than either metric alone,
383
+ with higher mean and lower standard deviation being preferable (see [1]_
384
+ for a brief discussion). This function currently only calculates the mean
385
+ edge length.
386
+
387
+ References
388
+ ----------
389
+ .. [1] Franco J. et al. "Minimum Spanning Tree: A new approach to assess the quality
390
+ of the design of computer experiments." Chemometrics and Intelligent Laboratory
391
+ Systems, 97 (2), pp. 164-169, 2009.
392
+
393
+ Examples
394
+ --------
395
+ Calculate the quality of the sample using the minimum euclidean distance
396
+ (the defaults):
397
+
398
+ >>> import numpy as np
399
+ >>> from scipy.stats import qmc
400
+ >>> rng = np.random.default_rng(191468432622931918890291693003068437394)
401
+ >>> sample = qmc.LatinHypercube(d=2, rng=rng).random(50)
402
+ >>> qmc.geometric_discrepancy(sample)
403
+ 0.03708161435687876
404
+
405
+ Calculate the quality using the mean edge length in the minimum
406
+ spanning tree:
407
+
408
+ >>> qmc.geometric_discrepancy(sample, method='mst')
409
+ 0.1105149978798376
410
+
411
+ Display the minimum spanning tree and the points with
412
+ the smallest distance:
413
+
414
+ >>> import matplotlib.pyplot as plt
415
+ >>> from matplotlib.lines import Line2D
416
+ >>> from scipy.sparse.csgraph import minimum_spanning_tree
417
+ >>> from scipy.spatial.distance import pdist, squareform
418
+ >>> dist = pdist(sample)
419
+ >>> mst = minimum_spanning_tree(squareform(dist))
420
+ >>> edges = np.where(mst.toarray() > 0)
421
+ >>> edges = np.asarray(edges).T
422
+ >>> min_dist = np.min(dist)
423
+ >>> min_idx = np.argwhere(squareform(dist) == min_dist)[0]
424
+ >>> fig, ax = plt.subplots(figsize=(10, 5))
425
+ >>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$',
426
+ ... xlim=[0, 1], ylim=[0, 1])
427
+ >>> for edge in edges:
428
+ ... ax.plot(sample[edge, 0], sample[edge, 1], c='k')
429
+ >>> ax.scatter(sample[:, 0], sample[:, 1])
430
+ >>> ax.add_patch(plt.Circle(sample[min_idx[0]], min_dist, color='red', fill=False))
431
+ >>> markers = [
432
+ ... Line2D([0], [0], marker='o', lw=0, label='Sample points'),
433
+ ... Line2D([0], [0], color='k', label='Minimum spanning tree'),
434
+ ... Line2D([0], [0], marker='o', lw=0, markerfacecolor='w', markeredgecolor='r',
435
+ ... label='Minimum point-to-point distance'),
436
+ ... ]
437
+ >>> ax.legend(handles=markers, loc='center left', bbox_to_anchor=(1, 0.5));
438
+ >>> plt.show()
439
+
440
+ """
441
+ sample = _ensure_in_unit_hypercube(sample)
442
+ if sample.shape[0] < 2:
443
+ raise ValueError("Sample must contain at least two points")
444
+
445
+ distances = distance.pdist(sample, metric=metric) # type: ignore[call-overload]
446
+
447
+ if np.any(distances == 0.0):
448
+ warnings.warn("Sample contains duplicate points.", stacklevel=2)
449
+
450
+ if method == "mindist":
451
+ return np.min(distances[distances.nonzero()])
452
+ elif method == "mst":
453
+ fully_connected_graph = distance.squareform(distances)
454
+ mst = minimum_spanning_tree(fully_connected_graph)
455
+ distances = mst[mst.nonzero()]
456
+ # TODO consider returning both the mean and the standard deviation
457
+ # see [1] for a discussion
458
+ return np.mean(distances)
459
+ else:
460
+ raise ValueError(f"{method!r} is not a valid method. "
461
+ f"It must be one of {{'mindist', 'mst'}}")
462
+
463
+
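The Notes of ``geometric_discrepancy`` above point out that the mean and the standard deviation of the MST edge lengths are both informative, while the function returns only the mean. A sketch of computing both directly, reusing the same public helpers as the docstring example (illustrative, not part of the upstream file):

>>> import numpy as np
>>> from scipy.sparse.csgraph import minimum_spanning_tree
>>> from scipy.spatial.distance import pdist, squareform
>>> from scipy.stats import qmc
>>> sample = qmc.Halton(d=2, rng=np.random.default_rng(5)).random(32)
>>> mst = minimum_spanning_tree(squareform(pdist(sample))).toarray()
>>> edges = mst[mst > 0]                   # edge lengths of the spanning tree
>>> mean_edge, std_edge = float(edges.mean()), float(edges.std())

A higher ``mean_edge`` together with a lower ``std_edge`` indicates a more evenly spread sample.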
464
+ def update_discrepancy(
465
+ x_new: "npt.ArrayLike",
466
+ sample: "npt.ArrayLike",
467
+ initial_disc: DecimalNumber) -> float:
468
+ """Update the centered discrepancy with a new sample.
469
+
470
+ Parameters
471
+ ----------
472
+ x_new : array_like (1, d)
473
+ The new sample to add in `sample`.
474
+ sample : array_like (n, d)
475
+ The initial sample.
476
+ initial_disc : float
477
+ Centered discrepancy of the `sample`.
478
+
479
+ Returns
480
+ -------
481
+ discrepancy : float
482
+ Centered discrepancy of the sample composed of `x_new` and `sample`.
483
+
484
+ Examples
485
+ --------
486
+ We can also compute iteratively the discrepancy by using
487
+ ``iterative=True``.
488
+
489
+ >>> import numpy as np
490
+ >>> from scipy.stats import qmc
491
+ >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
492
+ >>> l_bounds = [0.5, 0.5]
493
+ >>> u_bounds = [6.5, 6.5]
494
+ >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True)
495
+ >>> disc_init = qmc.discrepancy(space[:-1], iterative=True)
496
+ >>> disc_init
497
+ 0.04769081147119336
498
+ >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init)
499
+ 0.008142039609053513
500
+
501
+ """
502
+ sample = np.asarray(sample, dtype=np.float64, order="C")
503
+ x_new = np.asarray(x_new, dtype=np.float64, order="C")
504
+
505
+ # Checking that sample is within the hypercube and 2D
506
+ if not sample.ndim == 2:
507
+ raise ValueError('Sample is not a 2D array')
508
+
509
+ if (sample.max() > 1.) or (sample.min() < 0.):
510
+ raise ValueError('Sample is not in unit hypercube')
511
+
512
+ # Checking that x_new is within the hypercube and 1D
513
+ if not x_new.ndim == 1:
514
+ raise ValueError('x_new is not a 1D array')
515
+
516
+ if not (np.all(x_new >= 0) and np.all(x_new <= 1)):
517
+ raise ValueError('x_new is not in unit hypercube')
518
+
519
+ if x_new.shape[0] != sample.shape[1]:
520
+ raise ValueError("x_new and sample must be broadcastable")
521
+
522
+ return _cy_wrapper_update_discrepancy(x_new, sample, initial_disc)
523
+
524
+
525
+ def _perturb_discrepancy(sample: np.ndarray, i1: int, i2: int, k: int,
526
+ disc: float):
527
+ """Centered discrepancy after an elementary perturbation of a LHS.
528
+
529
+ An elementary perturbation consists of an exchange of coordinates between
530
+ two points: ``sample[i1, k] <-> sample[i2, k]``. By construction,
531
+ this operation conserves the LHS properties.
532
+
533
+ Parameters
534
+ ----------
535
+ sample : array_like (n, d)
536
+ The sample (before permutation) to compute the discrepancy from.
537
+ i1 : int
538
+ The first line of the elementary permutation.
539
+ i2 : int
540
+ The second line of the elementary permutation.
541
+ k : int
542
+ The column of the elementary permutation.
543
+ disc : float
544
+ Centered discrepancy of the design before permutation.
545
+
546
+ Returns
547
+ -------
548
+ discrepancy : float
549
+ Centered discrepancy of the design after permutation.
550
+
551
+ References
552
+ ----------
553
+ .. [1] Jin et al. "An efficient algorithm for constructing optimal design
554
+ of computer experiments", Journal of Statistical Planning and
555
+ Inference, 2005.
556
+
557
+ """
558
+ n = sample.shape[0]
559
+
560
+ z_ij = sample - 0.5
561
+
562
+ # Eq (19)
563
+ c_i1j = (1. / n ** 2.
564
+ * np.prod(0.5 * (2. + abs(z_ij[i1, :])
565
+ + abs(z_ij) - abs(z_ij[i1, :] - z_ij)), axis=1))
566
+ c_i2j = (1. / n ** 2.
567
+ * np.prod(0.5 * (2. + abs(z_ij[i2, :])
568
+ + abs(z_ij) - abs(z_ij[i2, :] - z_ij)), axis=1))
569
+
570
+ # Eq (20)
571
+ c_i1i1 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i1, :]))
572
+ - 2. / n * np.prod(1. + 0.5 * abs(z_ij[i1, :])
573
+ - 0.5 * z_ij[i1, :] ** 2))
574
+ c_i2i2 = (1. / n ** 2 * np.prod(1 + abs(z_ij[i2, :]))
575
+ - 2. / n * np.prod(1. + 0.5 * abs(z_ij[i2, :])
576
+ - 0.5 * z_ij[i2, :] ** 2))
577
+
578
+ # Eq (22), typo in the article in the denominator i2 -> i1
579
+ num = (2 + abs(z_ij[i2, k]) + abs(z_ij[:, k])
580
+ - abs(z_ij[i2, k] - z_ij[:, k]))
581
+ denum = (2 + abs(z_ij[i1, k]) + abs(z_ij[:, k])
582
+ - abs(z_ij[i1, k] - z_ij[:, k]))
583
+ gamma = num / denum
584
+
585
+ # Eq (23)
586
+ c_p_i1j = gamma * c_i1j
587
+ # Eq (24)
588
+ c_p_i2j = c_i2j / gamma
589
+
590
+ alpha = (1 + abs(z_ij[i2, k])) / (1 + abs(z_ij[i1, k]))
591
+ beta = (2 - abs(z_ij[i2, k])) / (2 - abs(z_ij[i1, k]))
592
+
593
+ g_i1 = np.prod(1. + abs(z_ij[i1, :]))
594
+ g_i2 = np.prod(1. + abs(z_ij[i2, :]))
595
+ h_i1 = np.prod(1. + 0.5 * abs(z_ij[i1, :]) - 0.5 * (z_ij[i1, :] ** 2))
596
+ h_i2 = np.prod(1. + 0.5 * abs(z_ij[i2, :]) - 0.5 * (z_ij[i2, :] ** 2))
597
+
598
+ # Eq (25), typo in the article g is missing
599
+ c_p_i1i1 = ((g_i1 * alpha) / (n ** 2) - 2. * alpha * beta * h_i1 / n)
600
+ # Eq (26), typo in the article n ** 2
601
+ c_p_i2i2 = ((g_i2 / ((n ** 2) * alpha)) - (2. * h_i2 / (n * alpha * beta)))
602
+
603
+ # Eq (26)
604
+ sum_ = c_p_i1j - c_i1j + c_p_i2j - c_i2j
605
+
606
+ mask = np.ones(n, dtype=bool)
607
+ mask[[i1, i2]] = False
608
+ sum_ = sum(sum_[mask])
609
+
610
+ disc_ep = (disc + c_p_i1i1 - c_i1i1 + c_p_i2i2 - c_i2i2 + 2 * sum_)
611
+
612
+ return disc_ep
613
+
614
+
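The incremental update above can be sanity-checked against a full recomputation; a sketch (``_perturb_discrepancy`` is a private helper of this module, and the closing ``True`` assumes the two code paths agree to floating-point tolerance):

>>> import numpy as np
>>> from scipy.stats import qmc
>>> from scipy.stats._qmc import _perturb_discrepancy
>>> sample = qmc.LatinHypercube(d=3, rng=np.random.default_rng(7)).random(8)
>>> disc = qmc.discrepancy(sample)                 # centered discrepancy ("CD")
>>> i1, i2, k = 0, 5, 1                            # swap column k of rows i1 and i2
>>> fast = _perturb_discrepancy(sample, i1, i2, k, disc)
>>> swapped = sample.copy()
>>> swapped[[i1, i2], k] = swapped[[i2, i1], k]
>>> bool(np.isclose(fast, qmc.discrepancy(swapped)))
True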
615
+ def primes_from_2_to(n: int) -> np.ndarray:
616
+ """Prime numbers from 2 to *n*.
617
+
618
+ Parameters
619
+ ----------
620
+ n : int
621
+ Upper bound (exclusive), with ``n >= 6``.
622
+
623
+ Returns
624
+ -------
625
+ primes : ndarray of int
626
+ Primes in ``2 <= p < n``.
627
+
628
+ Notes
629
+ -----
630
+ Taken from [1]_ by P.T. Roy, written consent given on 23.04.2021
631
+ by the original author, Bruno Astrolino, for free use in SciPy under
632
+ the 3-clause BSD.
633
+
634
+ References
635
+ ----------
636
+ .. [1] `StackOverflow <https://stackoverflow.com/questions/2068372>`_.
637
+
638
+ """
639
+ sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
640
+ for i in range(1, int(n ** 0.5) // 3 + 1):
641
+ k = 3 * i + 1 | 1
642
+ sieve[k * k // 3::2 * k] = False
643
+ sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
644
+ return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
645
+
646
+
647
+ def n_primes(n: IntNumber) -> list[int]:
648
+ """List of the n-first prime numbers.
649
+
650
+ Parameters
651
+ ----------
652
+ n : int
653
+ Number of prime numbers wanted.
654
+
655
+ Returns
656
+ -------
657
+ primes : list(int)
658
+ List of primes.
659
+
660
+ """
661
+ primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
662
+ 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
663
+ 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
664
+ 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,
665
+ 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
666
+ 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
667
+ 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
668
+ 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,
669
+ 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,
670
+ 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
671
+ 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
672
+ 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,
673
+ 953, 967, 971, 977, 983, 991, 997][:n]
674
+
675
+ if len(primes) < n:
676
+ big_number = 2000
677
+ while 'Not enough primes':
678
+ primes = primes_from_2_to(big_number)[:n] # type: ignore
679
+ if len(primes) == n:
680
+ break
681
+ big_number += 1000
682
+
683
+ return primes
684
+
685
+
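A quick illustration of the two prime helpers above (they are private to this module; the outputs follow directly from their definitions):

>>> from scipy.stats._qmc import n_primes, primes_from_2_to
>>> primes_from_2_to(20).tolist()          # primes p with 2 <= p < 20
[2, 3, 5, 7, 11, 13, 17, 19]
>>> n_primes(5)                            # first five primes, from the hard-coded table
[2, 3, 5, 7, 11]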
686
+ def _van_der_corput_permutations(
687
+ base: IntNumber, *, rng: SeedType = None
688
+ ) -> np.ndarray:
689
+ """Permutations for scrambling a Van der Corput sequence.
690
+
691
+ Parameters
692
+ ----------
693
+ base : int
694
+ Base of the sequence.
695
+ rng : `numpy.random.Generator`, optional
696
+ Pseudorandom number generator state. When `rng` is None, a new
697
+ `numpy.random.Generator` is created using entropy from the
698
+ operating system. Types other than `numpy.random.Generator` are
699
+ passed to `numpy.random.default_rng` to instantiate a ``Generator``.
700
+
701
+ .. versionchanged:: 1.15.0
702
+
703
+ As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
704
+ transition from use of `numpy.random.RandomState` to
705
+ `numpy.random.Generator`, this keyword was changed from `seed` to
706
+ `rng`. During the transition, the behavior documented above is not
707
+ accurate; see `check_random_state` for actual behavior. After the
708
+ transition, this admonition can be removed.
709
+
710
+ Returns
711
+ -------
712
+ permutations : array_like
713
+ Permutation indices.
714
+
715
+ Notes
716
+ -----
717
+ In Algorithm 1 of Owen 2017, a permutation of `np.arange(base)` is
718
+ created for each positive integer `k` such that ``1 - base**-k < 1``
719
+ using floating-point arithmetic. For double precision floats, the
720
+ condition ``1 - base**-k < 1`` can also be written as ``base**-k >
721
+ 2**-54``, which makes it more apparent how many permutations we need
722
+ to create.
723
+ """
724
+ rng = check_random_state(rng)
725
+ count = math.ceil(54 / math.log2(base)) - 1
726
+ permutations = np.repeat(np.arange(base)[None], count, axis=0)
727
+ for perm in permutations:
728
+ rng.shuffle(perm)
729
+
730
+ return permutations
731
+
732
+
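As a check of the ``count`` formula in the Notes above (plain arithmetic, not from the source): permutations are created for every digit position ``k`` with ``base**-k > 2**-54``, so base 2 needs 53 of them, base 3 needs 34, and base 10 needs 16.

>>> import math
>>> [math.ceil(54 / math.log2(b)) - 1 for b in (2, 3, 10)]
[53, 34, 16]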
733
+ def van_der_corput(
734
+ n: IntNumber,
735
+ base: IntNumber = 2,
736
+ *,
737
+ start_index: IntNumber = 0,
738
+ scramble: bool = False,
739
+ permutations: "npt.ArrayLike | None" = None,
740
+ rng: SeedType = None,
741
+ workers: IntNumber = 1) -> np.ndarray:
742
+ """Van der Corput sequence.
743
+
744
+ Pseudo-random number generator based on a b-adic expansion.
745
+
746
+ Scrambling uses permutations of the remainders (see [1]_). Multiple
747
+ permutations are applied to construct a point. The sequence of
748
+ permutations has to be the same for all points of the sequence.
749
+
750
+ Parameters
751
+ ----------
752
+ n : int
753
+ Number of elements in the sequence.
754
+ base : int, optional
755
+ Base of the sequence. Default is 2.
756
+ start_index : int, optional
757
+ Index to start the sequence from. Default is 0.
758
+ scramble : bool, optional
759
+ If True, use Owen scrambling. Otherwise no scrambling is done.
760
+ Default is False.
761
+ permutations : array_like, optional
762
+ Permutations used for scrambling.
763
+ rng : `numpy.random.Generator`, optional
764
+ Pseudorandom number generator state. When `rng` is None, a new
765
+ `numpy.random.Generator` is created using entropy from the
766
+ operating system. Types other than `numpy.random.Generator` are
767
+ passed to `numpy.random.default_rng` to instantiate a ``Generator``.
768
+ workers : int, optional
769
+ Number of workers to use for parallel processing. If -1 is
770
+ given all CPU threads are used. Default is 1.
771
+
772
+ Returns
773
+ -------
774
+ sequence : ndarray (n,)
775
+ The Van der Corput sequence.
776
+
777
+ References
778
+ ----------
779
+ .. [1] A. B. Owen. "A randomized Halton algorithm in R",
780
+ :arxiv:`1706.02808`, 2017.
781
+
782
+ """
783
+ if base < 2:
784
+ raise ValueError("'base' must be at least 2")
785
+
786
+ if scramble:
787
+ if permutations is None:
788
+ permutations = _van_der_corput_permutations(
789
+ base=base, rng=rng
790
+ )
791
+ else:
792
+ permutations = np.asarray(permutations)
793
+
794
+ permutations = permutations.astype(np.int64)
795
+ return _cy_van_der_corput_scrambled(n, base, start_index,
796
+ permutations, workers)
797
+
798
+ else:
799
+ return _cy_van_der_corput(n, base, start_index, workers)
800
+
801
+
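For intuition about the b-adic expansion used above, here is a pure-Python radical-inverse sketch (an illustration only, not the `_cy_van_der_corput` implementation); without scrambling, element ``i`` of the sequence is the radical inverse of ``start_index + i``:

>>> def radical_inverse(index, base=2):
...     # mirror the base-`base` digits of `index` about the radix point
...     inv, denom = 0.0, 1.0
...     while index > 0:
...         index, digit = divmod(index, base)
...         denom *= base
...         inv += digit / denom
...     return inv
>>> [radical_inverse(i) for i in range(6)]
[0.0, 0.5, 0.25, 0.75, 0.125, 0.625]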
802
+ class QMCEngine(ABC):
803
+ """A generic Quasi-Monte Carlo sampler class meant for subclassing.
804
+
805
+ QMCEngine is a base class to construct a specific Quasi-Monte Carlo
806
+ sampler. It cannot be used directly as a sampler.
807
+
808
+ Parameters
809
+ ----------
810
+ d : int
811
+ Dimension of the parameter space.
812
+ optimization : {None, "random-cd", "lloyd"}, optional
813
+ Whether to use an optimization scheme to improve the quality after
814
+ sampling. Note that this is a post-processing step that does not
815
+ guarantee that all properties of the sample will be conserved.
816
+ Default is None.
817
+
818
+ * ``random-cd``: random permutations of coordinates to lower the
819
+ centered discrepancy. The best sample based on the centered
820
+ discrepancy is constantly updated. Centered discrepancy-based
821
+ sampling shows better space-filling robustness toward 2D and 3D
822
+ subprojections compared to using other discrepancy measures.
823
+ * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
824
+ The process converges to equally spaced samples.
825
+
826
+ .. versionadded:: 1.10.0
827
+
828
+ rng : `numpy.random.Generator`, optional
829
+ Pseudorandom number generator state. When `rng` is None, a new
830
+ `numpy.random.Generator` is created using entropy from the
831
+ operating system. Types other than `numpy.random.Generator` are
832
+ passed to `numpy.random.default_rng` to instantiate a ``Generator``.
833
+
834
+ .. versionchanged:: 1.15.0
835
+
836
+ As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
837
+ transition from use of `numpy.random.RandomState` to
838
+ `numpy.random.Generator`, this keyword was changed from `seed` to
839
+ `rng`. For an interim period, both keywords will continue to work, although
840
+ only one may be specified at a time. After the interim period, function
841
+ calls using the `seed` keyword will emit warnings. Following a
842
+ deprecation period, the `seed` keyword will be removed.
843
+
844
+ Notes
845
+ -----
846
+ By convention samples are distributed over the half-open interval
847
+ ``[0, 1)``. Instances of the class can access the attributes: ``d`` for
848
+ the dimension; and ``rng`` for the random number generator.
849
+
850
+ **Subclassing**
851
+
852
+ When subclassing `QMCEngine` to create a new sampler, ``__init__`` and
853
+ ``random`` must be redefined.
854
+
855
+ * ``__init__(d, rng=None)``: at least fix the dimension. If the sampler
856
+ does not take advantage of a ``rng`` (deterministic methods like
857
+ Halton), this parameter can be omitted.
858
+ * ``_random(n, *, workers=1)``: draw ``n`` points from the engine. ``workers``
859
+ is used for parallelism. See `Halton` for example.
860
+
861
+ Optionally, two other methods can be overwritten by subclasses:
862
+
863
+ * ``reset``: Reset the engine to its original state.
864
+ * ``fast_forward``: If the sequence is deterministic (like Halton
865
+ sequence), then ``fast_forward(n)`` skips the first ``n`` draws.
866
+
867
+ Examples
868
+ --------
869
+ To create a random sampler based on ``np.random.random``, we would do the
870
+ following:
871
+
872
+ >>> from scipy.stats import qmc
873
+ >>> class RandomEngine(qmc.QMCEngine):
874
+ ... def __init__(self, d, rng=None):
875
+ ... super().__init__(d=d, rng=rng)
876
+ ...
877
+ ...
878
+ ... def _random(self, n=1, *, workers=1):
879
+ ... return self.rng.random((n, self.d))
880
+ ...
881
+ ...
882
+ ... def reset(self):
883
+ ... super().__init__(d=self.d, rng=self.rng_seed)
884
+ ... return self
885
+ ...
886
+ ...
887
+ ... def fast_forward(self, n):
888
+ ... self.random(n)
889
+ ... return self
890
+
891
+ After subclassing `QMCEngine` to define the sampling strategy we want to
892
+ use, we can create an instance to sample from.
893
+
894
+ >>> engine = RandomEngine(2)
895
+ >>> engine.random(5)
896
+ array([[0.22733602, 0.31675834], # random
897
+ [0.79736546, 0.67625467],
898
+ [0.39110955, 0.33281393],
899
+ [0.59830875, 0.18673419],
900
+ [0.67275604, 0.94180287]])
901
+
902
+ We can also reset the state of the generator and resample again.
903
+
904
+ >>> _ = engine.reset()
905
+ >>> engine.random(5)
906
+ array([[0.22733602, 0.31675834], # random
907
+ [0.79736546, 0.67625467],
908
+ [0.39110955, 0.33281393],
909
+ [0.59830875, 0.18673419],
910
+ [0.67275604, 0.94180287]])
911
+
912
+ """
913
+
914
+ @abstractmethod
915
+ @_transition_to_rng('seed', replace_doc=False)
916
+ def __init__(
917
+ self,
918
+ d: IntNumber,
919
+ *,
920
+ optimization: Literal["random-cd", "lloyd"] | None = None,
921
+ rng: SeedType = None
922
+ ) -> None:
923
+ self._initialize(d, optimization=optimization, rng=rng)
924
+
925
+ # During SPEC 7 transition:
926
+ # `__init__` has to be wrapped with @_transition_to_rng decorator
927
+ # because it is public. Subclasses previously called `__init__`
928
+ # directly, but this was problematic because arguments passed to
929
+ # subclass `__init__` as `seed` would get passed to superclass
930
+ # `__init__` as `rng`, rejecting `RandomState` arguments.
931
+ def _initialize(
932
+ self,
933
+ d: IntNumber,
934
+ *,
935
+ optimization: Literal["random-cd", "lloyd"] | None = None,
936
+ rng: SeedType = None
937
+ ) -> None:
938
+ if not np.issubdtype(type(d), np.integer) or d < 0:
939
+ raise ValueError('d must be a non-negative integer value')
940
+
941
+ self.d = d
942
+
943
+ if isinstance(rng, np.random.Generator):
944
+ # Spawn a Generator that we can own and reset.
945
+ self.rng = _rng_spawn(rng, 1)[0]
946
+ else:
947
+ # Create our instance of Generator, does not need spawning
948
+ # Also catch RandomState which cannot be spawned
949
+ self.rng = check_random_state(rng)
950
+ self.rng_seed = copy.deepcopy(self.rng)
951
+
952
+ self.num_generated = 0
953
+
954
+ config = {
955
+ # random-cd
956
+ "n_nochange": 100,
957
+ "n_iters": 10_000,
958
+ "rng": self.rng,
959
+
960
+ # lloyd
961
+ "tol": 1e-5,
962
+ "maxiter": 10,
963
+ "qhull_options": None,
964
+ }
965
+ self._optimization = optimization
966
+ self.optimization_method = _select_optimizer(optimization, config)
967
+
968
+ @abstractmethod
969
+ def _random(
970
+ self, n: IntNumber = 1, *, workers: IntNumber = 1
971
+ ) -> np.ndarray:
972
+ ...
973
+
974
+ def random(
975
+ self, n: IntNumber = 1, *, workers: IntNumber = 1
976
+ ) -> np.ndarray:
977
+ """Draw `n` in the half-open interval ``[0, 1)``.
978
+
979
+ Parameters
980
+ ----------
981
+ n : int, optional
982
+ Number of samples to generate in the parameter space.
983
+ Default is 1.
984
+ workers : int, optional
985
+ Only supported with `Halton`.
986
+ Number of workers to use for parallel processing. If -1 is
987
+ given all CPU threads are used. Default is 1. It becomes faster
988
+ than one worker for `n` greater than :math:`10^3`.
989
+
990
+ Returns
991
+ -------
992
+ sample : array_like (n, d)
993
+ QMC sample.
994
+
995
+ """
996
+ sample = self._random(n, workers=workers)
997
+ if self.optimization_method is not None:
998
+ sample = self.optimization_method(sample)
999
+
1000
+ self.num_generated += n
1001
+ return sample
1002
+
1003
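A brief sketch of the `random`/`workers` contract documented above, assuming the public `scipy.stats.qmc.Halton` engine defined later in this module (the only engine that supports `workers`):

from scipy.stats import qmc

sample = qmc.Halton(d=5).random(2**14, workers=-1)  # all CPU threads for a large draw
sample.shape                                        # (16384, 5)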
+ def integers(
1004
+ self,
1005
+ l_bounds: "npt.ArrayLike",
1006
+ *,
1007
+ u_bounds: "npt.ArrayLike | None" = None,
1008
+ n: IntNumber = 1,
1009
+ endpoint: bool = False,
1010
+ workers: IntNumber = 1
1011
+ ) -> np.ndarray:
1012
+ r"""
1013
+ Draw `n` integers from `l_bounds` (inclusive) to `u_bounds`
1014
+ (exclusive), or if endpoint=True, `l_bounds` (inclusive) to
1015
+ `u_bounds` (inclusive).
1016
+
1017
+ Parameters
1018
+ ----------
1019
+ l_bounds : int or array-like of ints
1020
+ Lowest (signed) integers to be drawn (unless ``u_bounds=None``,
1021
+ in which case this parameter is 0 and this value is used for
1022
+ `u_bounds`).
1023
+ u_bounds : int or array-like of ints, optional
1024
+ If provided, one above the largest (signed) integer to be drawn
1025
+ (see above for behavior if ``u_bounds=None``).
1026
+ If array-like, must contain integer values.
1027
+ n : int, optional
1028
+ Number of samples to generate in the parameter space.
1029
+ Default is 1.
1030
+ endpoint : bool, optional
1031
+ If True, sample from the interval ``[l_bounds, u_bounds]`` instead
1032
+ of the default ``[l_bounds, u_bounds)``. Default is False.
1033
+ workers : int, optional
1034
+ Number of workers to use for parallel processing. If -1 is
1035
+ given, all CPU threads are used. Only supported when using `Halton`.
1036
+ Default is 1.
1037
+
1038
+ Returns
1039
+ -------
1040
+ sample : array_like (n, d)
1041
+ QMC sample.
1042
+
1043
+ Notes
1044
+ -----
1045
+ It is safe to just use the same ``[0, 1)`` to integer mapping
1046
+ with QMC that you would use with MC. You still get unbiasedness,
1047
+ a strong law of large numbers, an asymptotically infinite variance
1048
+ reduction and a finite sample variance bound.
1049
+
1050
+ To convert a sample from :math:`[0, 1)` to :math:`[a, b), b>a`,
1051
+ with :math:`a` the lower bounds and :math:`b` the upper bounds,
1052
+ the following transformation is used:
1053
+
1054
+ .. math::
1055
+
1056
+ \text{floor}((b - a) \cdot \text{sample} + a)
1057
+
1058
+ """
1059
+ if u_bounds is None:
1060
+ u_bounds = l_bounds
1061
+ l_bounds = 0
1062
+
1063
+ u_bounds = np.atleast_1d(u_bounds)
1064
+ l_bounds = np.atleast_1d(l_bounds)
1065
+
1066
+ if endpoint:
1067
+ u_bounds = u_bounds + 1
1068
+
1069
+ if (not np.issubdtype(l_bounds.dtype, np.integer) or
1070
+ not np.issubdtype(u_bounds.dtype, np.integer)):
1071
+ message = ("'u_bounds' and 'l_bounds' must be integers or"
1072
+ " array-like of integers")
1073
+ raise ValueError(message)
1074
+
1075
+ if isinstance(self, Halton):
1076
+ sample = self.random(n=n, workers=workers)
1077
+ else:
1078
+ sample = self.random(n=n)
1079
+
1080
+ sample = scale(sample, l_bounds=l_bounds, u_bounds=u_bounds)
1081
+ sample = np.floor(sample).astype(np.int64)
1082
+
1083
+ return sample
1084
+
1085
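A worked check of the integer mapping described in the Notes above; the bounds and sample values are illustrative only.

import numpy as np

a, b = 2, 6                                   # draw integers in [2, 6)
u = np.array([0.0, 0.2499, 0.5, 0.999])       # points in [0, 1)
np.floor((b - a) * u + a).astype(int)         # array([2, 2, 4, 5])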
+ def reset(self) -> "QMCEngine":
1086
+ """Reset the engine to base state.
1087
+
1088
+ Returns
1089
+ -------
1090
+ engine : QMCEngine
1091
+ Engine reset to its base state.
1092
+
1093
+ """
1094
+ rng = copy.deepcopy(self.rng_seed)
1095
+ self.rng = check_random_state(rng)
1096
+ self.num_generated = 0
1097
+ return self
1098
+
1099
+ def fast_forward(self, n: IntNumber) -> "QMCEngine":
1100
+ """Fast-forward the sequence by `n` positions.
1101
+
1102
+ Parameters
1103
+ ----------
1104
+ n : int
1105
+ Number of points to skip in the sequence.
1106
+
1107
+ Returns
1108
+ -------
1109
+ engine : QMCEngine
1110
+ Engine reset to its base state.
1111
+
1112
+ """
1113
+ self.random(n=n)
1114
+ return self
1115
+
1116
+
1117
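A small sketch of the `fast_forward` contract for a deterministic engine, using the unscrambled `Halton` sampler defined below: skipping `k` points and drawing `n` matches rows `k:k+n` of a single larger draw.

import numpy as np
from scipy.stats import qmc

reference = qmc.Halton(d=2, scramble=False).random(8)[3:6]   # points 3, 4, 5
engine = qmc.Halton(d=2, scramble=False)
skipped = engine.fast_forward(3).random(3)                   # skip 3, then draw 3
assert np.allclose(reference, skipped)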
+ class Halton(QMCEngine):
1118
+ """Halton sequence.
1119
+
1120
+ Quasi-random sequence that generalizes the Van der Corput sequence
1121
+ to multiple dimensions. The Halton sequence uses the base-two Van der
1122
+ Corput sequence for the first dimension, base-three for its second and
1123
+ base-:math:`p` for its :math:`n`-th dimension, with :math:`p` the
1124
+ :math:`n`'th prime.
1125
+
1126
+ Parameters
1127
+ ----------
1128
+ d : int
1129
+ Dimension of the parameter space.
1130
+ scramble : bool, optional
1131
+ If True, use random scrambling from [2]_. Otherwise no scrambling
1132
+ is done.
1133
+ Default is True.
1134
+ optimization : {None, "random-cd", "lloyd"}, optional
1135
+ Whether to use an optimization scheme to improve the quality after
1136
+ sampling. Note that this is a post-processing step that does not
1137
+ guarantee that all properties of the sample will be conserved.
1138
+ Default is None.
1139
+
1140
+ * ``random-cd``: random permutations of coordinates to lower the
1141
+ centered discrepancy. The best sample based on the centered
1142
+ discrepancy is constantly updated. Centered discrepancy-based
1143
+ sampling shows better space-filling robustness toward 2D and 3D
1144
+ subprojections compared to using other discrepancy measures.
1145
+ * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
1146
+ The process converges to equally spaced samples.
1147
+
1148
+ .. versionadded:: 1.10.0
1149
+
1150
+ rng : `numpy.random.Generator`, optional
1151
+ Pseudorandom number generator state. When `rng` is None, a new
1152
+ `numpy.random.Generator` is created using entropy from the
1153
+ operating system. Types other than `numpy.random.Generator` are
1154
+ passed to `numpy.random.default_rng` to instantiate a ``Generator``.
1155
+
1156
+ .. versionchanged:: 1.15.0
1157
+
1158
+ As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
1159
+ transition from use of `numpy.random.RandomState` to
1160
+ `numpy.random.Generator`, this keyword was changed from `seed` to
1161
+ `rng`. For an interim period, both keywords will continue to work, although
1162
+ only one may be specified at a time. After the interim period, function
1163
+ calls using the `seed` keyword will emit warnings. Following a
1164
+ deprecation period, the `seed` keyword will be removed.
1165
+
1166
+ Notes
1167
+ -----
1168
+ The Halton sequence has severe striping artifacts for even modestly
1169
+ large dimensions. These can be ameliorated by scrambling. Scrambling
1170
+ also supports replication-based error estimates and extends
1171
+ applicability to unbounded integrands.
1172
+
1173
+ References
1174
+ ----------
1175
+ .. [1] Halton, "On the efficiency of certain quasi-random sequences of
1176
+ points in evaluating multi-dimensional integrals", Numerische
1177
+ Mathematik, 1960.
1178
+ .. [2] A. B. Owen. "A randomized Halton algorithm in R",
1179
+ :arxiv:`1706.02808`, 2017.
1180
+
1181
+ Examples
1182
+ --------
1183
+ Generate samples from a low discrepancy sequence of Halton.
1184
+
1185
+ >>> from scipy.stats import qmc
1186
+ >>> sampler = qmc.Halton(d=2, scramble=False)
1187
+ >>> sample = sampler.random(n=5)
1188
+ >>> sample
1189
+ array([[0. , 0. ],
1190
+ [0.5 , 0.33333333],
1191
+ [0.25 , 0.66666667],
1192
+ [0.75 , 0.11111111],
1193
+ [0.125 , 0.44444444]])
1194
+
1195
+ Compute the quality of the sample using the discrepancy criterion.
1196
+
1197
+ >>> qmc.discrepancy(sample)
1198
+ 0.088893711419753
1199
+
1200
+ If one wants to continue an existing design, extra points can be obtained
1201
+ by calling `random` again. Alternatively, you can skip some points like:
1202
+
1203
+ >>> _ = sampler.fast_forward(5)
1204
+ >>> sample_continued = sampler.random(n=5)
1205
+ >>> sample_continued
1206
+ array([[0.3125 , 0.37037037],
1207
+ [0.8125 , 0.7037037 ],
1208
+ [0.1875 , 0.14814815],
1209
+ [0.6875 , 0.48148148],
1210
+ [0.4375 , 0.81481481]])
1211
+
1212
+ Finally, samples can be scaled to bounds.
1213
+
1214
+ >>> l_bounds = [0, 2]
1215
+ >>> u_bounds = [10, 5]
1216
+ >>> qmc.scale(sample_continued, l_bounds, u_bounds)
1217
+ array([[3.125 , 3.11111111],
1218
+ [8.125 , 4.11111111],
1219
+ [1.875 , 2.44444444],
1220
+ [6.875 , 3.44444444],
1221
+ [4.375 , 4.44444444]])
1222
+
1223
+ """
1224
+ @_transition_to_rng('seed', replace_doc=False)
1225
+ def __init__(
1226
+ self, d: IntNumber, *, scramble: bool = True,
1227
+ optimization: Literal["random-cd", "lloyd"] | None = None,
1228
+ rng: SeedType = None
1229
+ ) -> None:
1230
+ # Used in `scipy.integrate.qmc_quad`
1231
+ self._init_quad = {'d': d, 'scramble': True,
1232
+ 'optimization': optimization}
1233
+ super()._initialize(d=d, optimization=optimization, rng=rng)
1234
+
1235
+ # important to have ``type(bdim) == int`` for performance reason
1236
+ self.base = [int(bdim) for bdim in n_primes(d)]
1237
+ self.scramble = scramble
1238
+
1239
+ self._initialize_permutations()
1240
+
1241
+ def _initialize_permutations(self) -> None:
1242
+ """Initialize permutations for all Van der Corput sequences.
1243
+
1244
+ Permutations are only needed for scrambling.
1245
+ """
1246
+ self._permutations: list = [None] * len(self.base)
1247
+ if self.scramble:
1248
+ for i, bdim in enumerate(self.base):
1249
+ permutations = _van_der_corput_permutations(
1250
+ base=bdim, rng=self.rng
1251
+ )
1252
+
1253
+ self._permutations[i] = permutations
1254
+
1255
+ def _random(
1256
+ self, n: IntNumber = 1, *, workers: IntNumber = 1
1257
+ ) -> np.ndarray:
1258
+ """Draw `n` in the half-open interval ``[0, 1)``.
1259
+
1260
+ Parameters
1261
+ ----------
1262
+ n : int, optional
1263
+ Number of samples to generate in the parameter space. Default is 1.
1264
+ workers : int, optional
1265
+ Number of workers to use for parallel processing. If -1 is
1266
+ given all CPU threads are used. Default is 1. It becomes faster
1267
+ than one worker for `n` greater than :math:`10^3`.
1268
+
1269
+ Returns
1270
+ -------
1271
+ sample : array_like (n, d)
1272
+ QMC sample.
1273
+
1274
+ """
1275
+ workers = _validate_workers(workers)
1276
+ # Generate a sample using a Van der Corput sequence per dimension.
1277
+ sample = [van_der_corput(n, bdim, start_index=self.num_generated,
1278
+ scramble=self.scramble,
1279
+ permutations=self._permutations[i],
1280
+ workers=workers)
1281
+ for i, bdim in enumerate(self.base)]
1282
+
1283
+ return np.array(sample).T.reshape(n, self.d)
1284
+
1285
+
1286
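A sketch confirming the per-dimension construction described in the `Halton` docstring: column 0 of the unscrambled engine is the base-2 Van der Corput sequence and column 1 the base-3 one.

import numpy as np
from scipy.stats import qmc

s = qmc.Halton(d=2, scramble=False).random(4)
assert np.allclose(s[:, 0], [0, 1/2, 1/4, 3/4])    # base 2
assert np.allclose(s[:, 1], [0, 1/3, 2/3, 1/9])    # base 3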
+ class LatinHypercube(QMCEngine):
1287
+ r"""Latin hypercube sampling (LHS).
1288
+
1289
+ A Latin hypercube sample [1]_ generates :math:`n` points in
1290
+ :math:`[0,1)^{d}`. Each univariate marginal distribution is stratified,
1291
+ placing exactly one point in :math:`[j/n, (j+1)/n)` for
1292
+ :math:`j=0,1,...,n-1`. They are still applicable when :math:`n << d`.
1293
+
1294
+ Parameters
1295
+ ----------
1296
+ d : int
1297
+ Dimension of the parameter space.
1298
+ scramble : bool, optional
1299
+ When False, center samples within cells of a multi-dimensional grid.
1300
+ Otherwise, samples are randomly placed within cells of the grid.
1301
+
1302
+ .. note::
1303
+ Setting ``scramble=False`` does not ensure deterministic output.
1304
+ For that, use the `rng` parameter.
1305
+
1306
+ Default is True.
1307
+
1308
+ .. versionadded:: 1.10.0
1309
+
1310
+ optimization : {None, "random-cd", "lloyd"}, optional
1311
+ Whether to use an optimization scheme to improve the quality after
1312
+ sampling. Note that this is a post-processing step that does not
1313
+ guarantee that all properties of the sample will be conserved.
1314
+ Default is None.
1315
+
1316
+ * ``random-cd``: random permutations of coordinates to lower the
1317
+ centered discrepancy. The best sample based on the centered
1318
+ discrepancy is constantly updated. Centered discrepancy-based
1319
+ sampling shows better space-filling robustness toward 2D and 3D
1320
+ subprojections compared to using other discrepancy measures.
1321
+ * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
1322
+ The process converges to equally spaced samples.
1323
+
1324
+ .. versionadded:: 1.8.0
1325
+ .. versionchanged:: 1.10.0
1326
+ Add ``lloyd``.
1327
+
1328
+ strength : {1, 2}, optional
1329
+ Strength of the LHS. ``strength=1`` produces a plain LHS while
1330
+ ``strength=2`` produces an orthogonal array based LHS of strength 2
1331
+ [7]_, [8]_. In that case, only ``n=p**2`` points can be sampled,
1332
+ with ``p`` a prime number. It also constrains ``d <= p + 1``.
1333
+ Default is 1.
1334
+
1335
+ .. versionadded:: 1.8.0
1336
+
1337
+ rng : `numpy.random.Generator`, optional
1338
+ Pseudorandom number generator state. When `rng` is None, a new
1339
+ `numpy.random.Generator` is created using entropy from the
1340
+ operating system. Types other than `numpy.random.Generator` are
1341
+ passed to `numpy.random.default_rng` to instantiate a ``Generator``.
1342
+
1343
+ .. versionchanged:: 1.15.0
1344
+
1345
+ As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
1346
+ transition from use of `numpy.random.RandomState` to
1347
+ `numpy.random.Generator`, this keyword was changed from `seed` to
1348
+ `rng`. For an interim period, both keywords will continue to work, although
1349
+ only one may be specified at a time. After the interim period, function
1350
+ calls using the `seed` keyword will emit warnings. Following a
1351
+ deprecation period, the `seed` keyword will be removed.
1352
+
1353
+ See Also
1354
+ --------
1355
+ :ref:`quasi-monte-carlo`
1356
+
1357
+ Notes
1358
+ -----
1359
+
1360
+ When LHS is used for integrating a function :math:`f` over :math:`[0,1)^{d}`,
1361
+ LHS is extremely effective on integrands that are nearly additive [2]_.
1362
+ With a LHS of :math:`n` points, the variance of the integral is always
1363
+ lower than plain MC on :math:`n-1` points [3]_. There is a central limit
1364
+ theorem for LHS on the mean and variance of the integral [4]_, but not
1365
+ necessarily for optimized LHS due to the randomization.
1366
+
1367
+ :math:`A` is called an orthogonal array of strength :math:`t` if in each
1368
+ n-row-by-t-column submatrix of :math:`A`: all :math:`p^t` possible
1369
+ distinct rows occur the same number of times. The elements of :math:`A`
1370
+ are in the set :math:`\{0, 1, ..., p-1\}`, also called symbols.
1371
+ The constraint that :math:`p` must be a prime number is to allow modular
1372
+ arithmetic. Increasing strength adds some symmetry to the sub-projections
1373
+ of a sample. With strength 2, samples are symmetric along the diagonals of
1374
+ 2D sub-projections. This may be undesirable, but on the other hand, the
1375
+ sample dispersion is improved.
1376
+
1377
+ Strength 1 (plain LHS) brings an advantage over strength 0 (MC) and
1378
+ strength 2 is a useful increment over strength 1. Going to strength 3 is
1379
+ a smaller increment and scrambled QMC like Sobol', Halton are more
1380
+ performant [7]_.
1381
+
1382
+ To create a LHS of strength 2, the orthogonal array :math:`A` is
1383
+ randomized by applying a random, bijective map of the set of symbols onto
1384
+ itself. For example, in column 0, all 0s might become 2; in column 1,
1385
+ all 0s might become 1, etc.
1386
+ Then, for each column :math:`i` and symbol :math:`j`, we add a plain,
1387
+ one-dimensional LHS of size :math:`p` to the subarray where
1388
+ :math:`A^i = j`. The resulting matrix is finally divided by :math:`p`.
1389
+
1390
+ References
1391
+ ----------
1392
+ .. [1] Mckay et al., "A Comparison of Three Methods for Selecting Values
1393
+ of Input Variables in the Analysis of Output from a Computer Code."
1394
+ Technometrics, 1979.
1395
+ .. [2] M. Stein, "Large sample properties of simulations using Latin
1396
+ hypercube sampling." Technometrics 29, no. 2: 143-151, 1987.
1397
+ .. [3] A. B. Owen, "Monte Carlo variance of scrambled net quadrature."
1398
+ SIAM Journal on Numerical Analysis 34, no. 5: 1884-1910, 1997
1399
+ .. [4] Loh, W.-L. "On Latin hypercube sampling." The annals of statistics
1400
+ 24, no. 5: 2058-2080, 1996.
1401
+ .. [5] Fang et al. "Design and modeling for computer experiments".
1402
+ Computer Science and Data Analysis Series, 2006.
1403
+ .. [6] Damblin et al., "Numerical studies of space filling designs:
1404
+ optimization of Latin Hypercube Samples and subprojection properties."
1405
+ Journal of Simulation, 2013.
1406
+ .. [7] A. B. Owen , "Orthogonal arrays for computer experiments,
1407
+ integration and visualization." Statistica Sinica, 1992.
1408
+ .. [8] B. Tang, "Orthogonal Array-Based Latin Hypercubes."
1409
+ Journal of the American Statistical Association, 1993.
1410
+ .. [9] Seaholm, Susan K. et al. (1988). Latin hypercube sampling and the
1411
+ sensitivity analysis of a Monte Carlo epidemic model. Int J Biomed
1412
+ Comput, 23(1-2), 97-112. :doi:`10.1016/0020-7101(88)90067-0`
1413
+
1414
+ Examples
1415
+ --------
1416
+ Generate samples from a Latin hypercube generator.
1417
+
1418
+ >>> from scipy.stats import qmc
1419
+ >>> sampler = qmc.LatinHypercube(d=2)
1420
+ >>> sample = sampler.random(n=5)
1421
+ >>> sample
1422
+ array([[0.1545328 , 0.53664833], # random
1423
+ [0.84052691, 0.06474907],
1424
+ [0.52177809, 0.93343721],
1425
+ [0.68033825, 0.36265316],
1426
+ [0.26544879, 0.61163943]])
1427
+
1428
+ Compute the quality of the sample using the discrepancy criterion.
1429
+
1430
+ >>> qmc.discrepancy(sample)
1431
+ 0.0196... # random
1432
+
1433
+ Samples can be scaled to bounds.
1434
+
1435
+ >>> l_bounds = [0, 2]
1436
+ >>> u_bounds = [10, 5]
1437
+ >>> qmc.scale(sample, l_bounds, u_bounds)
1438
+ array([[1.54532796, 3.609945 ], # random
1439
+ [8.40526909, 2.1942472 ],
1440
+ [5.2177809 , 4.80031164],
1441
+ [6.80338249, 3.08795949],
1442
+ [2.65448791, 3.83491828]])
1443
+
1444
+ Below are other examples showing alternative ways to construct LHS with
1445
+ even better coverage of the space.
1446
+
1447
+ Using a base LHS as a baseline.
1448
+
1449
+ >>> sampler = qmc.LatinHypercube(d=2)
1450
+ >>> sample = sampler.random(n=5)
1451
+ >>> qmc.discrepancy(sample)
1452
+ 0.0196... # random
1453
+
1454
+ Use the `optimization` keyword argument to produce a LHS with
1455
+ lower discrepancy at higher computational cost.
1456
+
1457
+ >>> sampler = qmc.LatinHypercube(d=2, optimization="random-cd")
1458
+ >>> sample = sampler.random(n=5)
1459
+ >>> qmc.discrepancy(sample)
1460
+ 0.0176... # random
1461
+
1462
+ Use the `strength` keyword argument to produce an orthogonal array based
1463
+ LHS of strength 2. In this case, the number of sample points must be the
1464
+ square of a prime number.
1465
+
1466
+ >>> sampler = qmc.LatinHypercube(d=2, strength=2)
1467
+ >>> sample = sampler.random(n=9)
1468
+ >>> qmc.discrepancy(sample)
1469
+ 0.00526... # random
1470
+
1471
+ Options could be combined to produce an optimized centered
1472
+ orthogonal array based LHS. After optimization, the result would not
1473
+ be guaranteed to be of strength 2.
1474
+
1475
+ **Real-world example**
1476
+
1477
+ In [9]_, a Latin Hypercube sampling (LHS) strategy was used to sample a
1478
+ parameter space to study the importance of each parameter of an epidemic
1479
+ model. Such analysis is also called a sensitivity analysis.
1480
+
1481
+ Since the dimensionality of the problem is high (6), it is computationally
1482
+ expensive to cover the space. When numerical experiments are costly, QMC
1483
+ enables analysis that may not be possible if using a grid.
1484
+
1485
+ The six parameters of the model represented the probability of illness,
1486
+ the probability of withdrawal, and four contact probabilities. The
1487
+ authors assumed uniform distributions for all parameters and generated
1488
+ 50 samples.
1489
+
1490
+ Using `scipy.stats.qmc.LatinHypercube` to replicate the protocol,
1491
+ the first step is to create a sample in the unit hypercube:
1492
+
1493
+ >>> from scipy.stats import qmc
1494
+ >>> sampler = qmc.LatinHypercube(d=6)
1495
+ >>> sample = sampler.random(n=50)
1496
+
1497
+ Then the sample can be scaled to the appropriate bounds:
1498
+
1499
+ >>> l_bounds = [0.000125, 0.01, 0.0025, 0.05, 0.47, 0.7]
1500
+ >>> u_bounds = [0.000375, 0.03, 0.0075, 0.15, 0.87, 0.9]
1501
+ >>> sample_scaled = qmc.scale(sample, l_bounds, u_bounds)
1502
+
1503
+ Such a sample was used to run the model 50 times, and a polynomial
1504
+ response surface was constructed. This allowed the authors to study the
1505
+ relative importance of each parameter across the range of possibilities
1506
+ of every other parameter.
1507
+
1508
+ In this computer experiment, they showed a 14-fold reduction in the
1509
+ number of samples required to maintain an error below 2% on their
1510
+ response surface when compared to a grid sampling.
1511
+
1512
+ """
1513
+
1514
+ @_transition_to_rng('seed', replace_doc=False)
1515
+ def __init__(
1516
+ self, d: IntNumber, *,
1517
+ scramble: bool = True,
1518
+ strength: int = 1,
1519
+ optimization: Literal["random-cd", "lloyd"] | None = None,
1520
+ rng: SeedType = None
1521
+ ) -> None:
1522
+ # Used in `scipy.integrate.qmc_quad`
1523
+ self._init_quad = {'d': d, 'scramble': True, 'strength': strength,
1524
+ 'optimization': optimization}
1525
+ super()._initialize(d=d, rng=rng, optimization=optimization)
1526
+ self.scramble = scramble
1527
+
1528
+ lhs_method_strength = {
1529
+ 1: self._random_lhs,
1530
+ 2: self._random_oa_lhs
1531
+ }
1532
+
1533
+ try:
1534
+ self.lhs_method: Callable = lhs_method_strength[strength]
1535
+ except KeyError as exc:
1536
+ message = (f"{strength!r} is not a valid strength. It must be one"
1537
+ f" of {set(lhs_method_strength)!r}")
1538
+ raise ValueError(message) from exc
1539
+
1540
+ def _random(
1541
+ self, n: IntNumber = 1, *, workers: IntNumber = 1
1542
+ ) -> np.ndarray:
1543
+ lhs = self.lhs_method(n)
1544
+ return lhs
1545
+
1546
+ def _random_lhs(self, n: IntNumber = 1) -> np.ndarray:
1547
+ """Base LHS algorithm."""
1548
+ if not self.scramble:
1549
+ samples: np.ndarray | float = 0.5
1550
+ else:
1551
+ samples = self.rng.uniform(size=(n, self.d))
1552
+
1553
+ perms = np.tile(np.arange(1, n + 1),
1554
+ (self.d, 1)) # type: ignore[arg-type]
1555
+ for i in range(self.d):
1556
+ self.rng.shuffle(perms[i, :])
1557
+ perms = perms.T
1558
+
1559
+ samples = (perms - samples) / n
1560
+ return samples
1561
+
1562
+ def _random_oa_lhs(self, n: IntNumber = 4) -> np.ndarray:
1563
+ """Orthogonal array based LHS of strength 2."""
1564
+ p = np.sqrt(n).astype(int)
1565
+ n_row = p**2
1566
+ n_col = p + 1
1567
+
1568
+ primes = primes_from_2_to(p + 1)
1569
+ if p not in primes or n != n_row:
1570
+ raise ValueError(
1571
+ "n is not the square of a prime number. Close"
1572
+ f" values are {primes[-2:]**2}"
1573
+ )
1574
+ if self.d > p + 1:
1575
+ raise ValueError("n is too small for d. Must be n > (d-1)**2")
1576
+
1577
+ oa_sample = np.zeros(shape=(n_row, n_col), dtype=int)
1578
+
1579
+ # OA of strength 2
1580
+ arrays = np.tile(np.arange(p), (2, 1))
1581
+ oa_sample[:, :2] = np.stack(np.meshgrid(*arrays),
1582
+ axis=-1).reshape(-1, 2)
1583
+ for p_ in range(1, p):
1584
+ oa_sample[:, 2+p_-1] = np.mod(oa_sample[:, 0]
1585
+ + p_*oa_sample[:, 1], p)
1586
+
1587
+ # scramble the OA
1588
+ oa_sample_ = np.empty(shape=(n_row, n_col), dtype=int)
1589
+ for j in range(n_col):
1590
+ perms = self.rng.permutation(p)
1591
+ oa_sample_[:, j] = perms[oa_sample[:, j]]
1592
+
1593
+ oa_sample = oa_sample_
1594
+ # following is making a scrambled OA into an OA-LHS
1595
+ oa_lhs_sample = np.zeros(shape=(n_row, n_col))
1596
+ lhs_engine = LatinHypercube(d=1, scramble=self.scramble, strength=1,
1597
+ rng=self.rng) # type: QMCEngine
1598
+ for j in range(n_col):
1599
+ for k in range(p):
1600
+ idx = oa_sample[:, j] == k
1601
+ lhs = lhs_engine.random(p).flatten()
1602
+ oa_lhs_sample[:, j][idx] = lhs + oa_sample[:, j][idx]
1603
+
1604
+ oa_lhs_sample /= p
1605
+
1606
+ return oa_lhs_sample[:, :self.d]
1607
+
1608
+
1609
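A sketch of the stratification property stated in the `LatinHypercube` docstring: each column of an `n`-point sample places exactly one value in each bin ``[j/n, (j+1)/n)``.

import numpy as np
from scipy.stats import qmc

n = 8
s = qmc.LatinHypercube(d=3).random(n)
bins = np.floor(s * n).astype(int)       # stratum index of each point, per column
assert all(sorted(bins[:, j]) == list(range(n)) for j in range(3))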
+ class Sobol(QMCEngine):
1610
+ """Engine for generating (scrambled) Sobol' sequences.
1611
+
1612
+ Sobol' sequences are low-discrepancy, quasi-random numbers. Points
1613
+ can be drawn using two methods:
1614
+
1615
+ * `random_base2`: safely draw :math:`n=2^m` points. This method
1616
+ guarantees the balance properties of the sequence.
1617
+ * `random`: draw an arbitrary number of points from the
1618
+ sequence. See warning below.
1619
+
1620
+ Parameters
1621
+ ----------
1622
+ d : int
1623
+ Dimensionality of the sequence. Max dimensionality is 21201.
1624
+ scramble : bool, optional
1625
+ If True, use LMS+shift scrambling. Otherwise, no scrambling is done.
1626
+ Default is True.
1627
+ bits : int, optional
1628
+ Number of bits of the generator. Controls the maximum number of points
1629
+ that can be generated, which is ``2**bits``. Maximal value is 64.
1630
+ It does not correspond to the return type, which is always
1631
+ ``np.float64`` to prevent points from repeating themselves.
1632
+ Default is None, which for backward compatibility, corresponds to 30.
1633
+
1634
+ .. versionadded:: 1.9.0
1635
+ optimization : {None, "random-cd", "lloyd"}, optional
1636
+ Whether to use an optimization scheme to improve the quality after
1637
+ sampling. Note that this is a post-processing step that does not
1638
+ guarantee that all properties of the sample will be conserved.
1639
+ Default is None.
1640
+
1641
+ * ``random-cd``: random permutations of coordinates to lower the
1642
+ centered discrepancy. The best sample based on the centered
1643
+ discrepancy is constantly updated. Centered discrepancy-based
1644
+ sampling shows better space-filling robustness toward 2D and 3D
1645
+ subprojections compared to using other discrepancy measures.
1646
+ * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
1647
+ The process converges to equally spaced samples.
1648
+
1649
+ .. versionadded:: 1.10.0
1650
+
1651
+ rng : `numpy.random.Generator`, optional
1652
+ Pseudorandom number generator state. When `rng` is None, a new
1653
+ `numpy.random.Generator` is created using entropy from the
1654
+ operating system. Types other than `numpy.random.Generator` are
1655
+ passed to `numpy.random.default_rng` to instantiate a ``Generator``.
1656
+
1657
+ .. versionchanged:: 1.15.0
1658
+
1659
+ As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
1660
+ transition from use of `numpy.random.RandomState` to
1661
+ `numpy.random.Generator`, this keyword was changed from `seed` to
1662
+ `rng`. For an interim period, both keywords will continue to work, although
1663
+ only one may be specified at a time. After the interim period, function
1664
+ calls using the `seed` keyword will emit warnings. Following a
1665
+ deprecation period, the `seed` keyword will be removed.
1666
+
1667
+ Notes
1668
+ -----
1669
+ Sobol' sequences [1]_ provide :math:`n=2^m` low discrepancy points in
1670
+ :math:`[0,1)^{d}`. Scrambling them [3]_ makes them suitable for singular
1671
+ integrands, provides a means of error estimation, and can improve their
1672
+ rate of convergence. The scrambling strategy which is implemented is a
1673
+ (left) linear matrix scramble (LMS) followed by a digital random shift
1674
+ (LMS+shift) [2]_.
1675
+
1676
+ There are many versions of Sobol' sequences depending on their
1677
+ 'direction numbers'. This code uses direction numbers from [4]_. Hence,
1678
+ the maximum number of dimensions is 21201. The direction numbers have been
1679
+ precomputed with search criterion 6 and can be retrieved at
1680
+ https://web.maths.unsw.edu.au/~fkuo/sobol/.
1681
+
1682
+ .. warning::
1683
+
1684
+ Sobol' sequences are a quadrature rule and they lose their balance
1685
+ properties if one uses a sample size that is not a power of 2, or skips
1686
+ the first point, or thins the sequence [5]_.
1687
+
1688
+ If :math:`n=2^m` points are not enough then one should take :math:`2^M`
1689
+ points for :math:`M>m`. When scrambling, the number R of independent
1690
+ replicates does not have to be a power of 2.
1691
+
1692
+ Sobol' sequences are generated to some number :math:`B` of bits.
1693
+ After :math:`2^B` points have been generated, the sequence would
1694
+ repeat. Hence, an error is raised.
1695
+ The number of bits can be controlled with the parameter `bits`.
1696
+
1697
+ References
1698
+ ----------
1699
+ .. [1] I. M. Sobol', "The distribution of points in a cube and the accurate
1700
+ evaluation of integrals." Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
1701
+ 1967.
1702
+ .. [2] J. Matousek, "On the L2-discrepancy for anchored boxes."
1703
+ J. of Complexity 14, 527-556, 1998.
1704
+ .. [3] Art B. Owen, "Scrambling Sobol and Niederreiter-Xing points."
1705
+ Journal of Complexity, 14(4):466-489, December 1998.
1706
+ .. [4] S. Joe and F. Y. Kuo, "Constructing sobol sequences with better
1707
+ two-dimensional projections." SIAM Journal on Scientific Computing,
1708
+ 30(5):2635-2654, 2008.
1709
+ .. [5] Art B. Owen, "On dropping the first Sobol' point."
1710
+ :arxiv:`2008.08051`, 2020.
1711
+
1712
+ Examples
1713
+ --------
1714
+ Generate samples from a low discrepancy sequence of Sobol'.
1715
+
1716
+ >>> from scipy.stats import qmc
1717
+ >>> sampler = qmc.Sobol(d=2, scramble=False)
1718
+ >>> sample = sampler.random_base2(m=3)
1719
+ >>> sample
1720
+ array([[0. , 0. ],
1721
+ [0.5 , 0.5 ],
1722
+ [0.75 , 0.25 ],
1723
+ [0.25 , 0.75 ],
1724
+ [0.375, 0.375],
1725
+ [0.875, 0.875],
1726
+ [0.625, 0.125],
1727
+ [0.125, 0.625]])
1728
+
1729
+ Compute the quality of the sample using the discrepancy criterion.
1730
+
1731
+ >>> qmc.discrepancy(sample)
1732
+ 0.013882107204860938
1733
+
1734
+ To continue an existing design, extra points can be obtained
1735
+ by calling `random_base2` again. Alternatively, you can skip some
1736
+ points like:
1737
+
1738
+ >>> _ = sampler.reset()
1739
+ >>> _ = sampler.fast_forward(4)
1740
+ >>> sample_continued = sampler.random_base2(m=2)
1741
+ >>> sample_continued
1742
+ array([[0.375, 0.375],
1743
+ [0.875, 0.875],
1744
+ [0.625, 0.125],
1745
+ [0.125, 0.625]])
1746
+
1747
+ Finally, samples can be scaled to bounds.
1748
+
1749
+ >>> l_bounds = [0, 2]
1750
+ >>> u_bounds = [10, 5]
1751
+ >>> qmc.scale(sample_continued, l_bounds, u_bounds)
1752
+ array([[3.75 , 3.125],
1753
+ [8.75 , 4.625],
1754
+ [6.25 , 2.375],
1755
+ [1.25 , 3.875]])
1756
+
1757
+ """
1758
+
1759
+ MAXDIM: ClassVar[int] = _MAXDIM
1760
+
1761
+ @_transition_to_rng('seed', replace_doc=False)
1762
+ def __init__(
1763
+ self, d: IntNumber, *, scramble: bool = True,
1764
+ bits: IntNumber | None = None, rng: SeedType = None,
1765
+ optimization: Literal["random-cd", "lloyd"] | None = None
1766
+ ) -> None:
1767
+ # Used in `scipy.integrate.qmc_quad`
1768
+ self._init_quad = {'d': d, 'scramble': True, 'bits': bits,
1769
+ 'optimization': optimization}
1770
+
1771
+ super()._initialize(d=d, optimization=optimization, rng=rng)
1772
+ if d > self.MAXDIM:
1773
+ raise ValueError(
1774
+ f"Maximum supported dimensionality is {self.MAXDIM}."
1775
+ )
1776
+
1777
+ self.bits = bits
1778
+ self.dtype_i: type
1779
+ self.scramble = scramble
1780
+
1781
+ if self.bits is None:
1782
+ self.bits = 30
1783
+
1784
+ if self.bits <= 32:
1785
+ self.dtype_i = np.uint32
1786
+ elif 32 < self.bits <= 64:
1787
+ self.dtype_i = np.uint64
1788
+ else:
1789
+ raise ValueError("Maximum supported 'bits' is 64")
1790
+
1791
+ self.maxn = 2**self.bits
1792
+
1793
+ # v is d x maxbit matrix
1794
+ self._sv: np.ndarray = np.zeros((d, self.bits), dtype=self.dtype_i)
1795
+ _initialize_v(self._sv, dim=d, bits=self.bits)
1796
+
1797
+ if not scramble:
1798
+ self._shift: np.ndarray = np.zeros(d, dtype=self.dtype_i)
1799
+ else:
1800
+ # scramble self._shift and self._sv
1801
+ self._scramble()
1802
+
1803
+ self._quasi = self._shift.copy()
1804
+
1805
+ # normalization constant with the largest possible number
1806
+ # calculate in Python to not overflow int with 2**64
1807
+ self._scale = 1.0 / 2 ** self.bits
1808
+
1809
+ self._first_point = (self._quasi * self._scale).reshape(1, -1)
1810
+ # explicit casting to float64
1811
+ self._first_point = self._first_point.astype(np.float64)
1812
+
1813
+ def _scramble(self) -> None:
1814
+ """Scramble the sequence using LMS+shift."""
1815
+ # Generate shift vector
1816
+ self._shift = np.dot(
1817
+ rng_integers(self.rng, 2, size=(self.d, self.bits),
1818
+ dtype=self.dtype_i),
1819
+ 2 ** np.arange(self.bits, dtype=self.dtype_i),
1820
+ )
1821
+ # Generate lower triangular matrices (stacked across dimensions)
1822
+ ltm = np.tril(rng_integers(self.rng, 2,
1823
+ size=(self.d, self.bits, self.bits),
1824
+ dtype=self.dtype_i))
1825
+ _cscramble(
1826
+ dim=self.d, bits=self.bits, # type: ignore[arg-type]
1827
+ ltm=ltm, sv=self._sv
1828
+ )
1829
+
1830
+ def _random(
1831
+ self, n: IntNumber = 1, *, workers: IntNumber = 1
1832
+ ) -> np.ndarray:
1833
+ """Draw next point(s) in the Sobol' sequence.
1834
+
1835
+ Parameters
1836
+ ----------
1837
+ n : int, optional
1838
+ Number of samples to generate in the parameter space. Default is 1.
1839
+
1840
+ Returns
1841
+ -------
1842
+ sample : array_like (n, d)
1843
+ Sobol' sample.
1844
+
1845
+ """
1846
+ sample: np.ndarray = np.empty((n, self.d), dtype=np.float64)
1847
+
1848
+ if n == 0:
1849
+ return sample
1850
+
1851
+ total_n = self.num_generated + n
1852
+ if total_n > self.maxn:
1853
+ msg = (
1854
+ f"At most 2**{self.bits}={self.maxn} distinct points can be "
1855
+ f"generated. {self.num_generated} points have been previously "
1856
+ f"generated, then: n={self.num_generated}+{n}={total_n}. "
1857
+ )
1858
+ if self.bits != 64:
1859
+ msg += "Consider increasing `bits`."
1860
+ raise ValueError(msg)
1861
+
1862
+ if self.num_generated == 0:
1863
+ # verify n is 2**n
1864
+ if not (n & (n - 1) == 0):
1865
+ warnings.warn("The balance properties of Sobol' points require"
1866
+ " n to be a power of 2.", stacklevel=3)
1867
+
1868
+ if n == 1:
1869
+ sample = self._first_point
1870
+ else:
1871
+ _draw(
1872
+ n=n - 1, num_gen=self.num_generated, dim=self.d,
1873
+ scale=self._scale, sv=self._sv, quasi=self._quasi,
1874
+ sample=sample
1875
+ )
1876
+ sample = np.concatenate(
1877
+ [self._first_point, sample]
1878
+ )[:n]
1879
+ else:
1880
+ _draw(
1881
+ n=n, num_gen=self.num_generated - 1, dim=self.d,
1882
+ scale=self._scale, sv=self._sv, quasi=self._quasi,
1883
+ sample=sample
1884
+ )
1885
+
1886
+ return sample
1887
+
1888
+ def random_base2(self, m: IntNumber) -> np.ndarray:
1889
+ """Draw point(s) from the Sobol' sequence.
1890
+
1891
+ This function draws :math:`n=2^m` points in the parameter space
1892
+ ensuring the balance properties of the sequence.
1893
+
1894
+ Parameters
1895
+ ----------
1896
+ m : int
1897
+ Logarithm in base 2 of the number of samples; i.e., n = 2^m.
1898
+
1899
+ Returns
1900
+ -------
1901
+ sample : array_like (n, d)
1902
+ Sobol' sample.
1903
+
1904
+ """
1905
+ n = 2 ** m
1906
+
1907
+ total_n = self.num_generated + n
1908
+ if not (total_n & (total_n - 1) == 0):
1909
+ raise ValueError('The balance properties of Sobol\' points require '
1910
+ f'n to be a power of 2. {self.num_generated} points '
1911
+ 'have been previously generated, then: '
1912
+ f'n={self.num_generated}+2**{m}={total_n}. '
1913
+ 'If you still want to do this, the function '
1914
+ '\'Sobol.random()\' can be used.'
1915
+ )
1916
+
1917
+ return self.random(n)
1918
+
1919
+ def reset(self) -> "Sobol":
1920
+ """Reset the engine to base state.
1921
+
1922
+ Returns
1923
+ -------
1924
+ engine : Sobol
1925
+ Engine reset to its base state.
1926
+
1927
+ """
1928
+ super().reset()
1929
+ self._quasi = self._shift.copy()
1930
+ return self
1931
+
1932
+ def fast_forward(self, n: IntNumber) -> "Sobol":
1933
+ """Fast-forward the sequence by `n` positions.
1934
+
1935
+ Parameters
1936
+ ----------
1937
+ n : int
1938
+ Number of points to skip in the sequence.
1939
+
1940
+ Returns
1941
+ -------
1942
+ engine : Sobol
1943
+ The fast-forwarded engine.
1944
+
1945
+ """
1946
+ if self.num_generated == 0:
1947
+ _fast_forward(
1948
+ n=n - 1, num_gen=self.num_generated, dim=self.d,
1949
+ sv=self._sv, quasi=self._quasi
1950
+ )
1951
+ else:
1952
+ _fast_forward(
1953
+ n=n, num_gen=self.num_generated - 1, dim=self.d,
1954
+ sv=self._sv, quasi=self._quasi
1955
+ )
1956
+ self.num_generated += n
1957
+ return self
1958
+
1959
+
1960
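A sketch of the base-2 contract described in the `Sobol` docstring; drawing with `random_base2` keeps the total number of generated points a power of two.

from scipy.stats import qmc

engine = qmc.Sobol(d=2, scramble=False)
x = engine.random_base2(m=3)      # 2**3 = 8 points, balance properties preserved
x.shape                           # (8, 2)
# engine.random_base2(m=1) would now raise: 8 + 2 = 10 is not a power of two.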
+ class PoissonDisk(QMCEngine):
1961
+ """Poisson disk sampling.
1962
+
1963
+ Parameters
1964
+ ----------
1965
+ d : int
1966
+ Dimension of the parameter space.
1967
+ radius : float
1968
+ Minimal distance to keep between points when sampling new candidates.
1969
+ hypersphere : {"volume", "surface"}, optional
1970
+ Sampling strategy to generate potential candidates to be added in the
1971
+ final sample. Default is "volume".
1972
+
1973
+ * ``volume``: original Bridson algorithm as described in [1]_.
1974
+ New candidates are sampled *within* the hypersphere.
1975
+ * ``surface``: only sample the surface of the hypersphere.
1976
+ ncandidates : int
1977
+ Number of candidates to sample per iteration. More candidates result
1978
+ in a denser sampling as more candidates can be accepted per iteration.
1979
+ optimization : {None, "random-cd", "lloyd"}, optional
1980
+ Whether to use an optimization scheme to improve the quality after
1981
+ sampling. Note that this is a post-processing step that does not
1982
+ guarantee that all properties of the sample will be conserved.
1983
+ Default is None.
1984
+
1985
+ * ``random-cd``: random permutations of coordinates to lower the
1986
+ centered discrepancy. The best sample based on the centered
1987
+ discrepancy is constantly updated. Centered discrepancy-based
1988
+ sampling shows better space-filling robustness toward 2D and 3D
1989
+ subprojections compared to using other discrepancy measures.
1990
+ * ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
1991
+ The process converges to equally spaced samples.
1992
+
1993
+ .. versionadded:: 1.10.0
1994
+
1995
+ rng : `numpy.random.Generator`, optional
1996
+ Pseudorandom number generator state. When `rng` is None, a new
1997
+ `numpy.random.Generator` is created using entropy from the
1998
+ operating system. Types other than `numpy.random.Generator` are
1999
+ passed to `numpy.random.default_rng` to instantiate a ``Generator``.
2000
+
2001
+ .. versionchanged:: 1.15.0
2002
+
2003
+ As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
2004
+ transition from use of `numpy.random.RandomState` to
2005
+ `numpy.random.Generator`, this keyword was changed from `seed` to
2006
+ `rng`. For an interim period, both keywords will continue to work, although
2007
+ only one may be specified at a time. After the interim period, function
2008
+ calls using the `seed` keyword will emit warnings. Following a
2009
+ deprecation period, the `seed` keyword will be removed.
2010
+
2011
+ l_bounds, u_bounds : array_like (d,)
2012
+ Lower and upper bounds of target sample data.
2013
+
2014
+ Notes
2015
+ -----
2016
+ Poisson disk sampling is an iterative sampling strategy. Starting from
2017
+ a seed sample, `ncandidates` are sampled in the hypersphere
2018
+ surrounding the seed. Candidates below a certain `radius` or outside the
2019
+ domain are rejected. New samples are added in a pool of sample seed. The
2020
+ process stops when the pool is empty or when the number of required
2021
+ samples is reached.
2022
+
2023
+ The maximum number of points that a sample can contain is directly linked
2024
+ to the `radius`. As the dimension of the space increases, a higher radius
2025
+ spreads the points further and helps overcome the curse of dimensionality.
2026
+ See the :ref:`quasi monte carlo tutorial <quasi-monte-carlo>` for more
2027
+ details.
2028
+
2029
+ .. warning::
2030
+
2031
+ The algorithm is more suitable for low dimensions and small sample sizes
2032
+ due to its iterative nature and memory requirements.
2033
+ Selecting a small radius in a high dimension means that the space
2034
+ can hold far more samples than it would with a lower dimension or a
2035
+ larger radius.
2036
+
2037
+ Some code taken from [2]_, written consent given on 31.03.2021
2038
+ by the original author, Shamis, for free use in SciPy under
2039
+ the 3-clause BSD.
2040
+
2041
+ References
2042
+ ----------
2043
+ .. [1] Robert Bridson, "Fast Poisson Disk Sampling in Arbitrary
2044
+ Dimensions." SIGGRAPH, 2007.
2045
+ .. [2] `StackOverflow <https://stackoverflow.com/questions/66047540>`__.
2046
+
2047
+ Examples
2048
+ --------
2049
+ Generate a 2D sample using a `radius` of 0.2.
2050
+
2051
+ >>> import numpy as np
2052
+ >>> import matplotlib.pyplot as plt
2053
+ >>> from matplotlib.collections import PatchCollection
2054
+ >>> from scipy.stats import qmc
2055
+ >>>
2056
+ >>> rng = np.random.default_rng()
2057
+ >>> radius = 0.2
2058
+ >>> engine = qmc.PoissonDisk(d=2, radius=radius, rng=rng)
2059
+ >>> sample = engine.random(20)
2060
+
2061
+ Visualizing the 2D sample and showing that no points are closer than
2062
+ `radius`. ``radius/2`` is used to visualize non-intersecting circles.
2063
+ If two samples are exactly at `radius` from each other, then their circle
2064
+ of radius ``radius/2`` will touch.
2065
+
2066
+ >>> fig, ax = plt.subplots()
2067
+ >>> _ = ax.scatter(sample[:, 0], sample[:, 1])
2068
+ >>> circles = [plt.Circle((xi, yi), radius=radius/2, fill=False)
2069
+ ... for xi, yi in sample]
2070
+ >>> collection = PatchCollection(circles, match_original=True)
2071
+ >>> ax.add_collection(collection)
2072
+ >>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$',
2073
+ ... xlim=[0, 1], ylim=[0, 1])
2074
+ >>> plt.show()
2075
+
2076
+ Such visualization can be seen as circle packing: how many circles can
2077
+ we put in the space? It is an NP-hard problem. The method `fill_space`
2078
+ can be used to add samples until no more samples can be added. This is
2079
+ a hard problem and parameters may need to be adjusted manually. Beware of
2080
+ the dimension: as the dimensionality increases, the number of samples
2081
+ required to fill the space increases exponentially
2082
+ (curse-of-dimensionality).
2083
+
2084
+ """
2085
+
2086
+ @_transition_to_rng('seed', replace_doc=False)
2087
+ def __init__(
2088
+ self,
2089
+ d: IntNumber,
2090
+ *,
2091
+ radius: DecimalNumber = 0.05,
2092
+ hypersphere: Literal["volume", "surface"] = "volume",
2093
+ ncandidates: IntNumber = 30,
2094
+ optimization: Literal["random-cd", "lloyd"] | None = None,
2095
+ rng: SeedType = None,
2096
+ l_bounds: "npt.ArrayLike | None" = None,
2097
+ u_bounds: "npt.ArrayLike | None" = None,
2098
+ ) -> None:
2099
+ # Used in `scipy.integrate.qmc_quad`
2100
+ self._init_quad = {'d': d, 'radius': radius,
2101
+ 'hypersphere': hypersphere,
2102
+ 'ncandidates': ncandidates,
2103
+ 'optimization': optimization}
2104
+ super()._initialize(d=d, optimization=optimization, rng=rng)
2105
+
2106
+ hypersphere_sample = {
2107
+ "volume": self._hypersphere_volume_sample,
2108
+ "surface": self._hypersphere_surface_sample
2109
+ }
2110
+
2111
+ try:
2112
+ self.hypersphere_method = hypersphere_sample[hypersphere]
2113
+ except KeyError as exc:
2114
+ message = (
2115
+ f"{hypersphere!r} is not a valid hypersphere sampling"
2116
+ f" method. It must be one of {set(hypersphere_sample)!r}")
2117
+ raise ValueError(message) from exc
2118
+
2119
+ # size of the sphere from which the samples are drawn relative to the
2120
+ # size of a disk (radius)
2121
+ # for the surface sampler, all new points are almost exactly 1 radius
2122
+ # away from at least one existing sample +eps to avoid rejection
2123
+ self.radius_factor = 2 if hypersphere == "volume" else 1.001
2124
+ self.radius = radius
2125
+ self.radius_squared = self.radius**2
2126
+
2127
+ # sample to generate per iteration in the hypersphere around center
2128
+ self.ncandidates = ncandidates
2129
+
2130
+ if u_bounds is None:
2131
+ u_bounds = np.ones(d)
2132
+ if l_bounds is None:
2133
+ l_bounds = np.zeros(d)
2134
+ self.l_bounds, self.u_bounds = _validate_bounds(
2135
+ l_bounds=l_bounds, u_bounds=u_bounds, d=int(d)
2136
+ )
2137
+
2138
+ with np.errstate(divide='ignore'):
2139
+ self.cell_size = self.radius / np.sqrt(self.d)
2140
+ self.grid_size = (
2141
+ np.ceil((self.u_bounds - self.l_bounds) / self.cell_size)
2142
+ ).astype(int)
2143
+
2144
+ self._initialize_grid_pool()
2145
+
2146
+ def _initialize_grid_pool(self):
2147
+ """Sampling pool and sample grid."""
2148
+ self.sample_pool = []
2149
+ # Positions of cells
2150
+ # n-dim value for each grid cell
2151
+ self.sample_grid = np.empty(
2152
+ np.append(self.grid_size, self.d),
2153
+ dtype=np.float32
2154
+ )
2155
+ # Initialise empty cells with NaNs
2156
+ self.sample_grid.fill(np.nan)
2157
+
2158
+ def _random(
2159
+ self, n: IntNumber = 1, *, workers: IntNumber = 1
2160
+ ) -> np.ndarray:
2161
+ """Draw `n` in the interval ``[l_bounds, u_bounds]``.
2162
+
2163
+ Note that it can return fewer samples if the space is full.
2164
+ See the note section of the class.
2165
+
2166
+ Parameters
2167
+ ----------
2168
+ n : int, optional
2169
+ Number of samples to generate in the parameter space. Default is 1.
2170
+
2171
+ Returns
2172
+ -------
2173
+ sample : array_like (n, d)
2174
+ QMC sample.
2175
+
2176
+ """
2177
+ if n == 0 or self.d == 0:
2178
+ return np.empty((n, self.d))
2179
+
2180
+ def in_limits(sample: np.ndarray) -> bool:
2181
+ for i in range(self.d):
2182
+ if (sample[i] > self.u_bounds[i] or sample[i] < self.l_bounds[i]):
2183
+ return False
2184
+ return True
2185
+
2186
+ def in_neighborhood(candidate: np.ndarray, n: int = 2) -> bool:
2187
+ """
2188
+ Check if there are samples closer than ``radius_squared`` to the
2189
+ `candidate` sample.
2190
+ """
2191
+ indices = ((candidate - self.l_bounds) / self.cell_size).astype(int)
2192
+ ind_min = np.maximum(indices - n, self.l_bounds.astype(int))
2193
+ ind_max = np.minimum(indices + n + 1, self.grid_size)
2194
+
2195
+ # Check if the center cell is empty
2196
+ if not np.isnan(self.sample_grid[tuple(indices)][0]):
2197
+ return True
2198
+
2199
+ a = [slice(ind_min[i], ind_max[i]) for i in range(self.d)]
2200
+
2201
+ # guards against: invalid value encountered in less as we are
2202
+ # comparing with nan and returns False. Which is wanted.
2203
+ with np.errstate(invalid='ignore'):
2204
+ if np.any(
2205
+ np.sum(
2206
+ np.square(candidate - self.sample_grid[tuple(a)]),
2207
+ axis=self.d
2208
+ ) < self.radius_squared
2209
+ ):
2210
+ return True
2211
+
2212
+ return False
2213
+
2214
+ def add_sample(candidate: np.ndarray) -> None:
2215
+ self.sample_pool.append(candidate)
2216
+ indices = ((candidate - self.l_bounds) / self.cell_size).astype(int)
2217
+ self.sample_grid[tuple(indices)] = candidate
2218
+ curr_sample.append(candidate)
2219
+
2220
+ curr_sample: list[np.ndarray] = []
2221
+
2222
+ if len(self.sample_pool) == 0:
2223
+ # the pool is being initialized with a single random sample
2224
+ add_sample(self.rng.uniform(self.l_bounds, self.u_bounds))
2225
+ num_drawn = 1
2226
+ else:
2227
+ num_drawn = 0
2228
+
2229
+ # exhaust sample pool to have up to n sample
2230
+ while len(self.sample_pool) and num_drawn < n:
2231
+ # select a sample from the available pool
2232
+ idx_center = rng_integers(self.rng, len(self.sample_pool))
2233
+ center = self.sample_pool[idx_center]
2234
+ del self.sample_pool[idx_center]
2235
+
2236
+ # generate candidates around the center sample
2237
+ candidates = self.hypersphere_method(
2238
+ center, self.radius * self.radius_factor, self.ncandidates
2239
+ )
2240
+
2241
+ # keep candidates that satisfy some conditions
2242
+ for candidate in candidates:
2243
+ if in_limits(candidate) and not in_neighborhood(candidate):
2244
+ add_sample(candidate)
2245
+
2246
+ num_drawn += 1
2247
+ if num_drawn >= n:
2248
+ break
2249
+
2250
+ self.num_generated += num_drawn
2251
+ return np.array(curr_sample)
2252
+
2253
+ def fill_space(self) -> np.ndarray:
2254
+ """Draw ``n`` samples in the interval ``[l_bounds, u_bounds]``.
2255
+
2256
+ Unlike `random`, this method will try to add points until
2257
+ the space is full. Depending on ``ncandidates`` (and to a lesser extent
2258
+ other parameters), some empty areas can still be present in the sample.
2259
+
2260
+ .. warning::
2261
+
2262
+ This can be extremely slow in high dimensions or if the
2263
+ ``radius`` is very small with respect to the dimensionality.
2264
+
2265
+ Returns
2266
+ -------
2267
+ sample : array_like (n, d)
2268
+ QMC sample.
2269
+
2270
+ """
2271
+ return self.random(np.inf) # type: ignore[arg-type]
2272
+
2273
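A sketch of `fill_space` usage; the number of accepted points depends on the radius, the dimension, and the rng, so it is not fixed.

from scipy.stats import qmc

engine = qmc.PoissonDisk(d=2, radius=0.25)
full = engine.fill_space()        # keeps adding points until no candidate is accepted
len(full)                         # varies from run to run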
+ def reset(self) -> "PoissonDisk":
2274
+ """Reset the engine to base state.
2275
+
2276
+ Returns
2277
+ -------
2278
+ engine : PoissonDisk
2279
+ Engine reset to its base state.
2280
+
2281
+ """
2282
+ super().reset()
2283
+ self._initialize_grid_pool()
2284
+ return self
2285
+
2286
+ def _hypersphere_volume_sample(
2287
+ self, center: np.ndarray, radius: DecimalNumber,
2288
+ candidates: IntNumber = 1
2289
+ ) -> np.ndarray:
2290
+ """Uniform sampling within hypersphere."""
2291
+ # should remove samples within r/2
2292
+ x = self.rng.standard_normal(size=(candidates, self.d))
2293
+ ssq = np.sum(x**2, axis=1)
2294
+ fr = radius * gammainc(self.d/2, ssq/2)**(1/self.d) / np.sqrt(ssq)
2295
+ fr_tiled = np.tile(
2296
+ fr.reshape(-1, 1), (1, self.d) # type: ignore[arg-type]
2297
+ )
2298
+ p = center + np.multiply(x, fr_tiled)
2299
+ return p
2300
+
2301
+ def _hypersphere_surface_sample(
2302
+ self, center: np.ndarray, radius: DecimalNumber,
2303
+ candidates: IntNumber = 1
2304
+ ) -> np.ndarray:
2305
+ """Uniform sampling on the hypersphere's surface."""
2306
+ vec = self.rng.standard_normal(size=(candidates, self.d))
2307
+ vec /= np.linalg.norm(vec, axis=1)[:, None]
2308
+ p = center + np.multiply(vec, radius)
2309
+ return p
2310
+
2311
+
2312
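A sketch verifying the minimum-distance guarantee enforced by `in_neighborhood` above; the seed value is arbitrary.

import numpy as np
from scipy.spatial.distance import pdist
from scipy.stats import qmc

radius = 0.2
engine = qmc.PoissonDisk(d=2, radius=radius, rng=np.random.default_rng(7))
s = engine.random(20)                       # may return fewer points if the space fills up
if len(s) > 1:
    assert pdist(s).min() >= radius - 1e-9  # accepted points are at least `radius` apart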
+ class MultivariateNormalQMC:
2313
+ r"""QMC sampling from a multivariate Normal :math:`N(\mu, \Sigma)`.
2314
+
2315
+ Parameters
2316
+ ----------
2317
+ mean : array_like (d,)
2318
+ The mean vector. Where ``d`` is the dimension.
2319
+ cov : array_like (d, d), optional
2320
+ The covariance matrix. If omitted, use `cov_root` instead.
2321
+ If both `cov` and `cov_root` are omitted, use the identity matrix.
2322
+ cov_root : array_like (d, d'), optional
2323
+ A root decomposition of the covariance matrix, where ``d'`` may be less
2324
+ than ``d`` if the covariance is not full rank. If omitted, use `cov`.
2325
+ inv_transform : bool, optional
2326
+ If True, use inverse transform instead of Box-Muller. Default is True.
2327
+ engine : QMCEngine, optional
2328
+ Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
2329
+ rng : `numpy.random.Generator`, optional
2330
+ Pseudorandom number generator state. When `rng` is None, a new
2331
+ `numpy.random.Generator` is created using entropy from the
2332
+ operating system. Types other than `numpy.random.Generator` are
2333
+ passed to `numpy.random.default_rng` to instantiate a ``Generator``.
2334
+
2335
+ .. versionchanged:: 1.15.0
2336
+
2337
+ As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
2338
+ transition from use of `numpy.random.RandomState` to
2339
+ `numpy.random.Generator`, this keyword was changed from `seed` to
2340
+ `rng`. For an interim period, both keywords will continue to work, although
2341
+ only one may be specified at a time. After the interim period, function
2342
+ calls using the `seed` keyword will emit warnings. Following a
2343
+ deprecation period, the `seed` keyword will be removed.
2344
+
2345
+ Examples
2346
+ --------
2347
+ >>> import matplotlib.pyplot as plt
2348
+ >>> from scipy.stats import qmc
2349
+ >>> dist = qmc.MultivariateNormalQMC(mean=[0, 5], cov=[[1, 0], [0, 1]])
2350
+ >>> sample = dist.random(512)
2351
+ >>> _ = plt.scatter(sample[:, 0], sample[:, 1])
2352
+ >>> plt.show()
2353
+
2354
+ """
2355
+
2356
+ @_transition_to_rng('seed', replace_doc=False)
2357
+ def __init__(
2358
+ self,
2359
+ mean: "npt.ArrayLike",
2360
+ cov: "npt.ArrayLike | None" = None,
2361
+ *,
2362
+ cov_root: "npt.ArrayLike | None" = None,
2363
+ inv_transform: bool = True,
2364
+ engine: QMCEngine | None = None,
2365
+ rng: SeedType = None,
2366
+ ) -> None:
2367
+ mean = np.asarray(np.atleast_1d(mean))
2368
+ d = mean.shape[0]
2369
+ if cov is not None:
2370
+ # covariance matrix provided
2371
+ cov = np.asarray(np.atleast_2d(cov))
2372
+ # check for square/symmetric cov matrix and mean vector has the
2373
+ # same d
2374
+ if not mean.shape[0] == cov.shape[0]:
2375
+ raise ValueError("Dimension mismatch between mean and "
2376
+ "covariance.")
2377
+ if not np.allclose(cov, cov.transpose()):
2378
+ raise ValueError("Covariance matrix is not symmetric.")
2379
+ # compute Cholesky decomp; if it fails, do the eigen decomposition
2380
+ try:
2381
+ cov_root = np.linalg.cholesky(cov).transpose()
2382
+ except np.linalg.LinAlgError:
2383
+ eigval, eigvec = np.linalg.eigh(cov)
2384
+ if not np.all(eigval >= -1.0e-8):
2385
+ raise ValueError("Covariance matrix not PSD.")
2386
+ eigval = np.clip(eigval, 0.0, None)
2387
+ cov_root = (eigvec * np.sqrt(eigval)).transpose()
2388
+ elif cov_root is not None:
2389
+ # root decomposition provided
2390
+ cov_root = np.atleast_2d(cov_root)
2391
+ if not mean.shape[0] == cov_root.shape[0]:
2392
+ raise ValueError("Dimension mismatch between mean and "
2393
+ "covariance.")
2394
+ else:
2395
+ # corresponds to identity covariance matrix
2396
+ cov_root = None
2397
+
2398
+ self._inv_transform = inv_transform
2399
+
2400
+ if not inv_transform:
2401
+ # to apply Box-Muller, we need an even number of dimensions
2402
+ engine_dim = 2 * math.ceil(d / 2)
2403
+ else:
2404
+ engine_dim = d
2405
+ if engine is None:
2406
+ # Need this during SPEC 7 transition to prevent `RandomState`
2407
+ # from being passed via `rng`.
2408
+ kwarg = "seed" if isinstance(rng, np.random.RandomState) else "rng"
2409
+ kwargs = {kwarg: rng}
2410
+ self.engine = Sobol(
2411
+ d=engine_dim, scramble=True, bits=30, **kwargs
2412
+ ) # type: QMCEngine
2413
+ elif isinstance(engine, QMCEngine):
2414
+ if engine.d != engine_dim:
2415
+ raise ValueError("Dimension of `engine` must be consistent"
2416
+ " with dimensions of mean and covariance."
2417
+ " If `inv_transform` is False, it must be"
2418
+ " an even number.")
2419
+ self.engine = engine
2420
+ else:
2421
+ raise ValueError("`engine` must be an instance of "
2422
+ "`scipy.stats.qmc.QMCEngine` or `None`.")
2423
+
2424
+ self._mean = mean
2425
+ self._corr_matrix = cov_root
2426
+
2427
+ self._d = d
2428
+
2429
+ def random(self, n: IntNumber = 1) -> np.ndarray:
2430
+ """Draw `n` QMC samples from the multivariate Normal.
2431
+
2432
+ Parameters
2433
+ ----------
2434
+ n : int, optional
2435
+ Number of samples to generate in the parameter space. Default is 1.
2436
+
2437
+ Returns
2438
+ -------
2439
+ sample : array_like (n, d)
2440
+ Sample.
2441
+
2442
+ """
2443
+ base_samples = self._standard_normal_samples(n)
2444
+ return self._correlate(base_samples)
2445
+
2446
+ def _correlate(self, base_samples: np.ndarray) -> np.ndarray:
2447
+ if self._corr_matrix is not None:
2448
+ return base_samples @ self._corr_matrix + self._mean
2449
+ else:
2450
+ # avoid multiplying with identity here
2451
+ return base_samples + self._mean
2452
+
2453
+ def _standard_normal_samples(self, n: IntNumber = 1) -> np.ndarray:
2454
+ """Draw `n` QMC samples from the standard Normal :math:`N(0, I_d)`.
2455
+
2456
+ Parameters
2457
+ ----------
2458
+ n : int, optional
2459
+ Number of samples to generate in the parameter space. Default is 1.
2460
+
2461
+ Returns
2462
+ -------
2463
+ sample : array_like (n, d)
2464
+ Sample.
2465
+
2466
+ """
2467
+ # get base samples
2468
+ samples = self.engine.random(n)
2469
+ if self._inv_transform:
2470
+ # apply inverse transform
2471
+ # (values to close to 0/1 result in inf values)
2472
+ return stats.norm.ppf(0.5 + (1 - 1e-10) * (samples - 0.5)) # type: ignore[attr-defined] # noqa: E501
2473
+ else:
2474
+ # apply Box-Muller transform (note: indexes starting from 1)
2475
+ even = np.arange(0, samples.shape[-1], 2)
2476
+ Rs = np.sqrt(-2 * np.log(samples[:, even]))
2477
+ thetas = 2 * math.pi * samples[:, 1 + even]
2478
+ cos = np.cos(thetas)
2479
+ sin = np.sin(thetas)
2480
+ transf_samples = np.stack([Rs * cos, Rs * sin],
2481
+ -1).reshape(n, -1)
2482
+ # make sure we only return the number of dimension requested
2483
+ return transf_samples[:, : self._d]
2484
+
2485
+
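Beyond the identity-covariance example in the class docstring, a correlated covariance and a user-supplied engine can be combined as follows; this is an illustrative sketch (the seed and tolerance below are arbitrary choices):

>>> import numpy as np
>>> from scipy.stats import qmc
>>> engine = qmc.Halton(d=2, rng=12345)
>>> dist = qmc.MultivariateNormalQMC(
...     mean=[0, 5], cov=[[1, 0.7], [0.7, 1]], engine=engine
... )
>>> sample = dist.random(256)
>>> sample.shape
(256, 2)
>>> bool(abs(np.corrcoef(sample.T)[0, 1] - 0.7) < 0.1)  # correlation close to 0.7
True

Note that the engine dimension must equal the mean's dimension, rounded up to an even number when `inv_transform` is False so that Box-Muller can pair coordinates.
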
+ class MultinomialQMC:
+     r"""QMC sampling from a multinomial distribution.
+
+     Parameters
+     ----------
+     pvals : array_like (k,)
+         Vector of probabilities of size ``k``, where ``k`` is the number
+         of categories. Elements must be non-negative and sum to 1.
+     n_trials : int
+         Number of trials.
+     engine : QMCEngine, optional
+         Quasi-Monte Carlo engine sampler. If None, `Sobol` is used.
+     rng : `numpy.random.Generator`, optional
+         Pseudorandom number generator state. When `rng` is None, a new
+         `numpy.random.Generator` is created using entropy from the
+         operating system. Types other than `numpy.random.Generator` are
+         passed to `numpy.random.default_rng` to instantiate a ``Generator``.
+
+         .. versionchanged:: 1.15.0
+
+             As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
+             transition from use of `numpy.random.RandomState` to
+             `numpy.random.Generator`, this keyword was changed from `seed` to
+             `rng`. For an interim period, both keywords will continue to work,
+             although only one may be specified at a time. After the interim
+             period, function calls using the `seed` keyword will emit warnings.
+             Following a deprecation period, the `seed` keyword will be removed.
+
+     Examples
+     --------
+     Let's define 3 categories and, for a given sample, distribute a total of
+     10 trials among them. The number of trials per category is determined
+     by the `pvals` associated with each category.
+     Then, we sample this distribution 64 times.
+
+     >>> import matplotlib.pyplot as plt
+     >>> from scipy.stats import qmc
+     >>> dist = qmc.MultinomialQMC(
+     ...     pvals=[0.2, 0.4, 0.4], n_trials=10, engine=qmc.Halton(d=1)
+     ... )
+     >>> sample = dist.random(64)
+
+     We can plot the sample and verify that the median number of trials for
+     each category follows the `pvals`. That would be
+     ``pvals * n_trials = [2, 4, 4]``.
+
+     >>> fig, ax = plt.subplots()
+     >>> ax.yaxis.get_major_locator().set_params(integer=True)
+     >>> _ = ax.boxplot(sample)
+     >>> ax.set(xlabel="Categories", ylabel="Trials")
+     >>> plt.show()
+
+     """
+
+     @_transition_to_rng('seed', replace_doc=False)
+     def __init__(
+         self,
+         pvals: "npt.ArrayLike",
+         n_trials: IntNumber,
+         *,
+         engine: QMCEngine | None = None,
+         rng: SeedType = None,
+     ) -> None:
+         self.pvals = np.atleast_1d(np.asarray(pvals))
+         if np.min(pvals) < 0:
+             raise ValueError('Elements of pvals must be non-negative.')
+         if not np.isclose(np.sum(pvals), 1):
+             raise ValueError('Elements of pvals must sum to 1.')
+         self.n_trials = n_trials
+         if engine is None:
+             # Need this during SPEC 7 transition to prevent `RandomState`
+             # from being passed via `rng`.
+             kwarg = "seed" if isinstance(rng, np.random.RandomState) else "rng"
+             kwargs = {kwarg: rng}
+             self.engine = Sobol(
+                 d=1, scramble=True, bits=30, **kwargs
+             )  # type: QMCEngine
+         elif isinstance(engine, QMCEngine):
+             if engine.d != 1:
+                 raise ValueError("Dimension of `engine` must be 1.")
+             self.engine = engine
+         else:
+             raise ValueError("`engine` must be an instance of "
+                              "`scipy.stats.qmc.QMCEngine` or `None`.")
+
+     def random(self, n: IntNumber = 1) -> np.ndarray:
+         """Draw `n` QMC samples from the multinomial distribution.
+
+         Parameters
+         ----------
+         n : int, optional
+             Number of samples to generate in the parameter space. Default is 1.
+
+         Returns
+         -------
+         samples : array_like (n, k)
+             Sample.
+
+         """
+         sample = np.empty((n, len(self.pvals)))
+         for i in range(n):
+             base_draws = self.engine.random(self.n_trials).ravel()
+             p_cumulative = np.empty_like(self.pvals, dtype=float)
+             _fill_p_cumulative(np.array(self.pvals, dtype=float), p_cumulative)
+             sample_ = np.zeros_like(self.pvals, dtype=np.intp)
+             _categorize(base_draws, p_cumulative, sample_)
+             sample[i] = sample_
+         return sample
+
+
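Since `_categorize` assigns each of the `n_trials` draws to exactly one category, every row of the returned array sums to `n_trials`. A quick check, complementary to the docstring example above:

>>> from scipy.stats import qmc
>>> dist = qmc.MultinomialQMC(pvals=[0.2, 0.4, 0.4], n_trials=10)
>>> sample = dist.random(8)
>>> sample.sum(axis=1)
array([10., 10., 10., 10., 10., 10., 10., 10.])
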
+ def _select_optimizer(
+     optimization: Literal["random-cd", "lloyd"] | None, config: dict
+ ) -> Callable | None:
+     """A factory for optimization methods."""
+     optimization_method: dict[str, Callable] = {
+         "random-cd": _random_cd,
+         "lloyd": _lloyd_centroidal_voronoi_tessellation
+     }
+
+     optimizer: partial | None
+     if optimization is not None:
+         try:
+             optimization = optimization.lower()  # type: ignore[assignment]
+             optimizer_ = optimization_method[optimization]
+         except KeyError as exc:
+             message = (f"{optimization!r} is not a valid optimization"
+                        f" method. It must be one of"
+                        f" {set(optimization_method)!r}")
+             raise ValueError(message) from exc
+
+         # config
+         optimizer = partial(optimizer_, **config)
+     else:
+         optimizer = None
+
+     return optimizer
+
+
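The factory simply binds the configuration dictionary onto the chosen routine with `functools.partial`; the resulting callable is then applied to a sample. An illustrative call of this private helper (the config values below are arbitrary, chosen only to keep the run short):

>>> import numpy as np
>>> from scipy.stats._qmc import _select_optimizer
>>> rng = np.random.default_rng(1234)
>>> config = {"n_iters": 1000, "n_nochange": 100, "rng": rng}
>>> optimizer = _select_optimizer("random-cd", config)  # partial(_random_cd, **config)
>>> sample = rng.random((16, 2))
>>> optimizer(sample).shape  # same sample, permuted in place to lower the CD
(16, 2)
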
+ def _random_cd(
+     best_sample: np.ndarray, n_iters: int, n_nochange: int, rng: GeneratorType,
+     **kwargs: dict
+ ) -> np.ndarray:
+     """Optimize an LHS with respect to the centered discrepancy (CD).
+
+     Create a base LHS and do random permutations of coordinates to
+     lower the centered discrepancy.
+     Because it starts with a normal LHS, it also works with the
+     `scramble` keyword argument.
+
+     Two stopping criteria are used: at most `n_iters` iterations are
+     performed, and the search stops early if there is no improvement
+     for `n_nochange` consecutive iterations.
+     """
+     del kwargs  # only use keywords which are defined, needed by factory
+
+     n, d = best_sample.shape
+
+     if d == 0 or n == 0:
+         return np.empty((n, d))
+
+     if d == 1 or n == 1:
+         # discrepancy measures are invariant under permuting factors and runs
+         return best_sample
+
+     best_disc = discrepancy(best_sample)
+
+     bounds = ([0, d - 1],
+               [0, n - 1],
+               [0, n - 1])
+
+     n_nochange_ = 0
+     n_iters_ = 0
+     while n_nochange_ < n_nochange and n_iters_ < n_iters:
+         n_iters_ += 1
+
+         col = rng_integers(rng, *bounds[0], endpoint=True)  # type: ignore[misc]
+         row_1 = rng_integers(rng, *bounds[1], endpoint=True)  # type: ignore[misc]
+         row_2 = rng_integers(rng, *bounds[2], endpoint=True)  # type: ignore[misc]
+         disc = _perturb_discrepancy(best_sample,
+                                     row_1, row_2, col,
+                                     best_disc)
+         if disc < best_disc:
+             best_sample[row_1, col], best_sample[row_2, col] = (
+                 best_sample[row_2, col], best_sample[row_1, col])
+
+             best_disc = disc
+             n_nochange_ = 0
+         else:
+             n_nochange_ += 1
+
+     return best_sample
+
+
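From user code this optimizer is reached through the public ``optimization`` keyword of the QMC engines rather than by calling it directly. A small sketch with `LatinHypercube` (the seed is arbitrary): starting from the same base LHS, only discrepancy-lowering swaps are accepted, so the optimized sample can never be worse.

>>> from scipy.stats import qmc
>>> plain = qmc.LatinHypercube(d=2, rng=1234).random(32)
>>> best = qmc.LatinHypercube(d=2, optimization="random-cd", rng=1234).random(32)
>>> bool(qmc.discrepancy(best) <= qmc.discrepancy(plain))
True
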
+ def _l1_norm(sample: np.ndarray) -> float:
+     return distance.pdist(sample, 'cityblock').min()
+
+
+ def _lloyd_iteration(
+     sample: np.ndarray,
+     decay: float,
+     qhull_options: str
+ ) -> np.ndarray:
+     """Lloyd-Max algorithm iteration.
+
+     Based on the implementation of Stéfan van der Walt:
+
+     https://github.com/stefanv/lloyd
+
+     which is:
+
+     Copyright (c) 2021-04-21 Stéfan van der Walt
+     https://github.com/stefanv/lloyd
+     MIT License
+
+     Parameters
+     ----------
+     sample : array_like (n, d)
+         The sample to iterate on.
+     decay : float
+         Relaxation decay. A positive value moves the samples toward their
+         centroids, and a negative value moves them away.
+         A value of 1 moves the samples all the way to their centroids.
+     qhull_options : str
+         Additional options to pass to Qhull. See Qhull manual
+         for details. (Default: "Qbb Qc Qz QJ Qx" for ndim > 4 and
+         "Qbb Qc Qz QJ" otherwise.)
+
+     Returns
+     -------
+     sample : array_like (n, d)
+         The sample after an iteration of Lloyd's algorithm.
+
+     """
+     new_sample = np.empty_like(sample)
+
+     voronoi = Voronoi(sample, qhull_options=qhull_options)
+
+     for ii, idx in enumerate(voronoi.point_region):
+         # the region is a series of indices into voronoi.vertices;
+         # remove the vertex at infinity, designated by index -1
+         region = [i for i in voronoi.regions[idx] if i != -1]
+
+         # get the vertices for this region
+         verts = voronoi.vertices[region]
+
+         # clipping would be wrong, we need to intersect
+         # verts = np.clip(verts, 0, 1)
+
+         # move samples towards centroids:
+         # Centroid in n-D is the mean for uniformly distributed nodes
+         # of a geometry.
+         centroid = np.mean(verts, axis=0)
+         new_sample[ii] = sample[ii] + (centroid - sample[ii]) * decay
+
+     # only accept the updates that keep the sample within the unit hypercube
+     is_valid = np.all(np.logical_and(new_sample >= 0, new_sample <= 1), axis=1)
+     sample[is_valid] = new_sample[is_valid]
+
+     return sample
+
+
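For reference, the Qhull data structures the loop above walks through (``point_region``, ``regions``, ``vertices``) relate to each other as follows; the points and seed below are illustrative only:

>>> import numpy as np
>>> from scipy.spatial import Voronoi
>>> rng = np.random.default_rng(5)
>>> vor = Voronoi(rng.random((8, 2)), qhull_options="Qbb Qc Qz QJ")
>>> region = vor.regions[vor.point_region[0]]             # cell of the first point
>>> verts = vor.vertices[[i for i in region if i != -1]]  # drop the vertex at infinity
>>> centroid = verts.mean(axis=0)                         # target of the Lloyd update
>>> centroid.shape
(2,)
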
+ def _lloyd_centroidal_voronoi_tessellation(
+     sample: "npt.ArrayLike",
+     *,
+     tol: DecimalNumber = 1e-5,
+     maxiter: IntNumber = 10,
+     qhull_options: str | None = None,
+     **kwargs: dict
+ ) -> np.ndarray:
+     """Approximate Centroidal Voronoi Tessellation.
+
+     Perturb samples in N-dimensions using Lloyd-Max algorithm.
+
+     Parameters
+     ----------
+     sample : array_like (n, d)
+         The sample to iterate on. With ``n`` the number of samples and ``d``
+         the dimension. Samples must be in :math:`[0, 1]^d`, with ``d>=2``.
+     tol : float, optional
+         Tolerance for termination. If the minimal L1 distance between samples
+         changes by less than `tol`, the algorithm stops. Default is 1e-5.
+     maxiter : int, optional
+         Maximum number of iterations. The algorithm stops after `maxiter`
+         iterations even if the `tol` criterion is not met.
+         Too many iterations tend to cluster the samples as a hypersphere.
+         Default is 10.
+     qhull_options : str, optional
+         Additional options to pass to Qhull. See Qhull manual
+         for details. (Default: "Qbb Qc Qz QJ Qx" for ndim > 4 and
+         "Qbb Qc Qz QJ" otherwise.)
+
+     Returns
+     -------
+     sample : array_like (n, d)
+         The sample after being processed by the Lloyd-Max algorithm.
+
+     Notes
+     -----
+     The Lloyd-Max algorithm is an iterative process that improves the
+     dispersion of samples. For a given sample: (i) compute a Voronoi
+     Tessellation; (ii) find the centroid of each Voronoi cell; (iii) move the
+     samples toward the centroid of their respective cell. See [1]_, [2]_.
+
+     A relaxation factor is used to control how fast samples can move at each
+     iteration. This factor starts at 2 and decays exponentially to 1 after
+     `maxiter` iterations.
+
+     The process converges to equally spaced samples. This implies that
+     measures like the discrepancy could suffer from too many iterations. On
+     the other hand, L1 and L2 distances should improve. This is especially
+     true with QMC methods which tend to favor the discrepancy over other
+     criteria.
+
+     .. note::
+
+         The current implementation does not intersect the Voronoi Tessellation
+         with the boundaries. This implies that, for a low number of samples,
+         empirically below 20, no Voronoi cell touches the boundaries.
+         Hence, samples cannot be moved close to the boundaries.
+
+         Further improvements could consider the samples at infinity so that
+         all boundaries are segments of some Voronoi cells. This would fix
+         the computation of the centroid position.
+
+     .. warning::
+
+         The Voronoi Tessellation step is expensive and quickly becomes
+         intractable with dimensions as low as 10 even for a sample
+         of size as low as 1000.
+
+     .. versionadded:: 1.9.0
+
+     References
+     ----------
+     .. [1] Lloyd. "Least Squares Quantization in PCM".
+        IEEE Transactions on Information Theory, 1982.
+     .. [2] Max J. "Quantizing for minimum distortion".
+        IEEE Transactions on Information Theory, 1960.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> from scipy.spatial import distance
+     >>> from scipy.stats._qmc import _lloyd_centroidal_voronoi_tessellation
+     >>> rng = np.random.default_rng()
+     >>> sample = rng.random((128, 2))
+
+     .. note::
+
+         The samples need to be in :math:`[0, 1]^d`. `scipy.stats.qmc.scale`
+         can be used to scale the samples from their original bounds to
+         :math:`[0, 1]^d`, and back to their original bounds.
+
+     Compute the quality of the sample using the L1 criterion.
+
+     >>> def l1_norm(sample):
+     ...     return distance.pdist(sample, 'cityblock').min()
+
+     >>> l1_norm(sample)
+     0.00161...  # random
+
+     Now process the sample using Lloyd's algorithm and check the improvement
+     on the L1. The value should increase.
+
+     >>> sample = _lloyd_centroidal_voronoi_tessellation(sample)
+     >>> l1_norm(sample)
+     0.0278...  # random
+
+     """
+     del kwargs  # only use keywords which are defined, needed by factory
+
+     sample = np.asarray(sample).copy()
+
+     if not sample.ndim == 2:
+         raise ValueError('`sample` is not a 2D array')
+
+     if not sample.shape[1] >= 2:
+         raise ValueError('`sample` dimension is not >= 2')
+
+     # Checking that sample is within the hypercube
+     if (sample.max() > 1.) or (sample.min() < 0.):
+         raise ValueError('`sample` is not in unit hypercube')
+
+     if qhull_options is None:
+         qhull_options = 'Qbb Qc Qz QJ'
+
+         if sample.shape[1] >= 5:
+             qhull_options += ' Qx'
+
+     # Fit an exponential to be 2 at 0 and 1 at `maxiter`.
+     # The decay is used for relaxation.
+     # `root` is the analytical solution of exp(-maxiter/x) = 0.1
+     root = -maxiter / np.log(0.1)
+     decay = [np.exp(-x / root) + 0.9 for x in range(maxiter)]
+
+     l1_old = _l1_norm(sample=sample)
+     for i in range(maxiter):
+         sample = _lloyd_iteration(
+             sample=sample, decay=decay[i],
+             qhull_options=qhull_options,
+         )
+
+         l1_new = _l1_norm(sample=sample)
+
+         if abs(l1_new - l1_old) < tol:
+             break
+         else:
+             l1_old = l1_new
+
+     return sample
+
+
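The relaxation schedule described in the Notes can be checked numerically; with the default ``maxiter=10`` the factor starts at roughly 2 (1.9 exactly) and is about 1.03 on the last performed iteration (it would be exactly 1 at iteration `maxiter`):

>>> import numpy as np
>>> maxiter = 10
>>> root = -maxiter / np.log(0.1)
>>> decay = [np.exp(-x / root) + 0.9 for x in range(maxiter)]
>>> print(round(float(decay[0]), 2), round(float(decay[-1]), 2))
1.9 1.03
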
+ def _validate_workers(workers: IntNumber = 1) -> IntNumber:
+     """Validate `workers` based on platform and value.
+
+     Parameters
+     ----------
+     workers : int, optional
+         Number of workers to use for parallel processing. If -1 is
+         given, all CPU threads are used. Default is 1.
+
+     Returns
+     -------
+     workers : int
+         Number of CPUs used by the algorithm.
+
+     """
+     workers = int(workers)
+     if workers == -1:
+         workers = os.cpu_count()  # type: ignore[assignment]
+         if workers is None:
+             raise NotImplementedError(
+                 "Cannot determine the number of cpus using os.cpu_count(), "
+                 "cannot use -1 for the number of workers"
+             )
+     elif workers <= 0:
+         raise ValueError(f"Invalid number of workers: {workers}, must be -1 "
+                          "or > 0")
+
+     return workers
+
+
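A brief illustration of the contract enforced by this private helper (`-1` resolves to the machine's CPU count, which will of course vary):

>>> from scipy.stats._qmc import _validate_workers
>>> _validate_workers(4)
4
>>> _validate_workers(-1) >= 1   # os.cpu_count() on this machine
True
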
+ def _validate_bounds(
+     l_bounds: "npt.ArrayLike", u_bounds: "npt.ArrayLike", d: int
+ ) -> "tuple[npt.NDArray[np.generic], npt.NDArray[np.generic]]":
+     """Bounds input validation.
+
+     Parameters
+     ----------
+     l_bounds, u_bounds : array_like (d,)
+         Lower and upper bounds.
+     d : int
+         Dimension to use for broadcasting.
+
+     Returns
+     -------
+     l_bounds, u_bounds : array_like (d,)
+         Lower and upper bounds.
+
+     """
+     try:
+         lower = np.broadcast_to(l_bounds, d)
+         upper = np.broadcast_to(u_bounds, d)
+     except ValueError as exc:
+         msg = ("'l_bounds' and 'u_bounds' must be broadcastable and respect"
+                " the sample dimension")
+         raise ValueError(msg) from exc
+
+     if not np.all(lower < upper):
+         raise ValueError("Bounds are not consistent 'l_bounds' < 'u_bounds'")
+
+     return lower, upper
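Scalar bounds are broadcast to the requested dimension, so callers can mix scalars and vectors; a short illustration of this private helper:

>>> from scipy.stats._qmc import _validate_bounds
>>> lower, upper = _validate_bounds(l_bounds=0, u_bounds=[1, 5, 10], d=3)
>>> lower
array([0, 0, 0])
>>> upper
array([ 1,  5, 10])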